X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fieee754%2Fpart_mul_add%2Fmultiply.py;h=672bbfd33acdef4510167e6ec1d8c78e0bf3603f;hb=5e1e5b602e087661f3ee58d608ae1396ecbc14b3;hp=f55d1f7670a75a3faadd35f0b00abefee8d3bbf8;hpb=5b9ebb523a4b4c22aef8a90193aba886a6e7a52a;p=ieee754fpu.git

diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py
index f55d1f76..672bbfd3 100644
--- a/src/ieee754/part_mul_add/multiply.py
+++ b/src/ieee754/part_mul_add/multiply.py
@@ -71,17 +71,20 @@ class PartitionPoints(dict):
         for point, enabled in self.items():
             yield enabled.eq(rhs[point])
 
-    def as_mask(self, width):
+    def as_mask(self, width, mul=1):
         """Create a bit-mask from `self`.
 
         Each bit in the returned mask is clear only if the partition point
         at the same bit-index is enabled.
 
         :param width: the bit width of the resulting mask
+        :param mul: a "multiplier" which in-place expands the partition points
+                    typically set to "2" when used for multipliers
         """
         bits = []
         for i in range(width):
-            if i in self:
+            i /= mul
+            if i.is_integer() and int(i) in self:
                 bits.append(~self[i])
             else:
                 bits.append(True)
@@ -227,13 +230,16 @@ class PartitionedAdder(Elaboratable):
     supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, width, partition_points):
+    def __init__(self, width, partition_points, partition_step=1):
         """Create a ``PartitionedAdder``.
 
         :param width: the bit width of the input and output
         :param partition_points: the input partition points
+        :param partition_step: a multiplier (typically double) step
+                               which in-place "expands" the partition points
         """
         self.width = width
+        self.pmul = partition_step
         self.a = Signal(width, reset_less=True)
         self.b = Signal(width, reset_less=True)
         self.output = Signal(width, reset_less=True)
@@ -267,11 +273,12 @@
         # carry has been carried *over* the break point.
         for i in range(self.width):
-            if i in self.partition_points:
+            pi = i/self.pmul # double the range of the partition point test
+            if pi.is_integer() and pi in self.partition_points:
                 # add extra bit set to 0 + 0 for enabled partition points
                 # and 1 + 0 for disabled partition points
                 ea.append(expanded_a[expanded_index])
-                al.append(~self.partition_points[i]) # add extra bit in a
+                al.append(~self.partition_points[pi]) # add extra bit in a
                 eb.append(expanded_b[expanded_index])
                 bl.append(C(0)) # yes, add a zero
                 expanded_index += 1 # skip the extra point.
                                     # NOT in the output
 
@@ -298,41 +305,41 @@ FULL_ADDER_INPUT_COUNT = 3
 
 class AddReduceData:
 
-    def __init__(self, ppoints, n_inputs, output_width, n_parts):
+    def __init__(self, part_pts, n_inputs, output_width, n_parts):
         self.part_ops = [Signal(2, name=f"part_ops_{i}", reset_less=True)
                           for i in range(n_parts)]
-        self.inputs = [Signal(output_width, name=f"inputs_{i}",
+        self.terms = [Signal(output_width, name=f"inputs_{i}",
                               reset_less=True)
                         for i in range(n_inputs)]
-        self.reg_partition_points = ppoints.like()
+        self.part_pts = part_pts.like()
 
-    def eq_from(self, reg_partition_points, inputs, part_ops):
-        return [self.reg_partition_points.eq(reg_partition_points)] + \
-               [self.inputs[i].eq(inputs[i])
-                   for i in range(len(self.inputs))] + \
+    def eq_from(self, part_pts, inputs, part_ops):
+        return [self.part_pts.eq(part_pts)] + \
+               [self.terms[i].eq(inputs[i])
+                   for i in range(len(self.terms))] + \
               [self.part_ops[i].eq(part_ops[i])
                   for i in range(len(self.part_ops))]
 
    def eq(self, rhs):
-        return self.eq_from(rhs.reg_partition_points, rhs.inputs, rhs.part_ops)
+        return self.eq_from(rhs.part_pts, rhs.terms, rhs.part_ops)
 
 
 class FinalReduceData:
 
-    def __init__(self, ppoints, output_width, n_parts):
+    def __init__(self, part_pts, output_width, n_parts):
         self.part_ops = [Signal(2, name=f"part_ops_{i}", reset_less=True)
                           for i in range(n_parts)]
         self.output = Signal(output_width, reset_less=True)
-        self.reg_partition_points = ppoints.like()
+        self.part_pts = part_pts.like()
 
-    def eq_from(self, reg_partition_points, output, part_ops):
-        return [self.reg_partition_points.eq(reg_partition_points)] + \
+    def eq_from(self, part_pts, output, part_ops):
+        return [self.part_pts.eq(part_pts)] + \
               [self.output.eq(output)] + \
               [self.part_ops[i].eq(part_ops[i])
                   for i in range(len(self.part_ops))]
 
    def eq(self, rhs):
-        return self.eq_from(rhs.reg_partition_points, rhs.output, rhs.part_ops)
+        return self.eq_from(rhs.part_pts, rhs.output, rhs.part_ops)
 
 
 class FinalAdd(Elaboratable):
@@ -363,18 +370,19 @@
             m.d.comb += output.eq(0)
         elif self.n_inputs == 1:
             # handle single input
-            m.d.comb += output.eq(self.i.inputs[0])
+            m.d.comb += output.eq(self.i.terms[0])
         else:
             # base case for adding 2 inputs
             assert self.n_inputs == 2
-            adder = PartitionedAdder(output_width, self.i.reg_partition_points)
+            adder = PartitionedAdder(output_width,
+                                     self.i.part_pts, 2)
             m.submodules.final_adder = adder
-            m.d.comb += adder.a.eq(self.i.inputs[0])
-            m.d.comb += adder.b.eq(self.i.inputs[1])
+            m.d.comb += adder.a.eq(self.i.terms[0])
+            m.d.comb += adder.b.eq(self.i.terms[1])
             m.d.comb += output.eq(adder.output)
 
         # create output
-        m.d.comb += self.o.eq_from(self.i.reg_partition_points, output,
+        m.d.comb += self.o.eq_from(self.i.part_pts, output,
                                    self.i.part_ops)
 
         return m
@@ -473,13 +481,13 @@
                 terms.append(adder_i.mcarry)
         # handle the remaining inputs.
         if self.n_inputs % FULL_ADDER_INPUT_COUNT == 1:
-            terms.append(self.i.inputs[-1])
+            terms.append(self.i.terms[-1])
         elif self.n_inputs % FULL_ADDER_INPUT_COUNT == 2:
             # Just pass the terms to the next layer, since we wouldn't gain
             # anything by using a half adder since there would still be 2 terms
             # and just passing the terms to the next layer saves gates.
-            terms.append(self.i.inputs[-2])
-            terms.append(self.i.inputs[-1])
+            terms.append(self.i.terms[-2])
+            terms.append(self.i.terms[-1])
         else:
             assert self.n_inputs % FULL_ADDER_INPUT_COUNT == 0
 
@@ -493,26 +501,27 @@
 
         # copy the intermediate terms to the output
         for i, value in enumerate(terms):
-            m.d.comb += self.o.inputs[i].eq(value)
+            m.d.comb += self.o.terms[i].eq(value)
 
         # copy reg part points and part ops to output
-        m.d.comb += self.o.reg_partition_points.eq(self.i.reg_partition_points)
+        m.d.comb += self.o.part_pts.eq(self.i.part_pts)
         m.d.comb += [self.o.part_ops[i].eq(self.i.part_ops[i])
                      for i in range(len(self.i.part_ops))]
 
         # set up the partition mask (for the adders)
         part_mask = Signal(self.output_width, reset_less=True)
 
-        mask = self.i.reg_partition_points.as_mask(self.output_width)
+        # get partition points as a mask
+        mask = self.i.part_pts.as_mask(self.output_width, mul=2)
         m.d.comb += part_mask.eq(mask)
 
         # add and link the intermediate term modules
         for i, (iidx, adder_i) in enumerate(adders):
             setattr(m.submodules, f"adder_{i}", adder_i)
 
-            m.d.comb += adder_i.in0.eq(self.i.inputs[iidx])
-            m.d.comb += adder_i.in1.eq(self.i.inputs[iidx + 1])
-            m.d.comb += adder_i.in2.eq(self.i.inputs[iidx + 2])
+            m.d.comb += adder_i.in0.eq(self.i.terms[iidx])
+            m.d.comb += adder_i.in1.eq(self.i.terms[iidx + 1])
+            m.d.comb += adder_i.in2.eq(self.i.terms[iidx + 2])
             m.d.comb += adder_i.mask.eq(part_mask)
 
         return m
@@ -579,8 +588,8 @@
                                         next_levels, partition_points)
             mods.append(next_level)
             next_levels = list(AddReduce.next_register_levels(next_levels))
-            partition_points = next_level.i.reg_partition_points
-            inputs = next_level.o.inputs
+            partition_points = next_level.i.part_pts
+            inputs = next_level.o.terms
             ilen = len(inputs)
             part_ops = next_level.i.part_ops
 
@@ -777,10 +786,10 @@ class LSBNegTerm(Elaboratable):
 
 class Parts(Elaboratable):
 
-    def __init__(self, pbwid, epps, n_parts):
+    def __init__(self, pbwid, part_pts, n_parts):
         self.pbwid = pbwid
         # inputs
-        self.epps = PartitionPoints.like(epps, name="epps") # expanded points
+        self.part_pts = PartitionPoints.like(part_pts)
         # outputs
         self.parts = [Signal(name=f"part_{i}", reset_less=True)
                       for i in range(n_parts)]
@@ -788,13 +797,13 @@
     def elaborate(self, platform):
         m = Module()
 
-        epps, parts = self.epps, self.parts
+        part_pts, parts = self.part_pts, self.parts
         # collect part-bytes (double factor because the input is extended)
         pbs = Signal(self.pbwid, reset_less=True)
         tl = []
         for i in range(self.pbwid):
             pb = Signal(name="pb%d" % i, reset_less=True)
-            m.d.comb += pb.eq(epps.part_byte(i, mfactor=2)) # double
+            m.d.comb += pb.eq(part_pts.part_byte(i))
             tl.append(pb)
         m.d.comb += pbs.eq(Cat(*tl))
 
@@ -830,10 +839,10 @@
     the extra terms - as separate terms - are then thrown at the
     AddReduce alongside the multiplication part-results.
""" - def __init__(self, epps, width, n_parts, n_levels, pbwid): + def __init__(self, part_pts, width, n_parts, n_levels, pbwid): self.pbwid = pbwid - self.epps = epps + self.part_pts = part_pts # inputs self.a = Signal(64, reset_less=True) @@ -857,9 +866,9 @@ class Part(Elaboratable): m = Module() pbs, parts = self.pbs, self.parts - epps = self.epps - m.submodules.p = p = Parts(self.pbwid, epps, len(parts)) - m.d.comb += p.epps.eq(epps) + part_pts = self.part_pts + m.submodules.p = p = Parts(self.pbwid, part_pts, len(parts)) + m.d.comb += p.part_pts.eq(part_pts) parts = p.parts byte_count = 8 // len(parts) @@ -939,9 +948,9 @@ class FinalOut(Elaboratable): that some partitions requested 8-bit computation whilst others requested 16 or 32 bit. """ - def __init__(self, output_width, n_parts, partition_points): - self.expanded_part_points = partition_points - self.i = IntermediateData(partition_points, output_width, n_parts) + def __init__(self, output_width, n_parts, part_pts): + self.part_pts = part_pts + self.i = IntermediateData(part_pts, output_width, n_parts) self.out_wid = output_width//2 # output self.out = Signal(self.out_wid, reset_less=True) @@ -950,13 +959,13 @@ class FinalOut(Elaboratable): def elaborate(self, platform): m = Module() - eps = self.expanded_part_points - m.submodules.p_8 = p_8 = Parts(8, eps, 8) - m.submodules.p_16 = p_16 = Parts(8, eps, 4) - m.submodules.p_32 = p_32 = Parts(8, eps, 2) - m.submodules.p_64 = p_64 = Parts(8, eps, 1) + part_pts = self.part_pts + m.submodules.p_8 = p_8 = Parts(8, part_pts, 8) + m.submodules.p_16 = p_16 = Parts(8, part_pts, 4) + m.submodules.p_32 = p_32 = Parts(8, part_pts, 2) + m.submodules.p_64 = p_64 = Parts(8, part_pts, 1) - out_part_pts = self.i.reg_partition_points + out_part_pts = self.i.part_pts # temporaries d8 = [Signal(name=f"d8_{i}", reset_less=True) for i in range(8)] @@ -968,10 +977,10 @@ class FinalOut(Elaboratable): i32 = Signal(self.out_wid, reset_less=True) i64 = Signal(self.out_wid, reset_less=True) - m.d.comb += p_8.epps.eq(out_part_pts) - m.d.comb += p_16.epps.eq(out_part_pts) - m.d.comb += p_32.epps.eq(out_part_pts) - m.d.comb += p_64.epps.eq(out_part_pts) + m.d.comb += p_8.part_pts.eq(out_part_pts) + m.d.comb += p_16.part_pts.eq(out_part_pts) + m.d.comb += p_32.part_pts.eq(out_part_pts) + m.d.comb += p_64.part_pts.eq(out_part_pts) for i in range(len(p_8.parts)): m.d.comb += d8[i].eq(p_8.parts[i]) @@ -1047,18 +1056,18 @@ class Signs(Elaboratable): class IntermediateData: - def __init__(self, ppoints, output_width, n_parts): + def __init__(self, part_pts, output_width, n_parts): self.part_ops = [Signal(2, name=f"part_ops_{i}", reset_less=True) for i in range(n_parts)] - self.reg_partition_points = ppoints.like() + self.part_pts = part_pts.like() self.outputs = [Signal(output_width, name="io%d" % i, reset_less=True) for i in range(4)] # intermediates (needed for unit tests) self.intermediate_output = Signal(output_width) - def eq_from(self, reg_partition_points, outputs, intermediate_output, + def eq_from(self, part_pts, outputs, intermediate_output, part_ops): - return [self.reg_partition_points.eq(reg_partition_points)] + \ + return [self.part_pts.eq(part_pts)] + \ [self.intermediate_output.eq(intermediate_output)] + \ [self.outputs[i].eq(outputs[i]) for i in range(4)] + \ @@ -1066,10 +1075,129 @@ class IntermediateData: for i in range(len(self.part_ops))] def eq(self, rhs): - return self.eq_from(rhs.reg_partition_points, rhs.outputs, + return self.eq_from(rhs.part_pts, rhs.outputs, rhs.intermediate_output, 
                             rhs.part_ops)
 
+class AllTermsData:
+
+    def __init__(self, partition_points):
+        self.a = Signal(64)
+        self.b = Signal(64)
+        self.part_pts = partition_points.like()
+        self.part_ops = [Signal(2, name=f"part_ops_{i}") for i in range(8)]
+
+    def eq_from(self, part_pts, inputs, part_ops):
+        return [self.part_pts.eq(part_pts)] + \
+               [self.a.eq(a), self.b.eq(b)] + \
+               [self.part_ops[i].eq(part_ops[i])
+                   for i in range(len(self.part_ops))]
+
+    def eq(self, rhs):
+        return self.eq_from(rhs.part_pts, rhs.a, rhs.b, rhs.part_ops)
+
+
+class AllTerms(Elaboratable):
+    """Set of terms to be added together
+    """
+
+    def __init__(self, n_inputs, output_width, n_parts, register_levels,
+                 partition_points):
+        """Create an ``AddReduce``.
+
+        :param inputs: input ``Signal``s to be summed.
+        :param output_width: bit-width of ``output``.
+        :param register_levels: List of nesting levels that should have
+            pipeline registers.
+        :param partition_points: the input partition points.
+        """
+        self.i = AllTermsData(partition_points)
+        self.register_levels = register_levels
+        self.n_inputs = n_inputs
+        self.n_parts = n_parts
+        self.output_width = output_width
+        self.o = AddReduceData(self.i.part_pts, n_inputs,
+                               output_width, n_parts)
+
+    def elaborate(self, platform):
+        m = Module()
+
+        eps = self.i.part_pts
+
+        # collect part-bytes
+        pbs = Signal(8, reset_less=True)
+        tl = []
+        for i in range(8):
+            pb = Signal(name="pb%d" % i, reset_less=True)
+            m.d.comb += pb.eq(eps.part_byte(i))
+            tl.append(pb)
+        m.d.comb += pbs.eq(Cat(*tl))
+
+        # local variables
+        signs = []
+        for i in range(8):
+            s = Signs()
+            signs.append(s)
+            setattr(m.submodules, "signs%d" % i, s)
+            m.d.comb += s.part_ops.eq(self.i.part_ops[i])
+
+        n_levels = len(self.register_levels)+1
+        m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8)
+        m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8)
+        m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8)
+        m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8)
+        nat_l, nbt_l, nla_l, nlb_l = [], [], [], []
+        for mod in [part_8, part_16, part_32, part_64]:
+            m.d.comb += mod.a.eq(self.i.a)
+            m.d.comb += mod.b.eq(self.i.b)
+            for i in range(len(signs)):
+                m.d.comb += mod.a_signed[i].eq(signs[i].a_signed)
+                m.d.comb += mod.b_signed[i].eq(signs[i].b_signed)
+            m.d.comb += mod.pbs.eq(pbs)
+            nat_l.append(mod.not_a_term)
+            nbt_l.append(mod.not_b_term)
+            nla_l.append(mod.neg_lsb_a_term)
+            nlb_l.append(mod.neg_lsb_b_term)
+
+        terms = []
+
+        for a_index in range(8):
+            t = ProductTerms(8, 128, 8, a_index, 8)
+            setattr(m.submodules, "terms_%d" % a_index, t)
+
+            m.d.comb += t.a.eq(self.i.a)
+            m.d.comb += t.b.eq(self.i.b)
+            m.d.comb += t.pb_en.eq(pbs)
+
+            for term in t.terms:
+                terms.append(term)
+
+        # it's fine to bitwise-or data together since they are never enabled
+        # at the same time
+        m.submodules.nat_or = nat_or = OrMod(128)
+        m.submodules.nbt_or = nbt_or = OrMod(128)
+        m.submodules.nla_or = nla_or = OrMod(128)
+        m.submodules.nlb_or = nlb_or = OrMod(128)
+        for l, mod in [(nat_l, nat_or),
+                       (nbt_l, nbt_or),
+                       (nla_l, nla_or),
+                       (nlb_l, nlb_or)]:
+            for i in range(len(l)):
+                m.d.comb += mod.orin[i].eq(l[i])
+            terms.append(mod.orout)
+
+        # copy the intermediate terms to the output
+        for i, value in enumerate(terms):
+            m.d.comb += self.o.terms[i].eq(value)
+
+        # copy reg part points and part ops to output
+        m.d.comb += self.o.part_pts.eq(eps)
+        m.d.comb += [self.o.part_ops[i].eq(self.i.part_ops[i])
+                     for i in range(len(self.i.part_ops))]
+
+        return m
+
+
 class Intermediates(Elaboratable):
     """ Intermediate output modules
     """
 
@@ -1082,7 +1210,7 @@
         m = Module()
 
         out_part_ops = self.i.part_ops
-        out_part_pts = self.i.reg_partition_points
+        out_part_pts = self.i.part_pts
 
         # create _output_64
         m.submodules.io64 = io64 = IntermediateOut(64, 128, 1)
@@ -1114,7 +1242,7 @@
         for i in range(8):
             m.d.comb += self.o.part_ops[i].eq(out_part_ops[i])
 
-        m.d.comb += self.o.reg_partition_points.eq(out_part_pts)
+        m.d.comb += self.o.part_pts.eq(out_part_pts)
         m.d.comb += self.o.intermediate_output.eq(self.i.output)
 
         return m
@@ -1173,156 +1301,40 @@ class Mul8_16_32_64(Elaboratable):
 
     def elaborate(self, platform):
         m = Module()
 
-        # collect part-bytes
-        pbs = Signal(8, reset_less=True)
-        tl = []
-        for i in range(8):
-            pb = Signal(name="pb%d" % i, reset_less=True)
-            m.d.comb += pb.eq(self.part_pts.part_byte(i))
-            tl.append(pb)
-        m.d.comb += pbs.eq(Cat(*tl))
-
-        # create (doubled) PartitionPoints (output is double input width)
-        expanded_part_pts = eps = PartitionPoints()
-        for i, v in self.part_pts.items():
-            ep = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True)
-            expanded_part_pts[i * 2] = ep
-            m.d.comb += ep.eq(v)
+        part_pts = self.part_pts
 
         n_inputs = 64 + 4
         n_parts = 8 #len(self.part_pts)
-        t = AllTerms(8, n_inputs, 128, n_parts, self.register_levels,
-                     eps)
+        t = AllTerms(n_inputs, 128, n_parts, self.register_levels, part_pts)
         m.submodules.allterms = t
-        m.d.comb += t.a.eq(self.a)
-        m.d.comb += t.b.eq(self.b)
-        m.d.comb += t.pbs.eq(pbs)
-        m.d.comb += t.epps.eq(eps)
+        m.d.comb += t.i.a.eq(self.a)
+        m.d.comb += t.i.b.eq(self.b)
+        m.d.comb += t.i.part_pts.eq(part_pts)
         for i in range(8):
-            m.d.comb += t.part_ops[i].eq(self.part_ops[i])
+            m.d.comb += t.i.part_ops[i].eq(self.part_ops[i])
 
-        terms = t.o.inputs
+        terms = t.o.terms
 
         add_reduce = AddReduce(terms, 128, self.register_levels,
-                               t.o.reg_partition_points,
+                               t.o.part_pts,
                                t.o.part_ops)
 
         out_part_ops = add_reduce.o.part_ops
-        out_part_pts = add_reduce.o.reg_partition_points
+        out_part_pts = add_reduce.o.part_pts
 
         m.submodules.add_reduce = add_reduce
-        m.d.comb += self.intermediate_output.eq(add_reduce.o.output)
 
-        interm = Intermediates(128, 8, expanded_part_pts)
+        interm = Intermediates(128, 8, part_pts)
         m.submodules.intermediates = interm
         m.d.comb += interm.i.eq(add_reduce.o)
 
         # final output
-        m.submodules.finalout = finalout = FinalOut(128, 8, expanded_part_pts)
+        m.submodules.finalout = finalout = FinalOut(128, 8, part_pts)
         m.d.comb += finalout.i.eq(interm.o)
         m.d.comb += self.output.eq(finalout.out)
-
-        return m
-
-
-class AllTerms(Elaboratable):
-    """Set of terms to be added together
-    """
-
-    def __init__(self, pbwid, n_inputs, output_width, n_parts, register_levels,
-                 partition_points):
-        """Create an ``AddReduce``.
-
-        :param inputs: input ``Signal``s to be summed.
-        :param output_width: bit-width of ``output``.
-        :param register_levels: List of nesting levels that should have
-            pipeline registers.
-        :param partition_points: the input partition points.
- """ - self.epps = partition_points.like() - self.register_levels = register_levels - self.pbwid = pbwid - self.n_inputs = n_inputs - self.n_parts = n_parts - self.output_width = output_width - self.o = AddReduceData(self.epps, n_inputs, - output_width, n_parts) - - self.a = Signal(64) - self.b = Signal(64) - - self.pbs = Signal(pbwid, reset_less=True) - self.part_ops = [Signal(2, name=f"part_ops_{i}") for i in range(8)] - - def elaborate(self, platform): - m = Module() - - pbs = self.pbs - eps = self.epps - - # local variables - signs = [] - for i in range(8): - s = Signs() - signs.append(s) - setattr(m.submodules, "signs%d" % i, s) - m.d.comb += s.part_ops.eq(self.part_ops[i]) - - n_levels = len(self.register_levels)+1 - m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8) - m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8) - m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8) - m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8) - nat_l, nbt_l, nla_l, nlb_l = [], [], [], [] - for mod in [part_8, part_16, part_32, part_64]: - m.d.comb += mod.a.eq(self.a) - m.d.comb += mod.b.eq(self.b) - for i in range(len(signs)): - m.d.comb += mod.a_signed[i].eq(signs[i].a_signed) - m.d.comb += mod.b_signed[i].eq(signs[i].b_signed) - m.d.comb += mod.pbs.eq(pbs) - nat_l.append(mod.not_a_term) - nbt_l.append(mod.not_b_term) - nla_l.append(mod.neg_lsb_a_term) - nlb_l.append(mod.neg_lsb_b_term) - - terms = [] - - for a_index in range(8): - t = ProductTerms(8, 128, 8, a_index, 8) - setattr(m.submodules, "terms_%d" % a_index, t) - - m.d.comb += t.a.eq(self.a) - m.d.comb += t.b.eq(self.b) - m.d.comb += t.pb_en.eq(pbs) - - for term in t.terms: - terms.append(term) - - # it's fine to bitwise-or data together since they are never enabled - # at the same time - m.submodules.nat_or = nat_or = OrMod(128) - m.submodules.nbt_or = nbt_or = OrMod(128) - m.submodules.nla_or = nla_or = OrMod(128) - m.submodules.nlb_or = nlb_or = OrMod(128) - for l, mod in [(nat_l, nat_or), - (nbt_l, nbt_or), - (nla_l, nla_or), - (nlb_l, nlb_or)]: - for i in range(len(l)): - m.d.comb += mod.orin[i].eq(l[i]) - terms.append(mod.orout) - - # copy the intermediate terms to the output - for i, value in enumerate(terms): - m.d.comb += self.o.inputs[i].eq(value) - - # copy reg part points and part ops to output - m.d.comb += self.o.reg_partition_points.eq(eps) - m.d.comb += [self.o.part_ops[i].eq(self.part_ops[i]) - for i in range(len(self.part_ops))] + m.d.comb += self.intermediate_output.eq(finalout.intermediate_output) return m