1 # SPDX-License-Identifier: LGPL-2.1-or-later
2 # See Notices.txt for copyright information
3 """Integer Multiplication."""
5 from nmigen
import Signal
, Module
, Value
, Elaboratable
, Cat
, C
, Mux
, Repl
6 from nmigen
.hdl
.ast
import Assign
7 from abc
import ABCMeta
, abstractmethod
8 from nmigen
.cli
import main
9 from functools
import reduce
10 from operator
import or_
class PartitionPoints(dict):
    """Partition points and corresponding ``Value``s.

    The points at where an ALU is partitioned along with ``Value``s that
    specify if the corresponding partition points are enabled.

    For example: ``{1: True, 5: True, 10: True}`` with
    ``width == 16`` specifies that the ALU is split into 4 sections:

    * bits 0 <= ``i`` < 1
    * bits 1 <= ``i`` < 5
    * bits 5 <= ``i`` < 10
    * bits 10 <= ``i`` < 16

    If the partition_points were instead ``{1: True, 5: a, 10: True}``
    where ``a`` is a 1-bit ``Signal``:

    * If ``a`` is asserted:

        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 5
        * bits 5 <= ``i`` < 10
        * bits 10 <= ``i`` < 16

    * Otherwise

        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 10
        * bits 10 <= ``i`` < 16
    """

    def __init__(self, partition_points=None):
        """Create a new ``PartitionPoints``.

        :param partition_points: the input partition points to values mapping.
        """
        super().__init__()
        if partition_points is not None:
            for point, enabled in partition_points.items():
                if not isinstance(point, int):
                    raise TypeError("point must be a non-negative integer")
                if point < 0:
                    raise ValueError("point must be a non-negative integer")
                self[point] = Value.wrap(enabled)

    def like(self, name=None, src_loc_at=0, mul=1):
        """Create a new ``PartitionPoints`` with ``Signal``s for all values.

        :param name: the base name for the new ``Signal``s.
        :param mul: a multiplication factor on the indices
        """
        if name is None:
            name = Signal(src_loc_at=1+src_loc_at).name  # get variable name
        retval = PartitionPoints()
        for point, enabled in self.items():
            point *= mul
            retval[point] = Signal(enabled.shape(), name=f"{name}_{point}")
        return retval

    def eq(self, rhs):
        """Assign ``PartitionPoints`` using ``Signal.eq``."""
        if set(self.keys()) != set(rhs.keys()):
            raise ValueError("incompatible point set")
        for point, enabled in self.items():
            yield enabled.eq(rhs[point])

    def as_mask(self, width):
        """Create a bit-mask from `self`.

        Each bit in the returned mask is clear only if the partition point at
        the same bit-index is enabled.

        :param width: the bit width of the resulting mask
        """
        bits = []
        for i in range(width):
            if i in self:
                # mask bit is the inverse of the enable for this point
                bits.append(~self[i])
            else:
                bits.append(True)
        return Cat(*bits)

    def get_max_partition_count(self, width):
        """Get the maximum number of partitions.

        Gets the number of partitions when all partition points are enabled.
        """
        retval = 1
        for point in self.keys():
            if point < width:
                retval += 1
        return retval

    def fits_in_width(self, width):
        """Check if all partition points are smaller than `width`."""
        for point in self.keys():
            if point >= width:
                return False
        return True

    def part_byte(self, index, mfactor=1):  # mfactor used for "expanding"
        # byte -1 (below LSB) and byte 7 (top) are always "broken"
        if index == -1 or index == 7:
            return C(True, 1)
        assert index >= 0 and index < 8
        return self[(index * 8 + 8)*mfactor]
class FullAdder(Elaboratable):
    """Full Adder.

    :attribute in0: the first input
    :attribute in1: the second input
    :attribute in2: the third input
    :attribute sum: the sum output
    :attribute carry: the carry output

    Rather than do individual full adders (and have an array of them,
    which would be very slow to simulate), this module can specify the
    bit width of the inputs and outputs: in effect it performs multiple
    Full 3-2 Add operations "in parallel".
    """

    def __init__(self, width):
        """Create a ``FullAdder``.

        :param width: the bit width of the input and output
        """
        self.in0 = Signal(width)
        self.in1 = Signal(width)
        self.in2 = Signal(width)
        self.sum = Signal(width)
        self.carry = Signal(width)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        # bitwise 3:2 compressor: sum is XOR of all three inputs,
        # carry is the majority function
        m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
        m.d.comb += self.carry.eq((self.in0 & self.in1)
                                  | (self.in1 & self.in2)
                                  | (self.in2 & self.in0))
        return m
class MaskedFullAdder(Elaboratable):
    """Masked Full Adder.

    :attribute mask: the carry partition mask
    :attribute in0: the first input
    :attribute in1: the second input
    :attribute in2: the third input
    :attribute sum: the sum output
    :attribute mcarry: the masked carry output

    FullAdders are always used with a "mask" on the output.  To keep
    the graphviz "clean", this class performs the masking here rather
    than inside a large for-loop.

    See the following discussion as to why this is no longer derived
    from FullAdder.  Each carry is shifted here *before* being ANDed
    with the mask, so that an AOI cell may be used (which is more
    efficient):
    https://en.wikipedia.org/wiki/AND-OR-Invert
    https://groups.google.com/d/msg/comp.arch/fcq-GLQqvas/vTxmcA0QAgAJ
    """

    def __init__(self, width):
        """Create a ``MaskedFullAdder``.

        :param width: the bit width of the input and output
        """
        self.width = width
        self.mask = Signal(width, reset_less=True)
        self.mcarry = Signal(width, reset_less=True)
        self.in0 = Signal(width, reset_less=True)
        self.in1 = Signal(width, reset_less=True)
        self.in2 = Signal(width, reset_less=True)
        self.sum = Signal(width, reset_less=True)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        s1 = Signal(self.width, reset_less=True)
        s2 = Signal(self.width, reset_less=True)
        s3 = Signal(self.width, reset_less=True)
        c1 = Signal(self.width, reset_less=True)
        c2 = Signal(self.width, reset_less=True)
        c3 = Signal(self.width, reset_less=True)
        m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
        # pre-shift each input left by one (Cat with a 0 LSB) so the
        # majority-carry terms land at the carry bit position before masking
        m.d.comb += s1.eq(Cat(0, self.in0))
        m.d.comb += s2.eq(Cat(0, self.in1))
        m.d.comb += s3.eq(Cat(0, self.in2))
        m.d.comb += c1.eq(s1 & s2 & self.mask)
        m.d.comb += c2.eq(s2 & s3 & self.mask)
        m.d.comb += c3.eq(s3 & s1 & self.mask)
        m.d.comb += self.mcarry.eq(c1 | c2 | c3)
        return m
class PartitionedAdder(Elaboratable):
    """Partitioned Adder.

    Performs the final add.  The partition points are included in the
    actual add (in one of the operands only), which causes a carry over
    to the next bit.  Then the final output *removes* the extra bits from
    the result.

    partition: .... P... P... P... P... (32 bits)
    a        : .... .... .... .... .... (32 bits)
    b        : .... .... .... .... .... (32 bits)
    exp-a    : ....P....P....P....P.... (32+4 bits, P=1 if no partition)
    exp-b    : ....0....0....0....0.... (32 bits plus 4 zeros)
    exp-o    : ....xN...xN...xN...xN... (32+4 bits - x to be discarded)
    o        : .... N... N... N... N... (32 bits - x ignored, N is carry-over)

    :attribute width: the bit width of the input and output. Read-only.
    :attribute a: the first input to the adder
    :attribute b: the second input to the adder
    :attribute output: the sum output
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, width, partition_points):
        """Create a ``PartitionedAdder``.

        :param width: the bit width of the input and output
        :param partition_points: the input partition points
        """
        self.width = width
        self.a = Signal(width)
        self.b = Signal(width)
        self.output = Signal(width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(width):
            raise ValueError("partition_points doesn't fit in width")
        # expanded width: one extra "break" bit per partition point
        expanded_width = 0
        for i in range(self.width):
            if i in self.partition_points:
                expanded_width += 1
            expanded_width += 1
        self._expanded_width = expanded_width
        # XXX these have to remain here due to some horrible nmigen
        # simulation bugs involving sync. it is *not* necessary to
        # have them here, they should (under normal circumstances)
        # be moved into elaborate, as they are entirely local
        self._expanded_a = Signal(expanded_width)  # includes extra part-points
        self._expanded_b = Signal(expanded_width)  # likewise.
        self._expanded_o = Signal(expanded_width)  # likewise.

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        expanded_index = 0
        # store bits in a list, use Cat later.  graphviz is much cleaner
        al, bl, ol, ea, eb, eo = [], [], [], [], [], []

        # partition points are "breaks" (extra zeros or 1s) in what would
        # otherwise be a massive long add.  when the "break" points are 0,
        # whatever is in it (in the output) is discarded.  however when
        # there is a "1", it causes a roll-over carry to the *next* bit.
        # we still ignore the "break" bit in the [intermediate] output,
        # however by that time we've got the effect that we wanted: the
        # carry has been carried *over* the break point.

        for i in range(self.width):
            if i in self.partition_points:
                # add extra bit set to 0 + 0 for enabled partition points
                # and 1 + 0 for disabled partition points
                ea.append(self._expanded_a[expanded_index])
                al.append(~self.partition_points[i])  # add extra bit in a
                eb.append(self._expanded_b[expanded_index])
                bl.append(C(0))  # yes, add a zero
                expanded_index += 1  # skip the extra point.  NOT in the output
            ea.append(self._expanded_a[expanded_index])
            eb.append(self._expanded_b[expanded_index])
            eo.append(self._expanded_o[expanded_index])
            al.append(self.a[i])
            bl.append(self.b[i])
            ol.append(self.output[i])
            expanded_index += 1

        # combine above using Cat
        m.d.comb += Cat(*ea).eq(Cat(*al))
        m.d.comb += Cat(*eb).eq(Cat(*bl))
        m.d.comb += Cat(*ol).eq(Cat(*eo))

        # use only one addition to take advantage of look-ahead carry and
        # special hardware on FPGAs
        m.d.comb += self._expanded_o.eq(
            self._expanded_a + self._expanded_b)
        return m
# number of inputs a full (3:2) adder compresses per reduction level
FULL_ADDER_INPUT_COUNT = 3
class AddReduceSingle(Elaboratable):
    """Add list of numbers together.

    :attribute inputs: input ``Signal``s to be summed. Modification not
        supported, except for by ``Signal.eq``.
    :attribute register_levels: List of nesting levels that should have
        pipeline registers.
    :attribute output: output sum.
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, inputs, output_width, register_levels, partition_points,
                 part_ops):
        """Create an ``AddReduce``.

        :param inputs: input ``Signal``s to be summed.
        :param output_width: bit-width of ``output``.
        :param register_levels: List of nesting levels that should have
            pipeline registers.
        :param partition_points: the input partition points.
        """
        self.part_ops = part_ops
        self.out_part_ops = [Signal(2, name=f"out_part_ops_{i}")
                             for i in range(len(part_ops))]
        self.inputs = list(inputs)
        self._resized_inputs = [
            Signal(output_width, name=f"resized_inputs[{i}]")
            for i in range(len(self.inputs))]
        self.register_levels = list(register_levels)
        self.output = Signal(output_width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(output_width):
            raise ValueError("partition_points doesn't fit in output_width")
        self._reg_partition_points = self.partition_points.like()

        max_level = AddReduceSingle.get_max_level(len(self.inputs))
        for level in self.register_levels:
            if level > max_level:
                raise ValueError(
                    "not enough adder levels for specified register levels")

        # this is annoying.  we have to create the modules (and terms)
        # because we need to know what they are (in order to set up the
        # interconnects back in AddReduce), but cannot do the m.d.comb +=
        # etc because this is not in elaboratable.
        self.groups = AddReduceSingle.full_adder_groups(len(self.inputs))
        self._intermediate_terms = []
        if len(self.groups) != 0:
            self.create_next_terms()

    @staticmethod
    def get_max_level(input_count):
        """Get the maximum level.

        All ``register_levels`` must be less than or equal to the maximum
        level.
        """
        retval = 0
        while True:
            groups = AddReduceSingle.full_adder_groups(input_count)
            if len(groups) == 0:
                return retval
            input_count %= FULL_ADDER_INPUT_COUNT
            input_count += 2 * len(groups)
            retval += 1

    @staticmethod
    def full_adder_groups(input_count):
        """Get ``inputs`` indices for which a full adder should be built."""
        return range(0,
                     input_count - FULL_ADDER_INPUT_COUNT + 1,
                     FULL_ADDER_INPUT_COUNT)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        # resize inputs to correct bit-width and optionally add in
        # a pipeline register at this level
        resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
                                     for i in range(len(self.inputs))]
        copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i])
                         for i in range(len(self.part_ops))]
        if 0 in self.register_levels:
            m.d.sync += copy_part_ops
            m.d.sync += resized_input_assignments
            m.d.sync += self._reg_partition_points.eq(self.partition_points)
        else:
            m.d.comb += copy_part_ops
            m.d.comb += resized_input_assignments
            m.d.comb += self._reg_partition_points.eq(self.partition_points)

        for (value, term) in self._intermediate_terms:
            m.d.comb += term.eq(value)

        # if there are no full adders to create, then we handle the base cases
        # and return, otherwise we go on to the recursive case
        if len(self.groups) == 0:
            if len(self.inputs) == 0:
                # use 0 as the default output value
                m.d.comb += self.output.eq(0)
            elif len(self.inputs) == 1:
                # handle single input
                m.d.comb += self.output.eq(self._resized_inputs[0])
            else:
                # base case for adding 2 inputs
                assert len(self.inputs) == 2
                adder = PartitionedAdder(len(self.output),
                                         self._reg_partition_points)
                m.submodules.final_adder = adder
                m.d.comb += adder.a.eq(self._resized_inputs[0])
                m.d.comb += adder.b.eq(self._resized_inputs[1])
                m.d.comb += self.output.eq(adder.output)
            return m

        # set up the partition carry mask shared by all adders at this level
        mask = self._reg_partition_points.as_mask(len(self.output))
        m.d.comb += self.part_mask.eq(mask)

        # add and link the intermediate term modules
        for i, (iidx, adder_i) in enumerate(self.adders):
            setattr(m.submodules, f"adder_{i}", adder_i)

            m.d.comb += adder_i.in0.eq(self._resized_inputs[iidx])
            m.d.comb += adder_i.in1.eq(self._resized_inputs[iidx + 1])
            m.d.comb += adder_i.in2.eq(self._resized_inputs[iidx + 2])
            m.d.comb += adder_i.mask.eq(self.part_mask)

        return m

    def create_next_terms(self):

        # go on to prepare recursive case
        intermediate_terms = []
        _intermediate_terms = []

        def add_intermediate_term(value):
            # one output-width Signal per term; the (value, signal) pair
            # is wired up combinatorially in elaborate()
            intermediate_term = Signal(
                len(self.output),
                name=f"intermediate_terms[{len(intermediate_terms)}]")
            _intermediate_terms.append((value, intermediate_term))
            intermediate_terms.append(intermediate_term)

        # store mask in intermediary (simplifies graph)
        self.part_mask = Signal(len(self.output), reset_less=True)

        # create full adders for this recursive level.
        # this shrinks N terms to 2 * (N // 3) plus the remainder
        self.adders = []
        for i in self.groups:
            adder_i = MaskedFullAdder(len(self.output))
            self.adders.append((i, adder_i))
            # add both the sum and the masked-carry to the next level.
            # 3 inputs have now been reduced to 2...
            add_intermediate_term(adder_i.sum)
            add_intermediate_term(adder_i.mcarry)
        # handle the remaining inputs.
        if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1:
            add_intermediate_term(self._resized_inputs[-1])
        elif len(self.inputs) % FULL_ADDER_INPUT_COUNT == 2:
            # Just pass the terms to the next layer, since we wouldn't gain
            # anything by using a half adder since there would still be 2 terms
            # and just passing the terms to the next layer saves gates.
            add_intermediate_term(self._resized_inputs[-2])
            add_intermediate_term(self._resized_inputs[-1])
        else:
            assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0

        self.intermediate_terms = intermediate_terms
        self._intermediate_terms = _intermediate_terms
class AddReduce(Elaboratable):
    """Recursively Add list of numbers together.

    :attribute inputs: input ``Signal``s to be summed. Modification not
        supported, except for by ``Signal.eq``.
    :attribute register_levels: List of nesting levels that should have
        pipeline registers.
    :attribute output: output sum.
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, inputs, output_width, register_levels, partition_points,
                 part_ops):
        """Create an ``AddReduce``.

        :param inputs: input ``Signal``s to be summed.
        :param output_width: bit-width of ``output``.
        :param register_levels: List of nesting levels that should have
            pipeline registers.
        :param partition_points: the input partition points.
        """
        self.inputs = inputs
        self.part_ops = part_ops
        self.out_part_ops = [Signal(2, name=f"out_part_ops_{i}")
                             for i in range(len(part_ops))]
        self.output = Signal(output_width)
        self.output_width = output_width
        self.register_levels = register_levels
        self.partition_points = partition_points

        self.create_levels()

    @staticmethod
    def get_max_level(input_count):
        return AddReduceSingle.get_max_level(input_count)

    @staticmethod
    def next_register_levels(register_levels):
        """``Iterable`` of ``register_levels`` for next recursive level."""
        for level in register_levels:
            if level > 0:
                yield level - 1

    def create_levels(self):
        """creates reduction levels"""

        mods = []
        next_levels = self.register_levels
        partition_points = self.partition_points
        inputs = self.inputs
        part_ops = self.part_ops
        while True:
            next_level = AddReduceSingle(inputs, self.output_width, next_levels,
                                         partition_points, part_ops)
            mods.append(next_level)
            # no more full-adder groups: reduction is complete
            if len(next_level.groups) == 0:
                break
            next_levels = list(AddReduce.next_register_levels(next_levels))
            partition_points = next_level._reg_partition_points
            inputs = next_level.intermediate_terms
            part_ops = next_level.out_part_ops

        self.levels = mods

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        for i, next_level in enumerate(self.levels):
            setattr(m.submodules, "next_level%d" % i, next_level)

        # output comes from last module
        m.d.comb += self.output.eq(next_level.output)
        copy_part_ops = [self.out_part_ops[i].eq(next_level.out_part_ops[i])
                         for i in range(len(self.part_ops))]
        m.d.comb += copy_part_ops

        return m
# partition operation codes (one 2-bit code per byte of the operands).
# NOTE(review): OP_MUL_LOW was dropped by the formatting mangling; it is
# referenced below (Signs, IntermediateOut) and restored here as 0.
OP_MUL_LOW = 0
OP_MUL_SIGNED_HIGH = 1
OP_MUL_SIGNED_UNSIGNED_HIGH = 2  # a is signed, b is unsigned
OP_MUL_UNSIGNED_HIGH = 3
def get_term(value, shift=0, enabled=None):
    """Build a partial-product term.

    Optionally gates ``value`` to zero via ``enabled``, then shifts it
    left by ``shift`` bits (by concatenating ``shift`` zero bits below).

    :param value: the term's value
    :param shift: number of LSB zero bits to prepend
    :param enabled: optional qualifier; when deasserted the term is 0
    """
    if enabled is not None:
        value = Mux(enabled, value, 0)
    if shift != 0:
        value = Cat(Repl(C(0, 1), shift), value)
    return value
class ProductTerm(Elaboratable):
    """ this class creates a single product term (a[..]*b[..]).
        it has a design flaw in that is the *output* that is selected,
        where the multiplication(s) are combinatorially generated
        all the time.
    """

    def __init__(self, width, twidth, pbwid, a_index, b_index):
        self.a_index = a_index
        self.b_index = b_index
        shift = 8 * (self.a_index + self.b_index)
        # NOTE(review): width bookkeeping reconstructed after mangling —
        # pwidth is the per-partition width, self.width the product width
        self.pwidth = width
        self.twidth = twidth
        self.width = width*2
        self.shift = shift

        self.ti = Signal(self.width, reset_less=True)
        self.term = Signal(twidth, reset_less=True)
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)

        # term is enabled only when no partition break lies between the
        # two byte indices being multiplied
        self.tl = tl = []
        min_index = min(self.a_index, self.b_index)
        max_index = max(self.a_index, self.b_index)
        for i in range(min_index, max_index):
            tl.append(self.pb_en[i])
        name = "te_%d_%d" % (self.a_index, self.b_index)
        if len(tl) > 0:
            term_enabled = Signal(name=name, reset_less=True)
        else:
            term_enabled = None
        self.enabled = term_enabled
        self.term.name = "term_%d_%d" % (a_index, b_index)  # rename

    def elaborate(self, platform):

        m = Module()
        if self.enabled is not None:
            m.d.comb += self.enabled.eq(~(Cat(*self.tl).bool()))

        bsa = Signal(self.width, reset_less=True)
        bsb = Signal(self.width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        pwidth = self.pwidth
        m.d.comb += bsa.eq(self.a.part(a_index * pwidth, pwidth))
        m.d.comb += bsb.eq(self.b.part(b_index * pwidth, pwidth))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(get_term(self.ti, self.shift, self.enabled))
        """
        #TODO: sort out width issues, get inputs a/b switched on/off.
        #data going into Muxes is 1/2 the required width

        pwidth = self.pwidth
        width = self.width
        bsa = Signal(self.twidth//2, reset_less=True)
        bsb = Signal(self.twidth//2, reset_less=True)
        asel = Signal(width, reset_less=True)
        bsel = Signal(width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        m.d.comb += asel.eq(self.a.part(a_index * pwidth, pwidth))
        m.d.comb += bsel.eq(self.b.part(b_index * pwidth, pwidth))
        m.d.comb += bsa.eq(get_term(asel, self.shift, self.enabled))
        m.d.comb += bsb.eq(get_term(bsel, self.shift, self.enabled))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(self.ti)
        """
        return m
class ProductTerms(Elaboratable):
    """ creates a bank of product terms.  also performs the actual
        bit-selection.
        this class is to be wrapped with a for-loop on the "a" operand.
        it creates a second-level for-loop on the "b" operand.
    """
    def __init__(self, width, twidth, pbwid, a_index, blen):
        self.a_index = a_index
        self.blen = blen
        self.pwidth = width
        self.twidth = twidth
        self.pbwid = pbwid
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)
        self.terms = [Signal(twidth, name="term%d" % i, reset_less=True)
                      for i in range(blen)]

    def elaborate(self, platform):

        m = Module()

        for b_index in range(self.blen):
            t = ProductTerm(self.pwidth, self.twidth, self.pbwid,
                            self.a_index, b_index)
            setattr(m.submodules, "term_%d" % b_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(self.pb_en)

            m.d.comb += self.terms[b_index].eq(t.term)

        return m
class LSBNegTerm(Elaboratable):
    """ generates the two correction terms needed when a partition's
        operand is signed and its MSB is set: ``nt`` is the
        width-extended 1's complement of the operand, and ``nl`` is the
        corresponding "+1" LSB term (2's complement = 1's complement + 1).
    """

    def __init__(self, bit_width):
        self.bit_width = bit_width
        self.part = Signal(reset_less=True)
        self.signed = Signal(reset_less=True)
        self.op = Signal(bit_width, reset_less=True)
        self.msb = Signal(reset_less=True)
        self.nt = Signal(bit_width*2, reset_less=True)
        self.nl = Signal(bit_width*2, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        bit_wid = self.bit_width
        ext = Repl(0, bit_wid)  # extend output to HI part

        # determine sign of each incoming number *in this partition*
        enabled = Signal(reset_less=True)
        m.d.comb += enabled.eq(self.part & self.msb & self.signed)

        # for 8-bit values: form a * 0xFF00 by using -a * 0x100, the
        # negation operation is split into a bitwise not and a +1.
        # likewise for 16, 32, and 64-bit values.

        # width-extended 1s complement if a is signed, otherwise zero
        comb += self.nt.eq(Mux(enabled, Cat(ext, ~self.op), 0))

        # add 1 if signed, otherwise add zero
        comb += self.nl.eq(Cat(ext, enabled, Repl(0, bit_wid-1)))

        return m
class Parts(Elaboratable):
    """ determines, from the expanded partition points, which partition
        lanes are active (a lane is active when its internal partition
        points are all clear and its boundary points are set).
    """

    def __init__(self, pbwid, epps, n_parts):
        self.pbwid = pbwid
        # inputs
        self.epps = PartitionPoints.like(epps, name="epps")  # expanded points
        # outputs
        self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)]

    def elaborate(self, platform):
        m = Module()

        epps, parts = self.epps, self.parts
        # collect part-bytes (double factor because the input is extended)
        pbs = Signal(self.pbwid, reset_less=True)
        tl = []
        for i in range(self.pbwid):
            pb = Signal(name="pb%d" % i, reset_less=True)
            m.d.comb += pb.eq(epps.part_byte(i, mfactor=2))  # double
            tl.append(pb)
        m.d.comb += pbs.eq(Cat(*tl))

        # negated-temporary copy of partition bits
        npbs = Signal.like(pbs, reset_less=True)
        m.d.comb += npbs.eq(~pbs)
        byte_count = 8 // len(parts)
        for i in range(len(parts)):
            pbl = []
            # boundary point below this lane must be SET (inverted bit clear)
            pbl.append(npbs[i * byte_count - 1])
            # NOTE(review): inner-loop body reconstructed — interior points
            # must be CLEAR for the lane to be this size; confirm upstream
            for j in range(i * byte_count, (i + 1) * byte_count - 1):
                pbl.append(pbs[j])
            # boundary point above this lane must be SET
            pbl.append(npbs[(i + 1) * byte_count - 1])
            value = Signal(len(pbl), name="value_%d" % i, reset_less=True)
            m.d.comb += value.eq(Cat(*pbl))
            m.d.comb += parts[i].eq(~(value).bool())

        return m
class Part(Elaboratable):
    """ a key class which, depending on the partitioning, will determine
        what action to take when parts of the output are signed or unsigned.

        this requires 2 pieces of data *per operand, per partition*:
        whether the MSB is HI/LO (per partition!), and whether a signed
        or unsigned operation has been *requested*.

        once that is determined, signed is basically carried out
        by splitting 2's complement into 1's complement plus one.
        1's complement is just a bit-inversion.

        the extra terms - as separate terms - are then thrown at the
        AddReduce alongside the multiplication part-results.
    """
    def __init__(self, epps, width, n_parts, n_levels, pbwid):

        self.pbwid = pbwid
        self.epps = epps

        # inputs
        # NOTE(review): operand width reconstructed as 64 (8 bytes, matching
        # the 8-entry a_signed/b_signed lists) — confirm against callers
        self.a = Signal(64)
        self.b = Signal(64)
        self.a_signed = [Signal(name=f"a_signed_{i}") for i in range(8)]
        self.b_signed = [Signal(name=f"_b_signed_{i}") for i in range(8)]
        self.pbs = Signal(pbwid, reset_less=True)

        # outputs
        self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)]
        self.delayed_parts = [
            [Signal(name=f"delayed_part_{delay}_{i}")
             for i in range(n_parts)]
            for delay in range(n_levels)]
        # XXX REALLY WEIRD BUG - have to take a copy of the last delayed_parts
        self.dplast = [Signal(name=f"dplast_{i}")
                       for i in range(n_parts)]

        self.not_a_term = Signal(width)
        self.neg_lsb_a_term = Signal(width)
        self.not_b_term = Signal(width)
        self.neg_lsb_b_term = Signal(width)

    def elaborate(self, platform):
        m = Module()

        pbs, parts, delayed_parts = self.pbs, self.parts, self.delayed_parts
        epps = self.epps
        m.submodules.p = p = Parts(self.pbwid, epps, len(parts))
        m.d.comb += p.epps.eq(epps)
        # NOTE(review): reconstructed wiring — drive our part flags from the
        # Parts submodule outputs; confirm against upstream
        m.d.comb += [parts[i].eq(p.parts[i]) for i in range(len(parts))]

        # NOTE(review): npbs/byte_count appear unused in this method
        # (likely leftovers after extraction into Parts); kept for fidelity
        npbs = Signal.like(pbs, reset_less=True)
        byte_count = 8 // len(parts)
        for i in range(len(parts)):
            # pipeline the part flags through n_levels of delay registers
            m.d.comb += delayed_parts[0][i].eq(parts[i])
            m.d.sync += [delayed_parts[j + 1][i].eq(delayed_parts[j][i])
                         for j in range(len(delayed_parts)-1)]
            m.d.comb += self.dplast[i].eq(delayed_parts[-1][i])

        not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = \
            self.not_a_term, self.neg_lsb_a_term, \
            self.not_b_term, self.neg_lsb_b_term

        byte_width = 8 // len(parts)  # byte width
        bit_wid = 8 * byte_width      # bit width
        nat, nbt, nla, nlb = [], [], [], []
        for i in range(len(parts)):
            # work out bit-inverted and +1 term for a.
            pa = LSBNegTerm(bit_wid)
            setattr(m.submodules, "lnt_%d_a_%d" % (bit_wid, i), pa)
            m.d.comb += pa.part.eq(parts[i])
            m.d.comb += pa.op.eq(self.a.part(bit_wid * i, bit_wid))
            m.d.comb += pa.signed.eq(self.b_signed[i * byte_width])  # yes b
            m.d.comb += pa.msb.eq(self.b[(i + 1) * bit_wid - 1])  # really, b
            nat.append(pa.nt)
            nla.append(pa.nl)

            # work out bit-inverted and +1 term for b
            pb = LSBNegTerm(bit_wid)
            setattr(m.submodules, "lnt_%d_b_%d" % (bit_wid, i), pb)
            m.d.comb += pb.part.eq(parts[i])
            m.d.comb += pb.op.eq(self.b.part(bit_wid * i, bit_wid))
            m.d.comb += pb.signed.eq(self.a_signed[i * byte_width])  # yes a
            m.d.comb += pb.msb.eq(self.a[(i + 1) * bit_wid - 1])  # really, a
            nbt.append(pb.nt)
            nlb.append(pb.nl)

        # concatenate together and return all 4 results.
        m.d.comb += [not_a_term.eq(Cat(*nat)),
                     not_b_term.eq(Cat(*nbt)),
                     neg_lsb_a_term.eq(Cat(*nla)),
                     neg_lsb_b_term.eq(Cat(*nlb)),
                     ]

        return m
class IntermediateOut(Elaboratable):
    """ selects the HI/LO part of the multiplication, for a given bit-width
        the output is also reconstructed in its SIMD (partition) lanes.
    """
    def __init__(self, width, out_wid, n_parts):
        self.width = width
        self.n_parts = n_parts
        self.part_ops = [Signal(2, name="dpop%d" % i, reset_less=True)
                         for i in range(8)]
        self.intermed = Signal(out_wid, reset_less=True)
        self.output = Signal(out_wid//2, reset_less=True)

    def elaborate(self, platform):
        m = Module()

        ol = []
        w = self.width
        # NOTE(review): stride reconstructed — one op-code per byte, so
        # step through part_ops by bytes-per-lane; confirm upstream
        sel = w // 8
        for i in range(self.n_parts):
            op = Signal(w, reset_less=True, name="op%d_%d" % (w, i))
            # OP_MUL_LOW selects the low half of the double-width lane
            # product; anything else selects the high half
            m.d.comb += op.eq(
                Mux(self.part_ops[sel * i] == OP_MUL_LOW,
                    self.intermed.part(i * w*2, w),
                    self.intermed.part(i * w*2 + w, w)))
            ol.append(op)

        m.d.comb += self.output.eq(Cat(*ol))

        return m
class FinalOut(Elaboratable):
    """ selects the final output based on the partitioning.

        each byte is selectable independently, i.e. it is possible
        that some partitions requested 8-bit computation whilst others
        requested 16 or 32 bit.
    """
    def __init__(self, out_wid):
        # inputs: per-byte select flags for each partition size
        self.d8 = [Signal(name=f"d8_{i}", reset_less=True) for i in range(8)]
        self.d16 = [Signal(name=f"d16_{i}", reset_less=True) for i in range(4)]
        self.d32 = [Signal(name=f"d32_{i}", reset_less=True) for i in range(2)]

        self.i8 = Signal(out_wid, reset_less=True)
        self.i16 = Signal(out_wid, reset_less=True)
        self.i32 = Signal(out_wid, reset_less=True)
        self.i64 = Signal(out_wid, reset_less=True)

        # output
        self.out = Signal(out_wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()

        ol = []
        for i in range(8):
            # select one of the outputs: d8 selects i8, d16 selects i16
            # d32 selects i32, and the default is i64.
            # d8 and d16 are ORed together in the first Mux
            # then the 2nd selects either i8 or i16.
            # if neither d8 nor d16 are set, d32 selects either i32 or i64.
            op = Signal(8, reset_less=True, name="op_%d" % i)
            m.d.comb += op.eq(
                Mux(self.d8[i] | self.d16[i // 2],
                    Mux(self.d8[i], self.i8.part(i * 8, 8),
                                    self.i16.part(i * 8, 8)),
                    Mux(self.d32[i // 4], self.i32.part(i * 8, 8),
                                          self.i64.part(i * 8, 8))))
            ol.append(op)

        # create the output by concatenating the selected bytes
        m.d.comb += self.out.eq(Cat(*ol))

        return m
class OrMod(Elaboratable):
    """ ORs four values together in a hierarchical tree
    """
    def __init__(self, wid):
        # width of each input/output value
        self.wid = wid
        # inputs: the four values to be ORed together
        self.orin = [Signal(wid, name="orin%d" % i, reset_less=True)
                     for i in range(4)]
        # output: orin[0] | orin[1] | orin[2] | orin[3]
        self.orout = Signal(wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        # two-level OR tree: (orin0|orin1) | (orin2|orin3)
        or1 = Signal(self.wid, reset_less=True)
        or2 = Signal(self.wid, reset_less=True)
        m.d.comb += or1.eq(self.orin[0] | self.orin[1])
        m.d.comb += or2.eq(self.orin[2] | self.orin[3])
        m.d.comb += self.orout.eq(or1 | or2)
        return m
class Signs(Elaboratable):
    """ determines whether a or b are signed numbers
        based on the required operation type (OP_MUL_*)
    """

    def __init__(self):
        # input: 2-bit operation code (one of the OP_MUL_* constants)
        self.part_ops = Signal(2, reset_less=True)
        # outputs: whether operand a / operand b is treated as signed
        self.a_signed = Signal(reset_less=True)
        self.b_signed = Signal(reset_less=True)

    def elaborate(self, platform):
        m = Module()

        # a is signed for every op except the unsigned-high one (mulhu);
        # b is signed only for mul-low (mul) and signed-high (mulh)
        asig = self.part_ops != OP_MUL_UNSIGNED_HIGH
        bsig = (self.part_ops == OP_MUL_LOW) \
            | (self.part_ops == OP_MUL_SIGNED_HIGH)
        m.d.comb += self.a_signed.eq(asig)
        m.d.comb += self.b_signed.eq(bsig)
        return m
class Mul8_16_32_64(Elaboratable):
    """Signed/Unsigned 8/16/32/64-bit partitioned integer multiplier.

    Supports partitioning into any combination of 8, 16, 32, and 64-bit
    partitions on naturally-aligned boundaries. Supports the operation being
    set for each partition independently.

    :attribute part_pts: the input partition points. Has a partition point at
        multiples of 8 in 0 < i < 64. Each partition point's associated
        ``Value`` is a ``Signal``. Modification not supported, except for by
        ``Signal.eq``.
    :attribute part_ops: the operation for each byte. The operation for a
        particular partition is selected by assigning the selected operation
        code to each byte in the partition. The allowed operation codes are:

        :attribute OP_MUL_LOW: the LSB half of the product. Equivalent to
            RISC-V's `mul` instruction.
        :attribute OP_MUL_SIGNED_HIGH: the MSB half of the product where both
            ``a`` and ``b`` are signed. Equivalent to RISC-V's `mulh`
            instruction.
        :attribute OP_MUL_SIGNED_UNSIGNED_HIGH: the MSB half of the product
            where ``a`` is signed and ``b`` is unsigned. Equivalent to RISC-V's
            `mulhsu` instruction.
        :attribute OP_MUL_UNSIGNED_HIGH: the MSB half of the product where both
            ``a`` and ``b`` are unsigned. Equivalent to RISC-V's `mulhu`
            instruction.
    """

    def __init__(self, register_levels=()):
        """ register_levels: specifies the points in the cascade at which
            flip-flops are to be inserted.
        """

        # parameter(s)
        self.register_levels = list(register_levels)

        # inputs
        self.part_pts = PartitionPoints()
        for i in range(8, 64, 8):
            self.part_pts[i] = Signal(name=f"part_pts_{i}")
        self.part_ops = [Signal(2, name=f"part_ops_{i}") for i in range(8)]
        # NOTE(review): elaborate() drives mod.a/mod.b from self.a/self.b,
        # so the two 64-bit operand inputs are declared here
        self.a = Signal(64)
        self.b = Signal(64)

        # intermediates (needed for unit tests)
        self._intermediate_output = Signal(128)

        # output
        self.output = Signal(64)

    def elaborate(self, platform):
        m = Module()

        # collect part-bytes: one enable bit per byte boundary
        pbs = Signal(8, reset_less=True)
        tl = []
        for i in range(8):
            pb = Signal(name="pb%d" % i, reset_less=True)
            m.d.comb += pb.eq(self.part_pts.part_byte(i))
            tl.append(pb)
        m.d.comb += pbs.eq(Cat(*tl))

        # create (doubled) PartitionPoints (output is double input width)
        expanded_part_pts = eps = PartitionPoints()
        for i, v in self.part_pts.items():
            ep = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True)
            expanded_part_pts[i * 2] = ep
            m.d.comb += ep.eq(v)

        # one Signs decoder per byte-lane operation code
        signs = []
        for i in range(8):
            s = Signs()
            signs.append(s)
            setattr(m.submodules, "signs%d" % i, s)
            m.d.comb += s.part_ops.eq(self.part_ops[i])

        # partial-product generators for each partition size
        n_levels = len(self.register_levels) + 1
        m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8)
        m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8)
        m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8)
        m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8)
        nat_l, nbt_l, nla_l, nlb_l = [], [], [], []
        for mod in [part_8, part_16, part_32, part_64]:
            m.d.comb += mod.a.eq(self.a)
            m.d.comb += mod.b.eq(self.b)
            for i in range(len(signs)):
                m.d.comb += mod.a_signed[i].eq(signs[i].a_signed)
                m.d.comb += mod.b_signed[i].eq(signs[i].b_signed)
            m.d.comb += mod.pbs.eq(pbs)
            nat_l.append(mod.not_a_term)
            nbt_l.append(mod.not_b_term)
            nla_l.append(mod.neg_lsb_a_term)
            nlb_l.append(mod.neg_lsb_b_term)

        # gather the 8x8 byte-product terms
        terms = []
        for a_index in range(8):
            t = ProductTerms(8, 128, 8, a_index, 8)
            setattr(m.submodules, "terms_%d" % a_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(pbs)

            for term in t.terms:
                terms.append(term)

        # it's fine to bitwise-or data together since they are never enabled
        # at the same time
        m.submodules.nat_or = nat_or = OrMod(128)
        m.submodules.nbt_or = nbt_or = OrMod(128)
        m.submodules.nla_or = nla_or = OrMod(128)
        m.submodules.nlb_or = nlb_or = OrMod(128)
        for l, mod in [(nat_l, nat_or),
                       (nbt_l, nbt_or),
                       (nla_l, nla_or),
                       (nlb_l, nlb_or)]:
            for i in range(len(l)):
                m.d.comb += mod.orin[i].eq(l[i])
            terms.append(mod.orout)

        # sum all terms through the (optionally registered) adder cascade.
        # NOTE(review): the source chunk only shows `terms` and
        # `self.register_levels` as arguments; the width and partition/op
        # pass-through arguments below are reconstructed — confirm against
        # AddReduce's signature.
        add_reduce = AddReduce(terms,
                               128,
                               self.register_levels,
                               expanded_part_pts,
                               self.part_ops)

        out_part_ops = add_reduce.levels[-1].out_part_ops
        out_part_pts = add_reduce.levels[-1]._reg_partition_points

        m.submodules.add_reduce = add_reduce
        m.d.comb += self._intermediate_output.eq(add_reduce.output)

        # HI/LO selection of the 128-bit intermediate, per partition size
        m.submodules.io64 = io64 = IntermediateOut(64, 128, 1)
        m.d.comb += io64.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io64.part_ops[i].eq(out_part_ops[i])

        m.submodules.io32 = io32 = IntermediateOut(32, 128, 2)
        m.d.comb += io32.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io32.part_ops[i].eq(out_part_ops[i])

        m.submodules.io16 = io16 = IntermediateOut(16, 128, 4)
        m.d.comb += io16.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io16.part_ops[i].eq(out_part_ops[i])

        m.submodules.io8 = io8 = IntermediateOut(8, 128, 8)
        m.d.comb += io8.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io8.part_ops[i].eq(out_part_ops[i])

        # decode which partition size is active for each lane
        m.submodules.p_8 = p_8 = Parts(8, eps, len(part_8.parts))
        m.submodules.p_16 = p_16 = Parts(8, eps, len(part_16.parts))
        m.submodules.p_32 = p_32 = Parts(8, eps, len(part_32.parts))
        m.submodules.p_64 = p_64 = Parts(8, eps, len(part_64.parts))

        m.d.comb += p_8.epps.eq(out_part_pts)
        m.d.comb += p_16.epps.eq(out_part_pts)
        m.d.comb += p_32.epps.eq(out_part_pts)
        m.d.comb += p_64.epps.eq(out_part_pts)

        # final per-byte output selection
        m.submodules.finalout = finalout = FinalOut(64)
        for i in range(len(part_8.parts)):
            m.d.comb += finalout.d8[i].eq(p_8.parts[i])
        for i in range(len(part_16.parts)):
            m.d.comb += finalout.d16[i].eq(p_16.parts[i])
        for i in range(len(part_32.parts)):
            m.d.comb += finalout.d32[i].eq(p_32.parts[i])
        m.d.comb += finalout.i8.eq(io8.output)
        m.d.comb += finalout.i16.eq(io16.output)
        m.d.comb += finalout.i32.eq(io32.output)
        m.d.comb += finalout.i64.eq(io64.output)
        m.d.comb += self.output.eq(finalout.out)

        return m
if __name__ == "__main__":
    # generate/simulate the multiplier via the nmigen CLI, exposing all
    # externally-visible ports (operands, intermediate, output, per-byte
    # op codes and partition points)
    m = Mul8_16_32_64()
    main(m, ports=[m.a,
                   m.b,
                   m._intermediate_output,
                   m.output,
                   *m.part_ops,
                   *m.part_pts.values()])