1 # SPDX-License-Identifier: LGPL-2.1-or-later
2 # See Notices.txt for copyright information
3 """Integer Multiplication."""
5 from nmigen
import Signal
, Module
, Value
, Elaboratable
, Cat
, C
, Mux
, Repl
6 from nmigen
.hdl
.ast
import Assign
7 from abc
import ABCMeta
, abstractmethod
8 from nmigen
.cli
import main
9 from functools
import reduce
10 from operator
import or_
class PartitionPoints(dict):
    """Partition points and corresponding ``Value``s.

    The points at where an ALU is partitioned along with ``Value``s that
    specify if the corresponding partition points are enabled.

    For example: ``{1: True, 5: True, 10: True}`` with
    ``width == 16`` specifies that the ALU is split into 4 sections:

    * bits 0 <= ``i`` < 1
    * bits 1 <= ``i`` < 5
    * bits 5 <= ``i`` < 10
    * bits 10 <= ``i`` < 16

    If the partition_points were instead ``{1: True, 5: a, 10: True}``
    where ``a`` is a 1-bit ``Signal``:

    * If ``a`` is asserted:

        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 5
        * bits 5 <= ``i`` < 10
        * bits 10 <= ``i`` < 16

    * Otherwise

        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 10
        * bits 10 <= ``i`` < 16
    """

    def __init__(self, partition_points=None):
        """Create a new ``PartitionPoints``.

        :param partition_points: the input partition points to values mapping.
        """
        super().__init__()
        if partition_points is not None:
            for point, enabled in partition_points.items():
                if not isinstance(point, int):
                    raise TypeError("point must be a non-negative integer")
                if point < 0:
                    raise ValueError("point must be a non-negative integer")
                self[point] = Value.wrap(enabled)

    def like(self, name=None, src_loc_at=0, mul=1):
        """Create a new ``PartitionPoints`` with ``Signal``s for all values.

        :param name: the base name for the new ``Signal``s.
        :param mul: a multiplication factor on the indices
        """
        if name is None:
            name = Signal(src_loc_at=1+src_loc_at).name  # get variable name
        retval = PartitionPoints()
        for point, enabled in self.items():
            point *= mul
            retval[point] = Signal(enabled.shape(), name=f"{name}_{point}")
        return retval

    def eq(self, rhs):
        """Assign ``PartitionPoints`` using ``Signal.eq``."""
        if set(self.keys()) != set(rhs.keys()):
            raise ValueError("incompatible point set")
        for point, enabled in self.items():
            yield enabled.eq(rhs[point])

    def as_mask(self, width):
        """Create a bit-mask from `self`.

        Each bit in the returned mask is clear only if the partition point at
        the same bit-index is enabled.

        :param width: the bit width of the resulting mask
        """
        bits = []
        for i in range(width):
            if i in self:
                bits.append(~self[i])  # enabled partition point -> clear bit
            else:
                bits.append(True)      # no partition point here -> set bit
        return Cat(*bits)

    def get_max_partition_count(self, width):
        """Get the maximum number of partitions.

        Gets the number of partitions when all partition points are enabled.
        """
        retval = 1
        for point in self.keys():
            if point < width:
                retval += 1
        return retval

    def fits_in_width(self, width):
        """Check if all partition points are smaller than `width`."""
        for point in self.keys():
            if point >= width:
                return False
        return True

    def part_byte(self, index, mfactor=1):  # mfactor used for "expanding"
        # the two "edges" (below bit 0, above bit 63) are always "enabled"
        if index == -1 or index == 7:
            return C(True, 1)
        assert index >= 0 and index < 8
        return self[(index * 8 + 8)*mfactor]
class FullAdder(Elaboratable):
    """Full Adder.

    :attribute in0: the first input
    :attribute in1: the second input
    :attribute in2: the third input
    :attribute sum: the sum output
    :attribute carry: the carry output

    Rather than do individual full adders (and have an array of them,
    which would be very slow to simulate), this module can specify the
    bit width of the inputs and outputs: in effect it performs multiple
    Full 3-2 Add operations "in parallel".
    """

    def __init__(self, width):
        """Create a ``FullAdder``.

        :param width: the bit width of the input and output
        """
        self.in0 = Signal(width)
        self.in1 = Signal(width)
        self.in2 = Signal(width)
        self.sum = Signal(width)
        self.carry = Signal(width)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        # sum is the XOR of all three inputs; carry is the majority function
        m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
        m.d.comb += self.carry.eq((self.in0 & self.in1)
                                  | (self.in1 & self.in2)
                                  | (self.in2 & self.in0))
        return m
class MaskedFullAdder(Elaboratable):
    """Masked Full Adder.

    :attribute mask: the carry partition mask
    :attribute in0: the first input
    :attribute in1: the second input
    :attribute in2: the third input
    :attribute sum: the sum output
    :attribute mcarry: the masked carry output

    FullAdders are always used with a "mask" on the output.  To keep
    the graphviz "clean", this class performs the masking here rather
    than inside a large for-loop.

    See the following discussion as to why this is no longer derived
    from FullAdder.  Each carry is shifted here *before* being ANDed
    with the mask, so that an AOI cell may be used (which is more
    efficient):
    https://en.wikipedia.org/wiki/AND-OR-Invert
    https://groups.google.com/d/msg/comp.arch/fcq-GLQqvas/vTxmcA0QAgAJ
    """

    def __init__(self, width):
        """Create a ``MaskedFullAdder``.

        :param width: the bit width of the input and output
        """
        self.width = width
        self.mask = Signal(width, reset_less=True)
        self.mcarry = Signal(width, reset_less=True)
        self.in0 = Signal(width, reset_less=True)
        self.in1 = Signal(width, reset_less=True)
        self.in2 = Signal(width, reset_less=True)
        self.sum = Signal(width, reset_less=True)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        s1 = Signal(self.width, reset_less=True)
        s2 = Signal(self.width, reset_less=True)
        s3 = Signal(self.width, reset_less=True)
        c1 = Signal(self.width, reset_less=True)
        c2 = Signal(self.width, reset_less=True)
        c3 = Signal(self.width, reset_less=True)
        m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
        # shift each input left by one (Cat with 0 LSB) *before* masking,
        # so that the AND with the mask and the final OR form an AOI cell
        m.d.comb += s1.eq(Cat(0, self.in0))
        m.d.comb += s2.eq(Cat(0, self.in1))
        m.d.comb += s3.eq(Cat(0, self.in2))
        m.d.comb += c1.eq(s1 & s2 & self.mask)
        m.d.comb += c2.eq(s2 & s3 & self.mask)
        m.d.comb += c3.eq(s3 & s1 & self.mask)
        m.d.comb += self.mcarry.eq(c1 | c2 | c3)
        return m
class PartitionedAdder(Elaboratable):
    """Partitioned Adder.

    Performs the final add.  The partition points are included in the
    actual add (in one of the operands only), which causes a carry over
    to the next bit.  Then the final output *removes* the extra bits from
    the result.

    partition: .... P... P... P... P... (32 bits)
    a        : .... .... .... .... .... (32 bits)
    b        : .... .... .... .... .... (32 bits)
    exp-a    : ....P....P....P....P.... (32+4 bits, P=1 if no partition)
    exp-b    : ....0....0....0....0.... (32 bits plus 4 zeros)
    exp-o    : ....xN...xN...xN...xN... (32+4 bits - x to be discarded)
    o        : .... N... N... N... N... (32 bits - x ignored, N is carry-over)

    :attribute width: the bit width of the input and output. Read-only.
    :attribute a: the first input to the adder
    :attribute b: the second input to the adder
    :attribute output: the sum output
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, width, partition_points):
        """Create a ``PartitionedAdder``.

        :param width: the bit width of the input and output
        :param partition_points: the input partition points
        """
        self.width = width
        self.a = Signal(width)
        self.b = Signal(width)
        self.output = Signal(width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(width):
            raise ValueError("partition_points doesn't fit in width")
        # one extra bit is inserted at every partition point
        expanded_width = 0
        for i in range(self.width):
            if i in self.partition_points:
                expanded_width += 1
            expanded_width += 1
        self._expanded_width = expanded_width
        # XXX these have to remain here due to some horrible nmigen
        # simulation bugs involving sync. it is *not* necessary to
        # have them here, they should (under normal circumstances)
        # be moved into elaborate, as they are entirely local
        self._expanded_a = Signal(expanded_width)  # includes extra part-points
        self._expanded_b = Signal(expanded_width)  # likewise.
        self._expanded_o = Signal(expanded_width)  # likewise.

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        # store bits in a list, use Cat later.  graphviz is much cleaner
        al, bl, ol, ea, eb, eo = [], [], [], [], [], []

        # partition points are "breaks" (extra zeros or 1s) in what would
        # otherwise be a massive long add.  when the "break" points are 0,
        # whatever is in it (in the output) is discarded.  however when
        # there is a "1", it causes a roll-over carry to the *next* bit.
        # we still ignore the "break" bit in the [intermediate] output,
        # however by that time we've got the effect that we wanted: the
        # carry has been carried *over* the break point.

        expanded_index = 0
        for i in range(self.width):
            if i in self.partition_points:
                # add extra bit set to 0 + 0 for enabled partition points
                # and 1 + 0 for disabled partition points
                ea.append(self._expanded_a[expanded_index])
                al.append(~self.partition_points[i])  # add extra bit in a
                eb.append(self._expanded_b[expanded_index])
                bl.append(C(0))  # yes, add a zero
                expanded_index += 1  # skip the extra point.  NOT in the output
            ea.append(self._expanded_a[expanded_index])
            eb.append(self._expanded_b[expanded_index])
            eo.append(self._expanded_o[expanded_index])
            al.append(self.a[i])
            bl.append(self.b[i])
            ol.append(self.output[i])
            expanded_index += 1

        # combine above using Cat
        m.d.comb += Cat(*ea).eq(Cat(*al))
        m.d.comb += Cat(*eb).eq(Cat(*bl))
        m.d.comb += Cat(*ol).eq(Cat(*eo))

        # use only one addition to take advantage of look-ahead carry and
        # special hardware on FPGAs
        m.d.comb += self._expanded_o.eq(
            self._expanded_a + self._expanded_b)
        return m
# number of inputs a single FullAdder (3:2 compressor) consumes per level
FULL_ADDER_INPUT_COUNT = 3
305 def __init__(self
, ppoints
, n_inputs
, output_width
, n_parts
):
306 self
.part_ops
= [Signal(2, name
=f
"part_ops_{i}")
307 for i
in range(n_parts
)]
308 self
.inputs
= [Signal(output_width
, name
=f
"inputs[{i}]")
309 for i
in range(n_inputs
)]
310 self
.reg_partition_points
= ppoints
.like()
313 return [self
.reg_partition_points
.eq(rhs
.reg_partition_points
)] + \
314 [self
.inputs
[i
].eq(rhs
.inputs
[i
])
315 for i
in range(len(self
.inputs
))] + \
316 [self
.part_ops
[i
].eq(rhs
.part_ops
[i
])
317 for i
in range(len(self
.part_ops
))]
class FinalAdd(Elaboratable):
    """ Final stage of add reduce
    """

    def __init__(self, n_inputs, output_width, n_parts, register_levels,
                 partition_points):
        """Create a ``FinalAdd``.

        :param n_inputs: number of remaining input terms (0, 1 or 2)
        :param output_width: bit-width of ``output``
        :param n_parts: number of partition lanes
        :param register_levels: list of nesting levels with pipeline registers
        :param partition_points: the input partition points
        """
        self.i = AddReduceData(partition_points, n_inputs,
                               output_width, n_parts)
        self.n_inputs = n_inputs
        self.n_parts = n_parts
        self._resized_inputs = self.i.inputs
        self.register_levels = list(register_levels)
        self.output = Signal(output_width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(output_width):
            raise ValueError("partition_points doesn't fit in output_width")
        self.intermediate_terms = []

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        if self.n_inputs == 0:
            # use 0 as the default output value
            m.d.comb += self.output.eq(0)
        elif self.n_inputs == 1:
            # handle single input
            m.d.comb += self.output.eq(self._resized_inputs[0])
        else:
            # base case for adding 2 inputs
            assert self.n_inputs == 2
            adder = PartitionedAdder(len(self.output),
                                     self.i.reg_partition_points)
            m.submodules.final_adder = adder
            m.d.comb += adder.a.eq(self._resized_inputs[0])
            m.d.comb += adder.b.eq(self._resized_inputs[1])
            m.d.comb += self.output.eq(adder.output)
        return m
class AddReduceSingle(Elaboratable):
    """Add list of numbers together.

    :attribute inputs: input ``Signal``s to be summed. Modification not
        supported, except for by ``Signal.eq``.
    :attribute register_levels: List of nesting levels that should have
        pipeline registers.
    :attribute output: output sum.
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, n_inputs, output_width, n_parts, register_levels,
                 partition_points):
        """Create an ``AddReduce``.

        :param inputs: input ``Signal``s to be summed.
        :param output_width: bit-width of ``output``.
        :param register_levels: List of nesting levels that should have
            pipeline registers.
        :param partition_points: the input partition points.
        """
        self.n_inputs = n_inputs
        self.n_parts = n_parts
        self.output_width = output_width
        self.i = AddReduceData(partition_points, n_inputs,
                               output_width, n_parts)
        self._resized_inputs = self.i.inputs
        self.register_levels = list(register_levels)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(output_width):
            raise ValueError("partition_points doesn't fit in output_width")

        max_level = AddReduceSingle.get_max_level(n_inputs)
        for level in self.register_levels:
            if level > max_level:
                raise ValueError(
                    "not enough adder levels for specified register levels")

        # this is annoying.  we have to create the modules (and terms)
        # because we need to know what they are (in order to set up the
        # interconnects back in AddReduce), but cannot do the m.d.comb +=
        # etc because this is not in elaboratable.
        self.groups = AddReduceSingle.full_adder_groups(n_inputs)
        self._intermediate_terms = []
        if len(self.groups) != 0:
            self.create_next_terms()

    @staticmethod
    def get_max_level(input_count):
        """Get the maximum level.

        All ``register_levels`` must be less than or equal to the maximum
        level.
        """
        # NOTE(review): loop structure reconstructed — verify against upstream
        retval = 0
        while True:
            groups = AddReduceSingle.full_adder_groups(input_count)
            if len(groups) == 0:
                return retval
            input_count %= FULL_ADDER_INPUT_COUNT
            input_count += 2 * len(groups)
            retval += 1

    @staticmethod
    def full_adder_groups(input_count):
        """Get ``inputs`` indices for which a full adder should be built."""
        return range(0,
                     input_count - FULL_ADDER_INPUT_COUNT + 1,
                     FULL_ADDER_INPUT_COUNT)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        for (value, term) in self._intermediate_terms:
            m.d.comb += term.eq(value)

        mask = self.i.reg_partition_points.as_mask(self.output_width)
        m.d.comb += self.part_mask.eq(mask)

        # add and link the intermediate term modules
        for i, (iidx, adder_i) in enumerate(self.adders):
            setattr(m.submodules, f"adder_{i}", adder_i)

            m.d.comb += adder_i.in0.eq(self._resized_inputs[iidx])
            m.d.comb += adder_i.in1.eq(self._resized_inputs[iidx + 1])
            m.d.comb += adder_i.in2.eq(self._resized_inputs[iidx + 2])
            m.d.comb += adder_i.mask.eq(self.part_mask)

        return m

    def create_next_terms(self):
        # go on to prepare recursive case
        intermediate_terms = []
        _intermediate_terms = []

        def add_intermediate_term(value):
            intermediate_term = Signal(
                self.output_width,
                name=f"intermediate_terms[{len(intermediate_terms)}]")
            _intermediate_terms.append((value, intermediate_term))
            intermediate_terms.append(intermediate_term)

        # store mask in intermediary (simplifies graph)
        self.part_mask = Signal(self.output_width, reset_less=True)

        # create full adders for this recursive level.
        # this shrinks N terms to 2 * (N // 3) plus the remainder
        self.adders = []
        for i in self.groups:
            adder_i = MaskedFullAdder(self.output_width)
            self.adders.append((i, adder_i))
            # add both the sum and the masked-carry to the next level.
            # 3 inputs have now been reduced to 2...
            add_intermediate_term(adder_i.sum)
            add_intermediate_term(adder_i.mcarry)
        # handle the remaining inputs.
        if self.n_inputs % FULL_ADDER_INPUT_COUNT == 1:
            add_intermediate_term(self._resized_inputs[-1])
        elif self.n_inputs % FULL_ADDER_INPUT_COUNT == 2:
            # Just pass the terms to the next layer, since we wouldn't gain
            # anything by using a half adder since there would still be 2 terms
            # and just passing the terms to the next layer saves gates.
            add_intermediate_term(self._resized_inputs[-2])
            add_intermediate_term(self._resized_inputs[-1])
        else:
            assert self.n_inputs % FULL_ADDER_INPUT_COUNT == 0

        self.intermediate_terms = intermediate_terms
        self._intermediate_terms = _intermediate_terms
class AddReduce(Elaboratable):
    """Recursively Add list of numbers together.

    :attribute inputs: input ``Signal``s to be summed. Modification not
        supported, except for by ``Signal.eq``.
    :attribute register_levels: List of nesting levels that should have
        pipeline registers.
    :attribute output: output sum.
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, inputs, output_width, register_levels, partition_points,
                 part_ops):
        """Create an ``AddReduce``.

        :param inputs: input ``Signal``s to be summed.
        :param output_width: bit-width of ``output``.
        :param register_levels: List of nesting levels that should have
            pipeline registers.
        :param partition_points: the input partition points.
        """
        self.inputs = inputs
        self.part_ops = part_ops
        self.out_part_ops = [Signal(2, name=f"out_part_ops_{i}")
                             for i in range(len(part_ops))]
        self.output = Signal(output_width)
        self.output_width = output_width
        self.register_levels = register_levels
        self.partition_points = partition_points

        self.create_levels()

    @staticmethod
    def get_max_level(input_count):
        """Get the maximum level (delegates to ``AddReduceSingle``)."""
        return AddReduceSingle.get_max_level(input_count)

    @staticmethod
    def next_register_levels(register_levels):
        """``Iterable`` of ``register_levels`` for next recursive level."""
        for level in register_levels:
            if level > 0:
                yield level - 1

    def create_levels(self):
        """creates reduction levels"""

        mods = []
        next_levels = self.register_levels
        partition_points = self.partition_points
        inputs = self.inputs
        part_ops = self.part_ops
        n_parts = len(part_ops)
        # NOTE(review): loop boundary reconstructed — verify against upstream
        while True:
            ilen = len(inputs)
            next_level = AddReduceSingle(ilen, self.output_width, n_parts,
                                         next_levels, partition_points)
            mods.append(next_level)
            next_levels = list(AddReduce.next_register_levels(next_levels))
            partition_points = next_level.i.reg_partition_points
            inputs = next_level.intermediate_terms
            part_ops = next_level.i.part_ops
            groups = AddReduceSingle.full_adder_groups(len(inputs))
            if len(groups) == 0:
                break

        ilen = len(inputs)
        next_level = FinalAdd(ilen, self.output_width, n_parts,
                              next_levels, partition_points)
        mods.append(next_level)

        self.levels = mods

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        for i, next_level in enumerate(self.levels):
            setattr(m.submodules, "next_level%d" % i, next_level)

        partition_points = self.partition_points
        inputs = self.inputs
        part_ops = self.part_ops
        for i in range(len(self.levels)):
            mcur = self.levels[i]
            inassign = [mcur._resized_inputs[i].eq(inputs[i])
                        for i in range(len(inputs))]
            copy_part_ops = [mcur.i.part_ops[i].eq(part_ops[i])
                             for i in range(len(part_ops))]
            if 0 in mcur.register_levels:
                m.d.sync += copy_part_ops
                m.d.sync += inassign
                m.d.sync += mcur.i.reg_partition_points.eq(partition_points)
            else:
                m.d.comb += copy_part_ops
                m.d.comb += inassign
                m.d.comb += mcur.i.reg_partition_points.eq(partition_points)
            partition_points = mcur.i.reg_partition_points
            inputs = mcur.intermediate_terms
            part_ops = mcur.i.part_ops

        # output comes from last module
        m.d.comb += self.output.eq(next_level.output)
        copy_part_ops = [self.out_part_ops[i].eq(next_level.i.part_ops[i])
                         for i in range(len(self.part_ops))]
        m.d.comb += copy_part_ops

        return m
OP_MUL_SIGNED_HIGH = 1
OP_MUL_SIGNED_UNSIGNED_HIGH = 2  # a is signed, b is unsigned
OP_MUL_UNSIGNED_HIGH = 3
def get_term(value, shift=0, enabled=None):
    """Optionally gate ``value`` and shift it left by ``shift`` bits.

    :param value: the term to be (optionally) masked and shifted
    :param shift: number of zero bits to prepend (i.e. a left shift)
    :param enabled: optional gate; when deasserted the term becomes zero
    """
    if enabled is not None:
        value = Mux(enabled, value, 0)
    if shift > 0:
        value = Cat(Repl(C(0, 1), shift), value)
    return value
class ProductTerm(Elaboratable):
    """ this class creates a single product term (a[..]*b[..]).
        it has a design flaw in that is the *output* that is selected,
        where the multiplication(s) are combinatorially generated
        all the time.
    """

    def __init__(self, width, twidth, pbwid, a_index, b_index):
        self.a_index = a_index
        self.b_index = b_index
        shift = 8 * (self.a_index + self.b_index)
        self.pwidth = width       # width of one partition byte-group
        self.twidth = twidth      # total (term) width
        self.width = width*2      # product width
        self.shift = shift
        self.pbwid = pbwid

        self.ti = Signal(self.width, reset_less=True)
        self.term = Signal(twidth, reset_less=True)
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)

        # partition bits spanned by this term decide whether it is enabled
        self.tl = tl = []
        min_index = min(self.a_index, self.b_index)
        max_index = max(self.a_index, self.b_index)
        for i in range(min_index, max_index):
            tl.append(self.pb_en[i])
        name = "te_%d_%d" % (self.a_index, self.b_index)
        if len(tl) > 0:
            term_enabled = Signal(name=name, reset_less=True)
        else:
            term_enabled = None
        self.enabled = term_enabled
        self.term.name = "term_%d_%d" % (a_index, b_index)  # rename

    def elaborate(self, platform):

        m = Module()
        if self.enabled is not None:
            # term is enabled only when no spanned partition bit is set
            m.d.comb += self.enabled.eq(~(Cat(*self.tl).bool()))

        bsa = Signal(self.width, reset_less=True)
        bsb = Signal(self.width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        pwidth = self.pwidth
        m.d.comb += bsa.eq(self.a.part(a_index * pwidth, pwidth))
        m.d.comb += bsb.eq(self.b.part(b_index * pwidth, pwidth))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(get_term(self.ti, self.shift, self.enabled))
        #TODO: sort out width issues, get inputs a/b switched on/off.
        #data going into Muxes is 1/2 the required width
        """
        bsa = Signal(self.twidth//2, reset_less=True)
        bsb = Signal(self.twidth//2, reset_less=True)
        asel = Signal(width, reset_less=True)
        bsel = Signal(width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        m.d.comb += asel.eq(self.a.part(a_index * pwidth, pwidth))
        m.d.comb += bsel.eq(self.b.part(b_index * pwidth, pwidth))
        m.d.comb += bsa.eq(get_term(asel, self.shift, self.enabled))
        m.d.comb += bsb.eq(get_term(bsel, self.shift, self.enabled))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(self.ti)
        """

        return m
class ProductTerms(Elaboratable):
    """ creates a bank of product terms.  also performs the actual bit-selection
        this class is to be wrapped with a for-loop on the "a" operand.
        it creates a second-level for-loop on the "b" operand.
    """
    def __init__(self, width, twidth, pbwid, a_index, blen):
        self.a_index = a_index
        self.blen = blen
        self.pwidth = width
        self.twidth = twidth
        self.pbwid = pbwid
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)
        self.terms = [Signal(twidth, name="term%d" % i, reset_less=True)
                      for i in range(blen)]

    def elaborate(self, platform):

        m = Module()

        for b_index in range(self.blen):
            t = ProductTerm(self.pwidth, self.twidth, self.pbwid,
                            self.a_index, b_index)
            setattr(m.submodules, "term_%d" % b_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(self.pb_en)

            m.d.comb += self.terms[b_index].eq(t.term)

        return m
class LSBNegTerm(Elaboratable):
    """ computes the two extra terms needed to turn a partial product
        into a signed one: a width-extended 1s-complement term (``nt``)
        plus the "+1" LSB term (``nl``) of the 2s-complement negation.
    """

    def __init__(self, bit_width):
        self.bit_width = bit_width
        self.part = Signal(reset_less=True)
        self.signed = Signal(reset_less=True)
        self.op = Signal(bit_width, reset_less=True)
        self.msb = Signal(reset_less=True)
        self.nt = Signal(bit_width*2, reset_less=True)
        self.nl = Signal(bit_width*2, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        bit_wid = self.bit_width
        ext = Repl(0, bit_wid)  # extend output to HI part

        # determine sign of each incoming number *in this partition*
        enabled = Signal(reset_less=True)
        m.d.comb += enabled.eq(self.part & self.msb & self.signed)

        # for 8-bit values: form a * 0xFF00 by using -a * 0x100, the
        # negation operation is split into a bitwise not and a +1.
        # likewise for 16, 32, and 64-bit values.

        # width-extended 1s complement if a is signed, otherwise zero
        comb += self.nt.eq(Mux(enabled, Cat(ext, ~self.op), 0))

        # add 1 if signed, otherwise add zero
        comb += self.nl.eq(Cat(ext, enabled, Repl(0, bit_wid-1)))

        return m
class Parts(Elaboratable):
    """ decodes the (expanded) partition points into one "part" flag
        per partition lane.
    """

    def __init__(self, pbwid, epps, n_parts):
        self.pbwid = pbwid
        # inputs
        self.epps = PartitionPoints.like(epps, name="epps")  # expanded points
        # outputs
        self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)]

    def elaborate(self, platform):
        m = Module()

        epps, parts = self.epps, self.parts
        # collect part-bytes (double factor because the input is extended)
        pbs = Signal(self.pbwid, reset_less=True)
        tl = []
        for i in range(self.pbwid):
            pb = Signal(name="pb%d" % i, reset_less=True)
            m.d.comb += pb.eq(epps.part_byte(i, mfactor=2))  # double
            tl.append(pb)
        m.d.comb += pbs.eq(Cat(*tl))

        # negated-temporary copy of partition bits
        npbs = Signal.like(pbs, reset_less=True)
        m.d.comb += npbs.eq(~pbs)
        byte_count = 8 // len(parts)
        for i in range(len(parts)):
            pbl = []
            pbl.append(npbs[i * byte_count - 1])
            for j in range(i * byte_count, (i + 1) * byte_count - 1):
                pbl.append(pbs[j])
            pbl.append(npbs[(i + 1) * byte_count - 1])
            value = Signal(len(pbl), name="value_%d" % i, reset_less=True)
            m.d.comb += value.eq(Cat(*pbl))
            m.d.comb += parts[i].eq(~(value).bool())

        return m
class Part(Elaboratable):
    """ a key class which, depending on the partitioning, will determine
        what action to take when parts of the output are signed or unsigned.

        this requires 2 pieces of data *per operand, per partition*:
        whether the MSB is HI/LO (per partition!), and whether a signed
        or unsigned operation has been *requested*.

        once that is determined, signed is basically carried out
        by splitting 2's complement into 1's complement plus one.
        1's complement is just a bit-inversion.

        the extra terms - as separate terms - are then thrown at the
        AddReduce alongside the multiplication part-results.
    """
    def __init__(self, epps, width, n_parts, n_levels, pbwid):

        self.pbwid = pbwid
        self.epps = epps

        # inputs
        self.a = Signal(64)
        self.b = Signal(64)
        self.a_signed = [Signal(name=f"a_signed_{i}") for i in range(8)]
        self.b_signed = [Signal(name=f"_b_signed_{i}") for i in range(8)]
        self.pbs = Signal(pbwid, reset_less=True)

        # outputs
        self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)]

        self.not_a_term = Signal(width)
        self.neg_lsb_a_term = Signal(width)
        self.not_b_term = Signal(width)
        self.neg_lsb_b_term = Signal(width)

    def elaborate(self, platform):
        m = Module()

        pbs, parts = self.pbs, self.parts
        epps = self.epps
        m.submodules.p = p = Parts(self.pbwid, epps, len(parts))
        m.d.comb += p.epps.eq(epps)
        # NOTE(review): source lines elided here — connection of p.parts
        # to self.parts reconstructed; verify against upstream
        m.d.comb += [parts[i].eq(p.parts[i]) for i in range(len(parts))]

        byte_count = 8 // len(parts)

        not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = (
                self.not_a_term, self.neg_lsb_a_term,
                self.not_b_term, self.neg_lsb_b_term)

        byte_width = 8 // len(parts)  # byte width
        bit_wid = 8 * byte_width      # bit width
        nat, nbt, nla, nlb = [], [], [], []
        for i in range(len(parts)):
            # work out bit-inverted and +1 term for a.
            pa = LSBNegTerm(bit_wid)
            setattr(m.submodules, "lnt_%d_a_%d" % (bit_wid, i), pa)
            m.d.comb += pa.part.eq(parts[i])
            m.d.comb += pa.op.eq(self.a.part(bit_wid * i, bit_wid))
            m.d.comb += pa.signed.eq(self.b_signed[i * byte_width])  # yes b
            m.d.comb += pa.msb.eq(self.b[(i + 1) * bit_wid - 1])  # really, b
            nat.append(pa.nt)
            nla.append(pa.nl)

            # work out bit-inverted and +1 term for b
            pb = LSBNegTerm(bit_wid)
            setattr(m.submodules, "lnt_%d_b_%d" % (bit_wid, i), pb)
            m.d.comb += pb.part.eq(parts[i])
            m.d.comb += pb.op.eq(self.b.part(bit_wid * i, bit_wid))
            m.d.comb += pb.signed.eq(self.a_signed[i * byte_width])  # yes a
            m.d.comb += pb.msb.eq(self.a[(i + 1) * bit_wid - 1])  # really, a
            nbt.append(pb.nt)
            nlb.append(pb.nl)

        # concatenate together and return all 4 results.
        m.d.comb += [not_a_term.eq(Cat(*nat)),
                     not_b_term.eq(Cat(*nbt)),
                     neg_lsb_a_term.eq(Cat(*nla)),
                     neg_lsb_b_term.eq(Cat(*nlb)),
                    ]

        return m
class IntermediateOut(Elaboratable):
    """ selects the HI/LO part of the multiplication, for a given bit-width
        the output is also reconstructed in its SIMD (partition) lanes.
    """
    def __init__(self, width, out_wid, n_parts):
        self.width = width
        self.n_parts = n_parts
        self.part_ops = [Signal(2, name="dpop%d" % i, reset_less=True)
                         for i in range(8)]
        self.intermed = Signal(out_wid, reset_less=True)
        self.output = Signal(out_wid//2, reset_less=True)

    def elaborate(self, platform):
        m = Module()

        ol = []
        w = self.width
        sel = w // 8  # stride into part_ops: one op per byte
        for i in range(self.n_parts):
            op = Signal(w, reset_less=True, name="op%d_%d" % (w, i))
            m.d.comb += op.eq(
                Mux(self.part_ops[sel * i] == OP_MUL_LOW,
                    self.intermed.part(i * w*2, w),
                    self.intermed.part(i * w*2 + w, w)))
            ol.append(op)
        m.d.comb += self.output.eq(Cat(*ol))

        return m
class FinalOut(Elaboratable):
    """ selects the final output based on the partitioning.

        each byte is selectable independently, i.e. it is possible
        that some partitions requested 8-bit computation whilst others
        requested 16 or 32 bit.
    """
    def __init__(self, out_wid):
        # inputs
        self.d8 = [Signal(name=f"d8_{i}", reset_less=True) for i in range(8)]
        self.d16 = [Signal(name=f"d16_{i}", reset_less=True) for i in range(4)]
        self.d32 = [Signal(name=f"d32_{i}", reset_less=True) for i in range(2)]

        self.i8 = Signal(out_wid, reset_less=True)
        self.i16 = Signal(out_wid, reset_less=True)
        self.i32 = Signal(out_wid, reset_less=True)
        self.i64 = Signal(out_wid, reset_less=True)

        # output
        self.out = Signal(out_wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()

        ol = []
        for i in range(8):
            # select one of the outputs: d8 selects i8, d16 selects i16
            # d32 selects i32, and the default is i64.
            # d8 and d16 are ORed together in the first Mux
            # then the 2nd selects either i8 or i16.
            # if neither d8 nor d16 are set, d32 selects either i32 or i64.
            op = Signal(8, reset_less=True, name="op_%d" % i)
            m.d.comb += op.eq(
                Mux(self.d8[i] | self.d16[i // 2],
                    Mux(self.d8[i], self.i8.part(i * 8, 8),
                        self.i16.part(i * 8, 8)),
                    Mux(self.d32[i // 4], self.i32.part(i * 8, 8),
                        self.i64.part(i * 8, 8))))
            ol.append(op)
        m.d.comb += self.out.eq(Cat(*ol))

        return m
class OrMod(Elaboratable):
    """ ORs four values together in a hierarchical tree
    """
    def __init__(self, wid):
        """ :param wid: width of each input and of the OR-ed output
        """
        self.wid = wid
        # four inputs to be OR-ed together (elaborate reads orin[0..3])
        self.orin = [Signal(wid, name="orin%d" % i, reset_less=True)
                     for i in range(4)]
        # output: orin[0] | orin[1] | orin[2] | orin[3]
        self.orout = Signal(wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        # two-level OR tree: pairwise first, then combine the pairs
        or1 = Signal(self.wid, reset_less=True)
        or2 = Signal(self.wid, reset_less=True)
        m.d.comb += or1.eq(self.orin[0] | self.orin[1])
        m.d.comb += or2.eq(self.orin[2] | self.orin[3])
        m.d.comb += self.orout.eq(or1 | or2)

        return m
class Signs(Elaboratable):
    """ determines whether a or b are signed numbers
        based on the required operation type (OP_MUL_*)
    """

    def __init__(self):
        # input: 2-bit operation code (one of the OP_MUL_* constants)
        self.part_ops = Signal(2, reset_less=True)
        # outputs: whether operand a / operand b is to be treated as signed
        self.a_signed = Signal(reset_less=True)
        self.b_signed = Signal(reset_less=True)

    def elaborate(self, platform):
        m = Module()

        # a is signed for every op except mulhu (OP_MUL_UNSIGNED_HIGH);
        # b is signed only for mul (OP_MUL_LOW) and mulh (OP_MUL_SIGNED_HIGH)
        # -- i.e. mulhsu has signed a, unsigned b.
        asig = self.part_ops != OP_MUL_UNSIGNED_HIGH
        bsig = (self.part_ops == OP_MUL_LOW) \
                    | (self.part_ops == OP_MUL_SIGNED_HIGH)
        m.d.comb += self.a_signed.eq(asig)
        m.d.comb += self.b_signed.eq(bsig)

        return m
class Mul8_16_32_64(Elaboratable):
    """Signed/Unsigned 8/16/32/64-bit partitioned integer multiplier.

    Supports partitioning into any combination of 8, 16, 32, and 64-bit
    partitions on naturally-aligned boundaries. Supports the operation being
    set for each partition independently.

    :attribute part_pts: the input partition points. Has a partition point at
        multiples of 8 in 0 < i < 64. Each partition point's associated
        ``Value`` is a ``Signal``. Modification not supported, except for by
        ``Signal.eq``.
    :attribute part_ops: the operation for each byte. The operation for a
        particular partition is selected by assigning the selected operation
        code to each byte in the partition. The allowed operation codes are:

        :attribute OP_MUL_LOW: the LSB half of the product. Equivalent to
            RISC-V's `mul` instruction.
        :attribute OP_MUL_SIGNED_HIGH: the MSB half of the product where both
            ``a`` and ``b`` are signed. Equivalent to RISC-V's `mulh`
            instruction.
        :attribute OP_MUL_SIGNED_UNSIGNED_HIGH: the MSB half of the product
            where ``a`` is signed and ``b`` is unsigned. Equivalent to
            RISC-V's `mulhsu` instruction.
        :attribute OP_MUL_UNSIGNED_HIGH: the MSB half of the product where both
            ``a`` and ``b`` are unsigned. Equivalent to RISC-V's `mulhu`
            instruction.
    """

    def __init__(self, register_levels=()):
        """ register_levels: specifies the points in the cascade at which
            flip-flops are to be inserted.
        """
        # parameter(s)
        self.register_levels = list(register_levels)

        # inputs
        self.part_pts = PartitionPoints()
        for i in range(8, 64, 8):
            self.part_pts[i] = Signal(name=f"part_pts_{i}")
        self.part_ops = [Signal(2, name=f"part_ops_{i}") for i in range(8)]
        # NOTE(review): the operand declarations were dropped by the
        # text-mangling; reconstructed because elaborate() reads self.a and
        # self.b into 64-bit datapaths -- confirm widths against upstream.
        self.a = Signal(64)
        self.b = Signal(64)

        # intermediates (needed for unit tests)
        self._intermediate_output = Signal(128)

        # output
        self.output = Signal(64)

    def elaborate(self, platform):
        m = Module()

        # collect part-bytes: one enable bit per byte-boundary partition point
        pbs = Signal(8, reset_less=True)
        tl = []
        for i in range(8):
            pb = Signal(name="pb%d" % i, reset_less=True)
            m.d.comb += pb.eq(self.part_pts.part_byte(i))
            tl.append(pb)
        m.d.comb += pbs.eq(Cat(*tl))

        # create (doubled) PartitionPoints (output is double input width)
        expanded_part_pts = eps = PartitionPoints()
        for i, v in self.part_pts.items():
            ep = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True)
            expanded_part_pts[i * 2] = ep
            m.d.comb += ep.eq(v)

        # one sign-decoder per byte, fed from that byte's op-code
        signs = []
        for i in range(8):
            s = Signs()
            signs.append(s)
            setattr(m.submodules, "signs%d" % i, s)
            m.d.comb += s.part_ops.eq(self.part_ops[i])

        # sign-correction term generators, one per partition width
        # (8 lanes of 8-bit, 4 of 16-bit, 2 of 32-bit, 1 of 64-bit)
        n_levels = len(self.register_levels)+1
        m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8)
        m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8)
        m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8)
        m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8)
        nat_l, nbt_l, nla_l, nlb_l = [], [], [], []
        for mod in [part_8, part_16, part_32, part_64]:
            m.d.comb += mod.a.eq(self.a)
            m.d.comb += mod.b.eq(self.b)
            for i in range(len(signs)):
                m.d.comb += mod.a_signed[i].eq(signs[i].a_signed)
                m.d.comb += mod.b_signed[i].eq(signs[i].b_signed)
            m.d.comb += mod.pbs.eq(pbs)
            nat_l.append(mod.not_a_term)
            nbt_l.append(mod.not_b_term)
            nla_l.append(mod.neg_lsb_a_term)
            nlb_l.append(mod.neg_lsb_b_term)

        # byte-by-byte partial-product terms: one ProductTerms row per byte
        # of a, each contributing its terms to the global reduction list
        terms = []
        for a_index in range(8):
            t = ProductTerms(8, 128, 8, a_index, 8)
            setattr(m.submodules, "terms_%d" % a_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(pbs)

            for term in t.terms:
                terms.append(term)

        # it's fine to bitwise-or data together since they are never enabled
        # at the same time
        m.submodules.nat_or = nat_or = OrMod(128)
        m.submodules.nbt_or = nbt_or = OrMod(128)
        m.submodules.nla_or = nla_or = OrMod(128)
        m.submodules.nlb_or = nlb_or = OrMod(128)
        for lst, mod in [(nat_l, nat_or),
                         (nbt_l, nbt_or),
                         (nla_l, nla_or),
                         (nlb_l, nlb_or)]:
            for i in range(len(lst)):
                m.d.comb += mod.orin[i].eq(lst[i])
            terms.append(mod.orout)

        # sum all terms in a partitioned adder-reduction tree.
        # NOTE(review): the middle arguments were dropped by the
        # text-mangling; reconstructed as (output width, register levels,
        # expanded partition points, op-codes) -- confirm against
        # AddReduce's signature.
        add_reduce = AddReduce(terms,
                               128,
                               self.register_levels,
                               expanded_part_pts,
                               self.part_ops)

        # op-codes / partition points as pipelined through the tree, so the
        # output-side selectors stay aligned with the (registered) data
        out_part_ops = add_reduce.levels[-1].i.part_ops
        out_part_pts = add_reduce.levels[-1].i.reg_partition_points

        m.submodules.add_reduce = add_reduce
        m.d.comb += self._intermediate_output.eq(add_reduce.output)

        # HI/LO selection of the 128-bit intermediate, one per lane width
        m.submodules.io64 = io64 = IntermediateOut(64, 128, 1)
        m.d.comb += io64.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io64.part_ops[i].eq(out_part_ops[i])

        m.submodules.io32 = io32 = IntermediateOut(32, 128, 2)
        m.d.comb += io32.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io32.part_ops[i].eq(out_part_ops[i])

        m.submodules.io16 = io16 = IntermediateOut(16, 128, 4)
        m.d.comb += io16.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io16.part_ops[i].eq(out_part_ops[i])

        m.submodules.io8 = io8 = IntermediateOut(8, 128, 8)
        m.d.comb += io8.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io8.part_ops[i].eq(out_part_ops[i])

        # decode, from the partition points, which width each lane used
        m.submodules.p_8 = p_8 = Parts(8, eps, len(part_8.parts))
        m.submodules.p_16 = p_16 = Parts(8, eps, len(part_16.parts))
        m.submodules.p_32 = p_32 = Parts(8, eps, len(part_32.parts))
        m.submodules.p_64 = p_64 = Parts(8, eps, len(part_64.parts))

        m.d.comb += p_8.epps.eq(out_part_pts)
        m.d.comb += p_16.epps.eq(out_part_pts)
        m.d.comb += p_32.epps.eq(out_part_pts)
        m.d.comb += p_64.epps.eq(out_part_pts)

        # byte-wise final selection between the four candidate outputs
        # (64-bit is FinalOut's default, hence no d64 hookup)
        m.submodules.finalout = finalout = FinalOut(64)
        for i in range(len(part_8.parts)):
            m.d.comb += finalout.d8[i].eq(p_8.parts[i])
        for i in range(len(part_16.parts)):
            m.d.comb += finalout.d16[i].eq(p_16.parts[i])
        for i in range(len(part_32.parts)):
            m.d.comb += finalout.d32[i].eq(p_32.parts[i])
        m.d.comb += finalout.i8.eq(io8.output)
        m.d.comb += finalout.i16.eq(io16.output)
        m.d.comb += finalout.i32.eq(io32.output)
        m.d.comb += finalout.i64.eq(io64.output)
        m.d.comb += self.output.eq(finalout.out)

        return m
if __name__ == "__main__":
    # run the nmigen CLI (e.g. Verilog generation) on a default,
    # unregistered instance of the partitioned multiplier.
    # NOTE(review): instance construction and part of the ports list were
    # dropped by the text-mangling; reconstructed from the class's visible
    # attributes -- confirm port order against upstream.
    m = Mul8_16_32_64()
    main(m, ports=[m.a,
                   m.b,
                   m._intermediate_output,
                   m.output,
                   *m.part_ops,
                   *m.part_pts.values()])