# SPDX-License-Identifier: LGPL-2.1-or-later
# See Notices.txt for copyright information
"""Integer Multiplication."""

from nmigen import Signal, Module, Value, Elaboratable, Cat, C, Mux, Repl
from nmigen.hdl.ast import Assign
from abc import ABCMeta, abstractmethod
from nmigen.cli import main
from functools import reduce
from operator import or_

class PartitionPoints(dict):
    """Partition points and corresponding ``Value``s.

    The points at where an ALU is partitioned along with ``Value``s that
    specify if the corresponding partition points are enabled.

    For example: ``{1: True, 5: True, 10: True}`` with
    ``width == 16`` specifies that the ALU is split into 4 sections:
    * bits 0 <= ``i`` < 1
    * bits 1 <= ``i`` < 5
    * bits 5 <= ``i`` < 10
    * bits 10 <= ``i`` < 16

    If the partition_points were instead ``{1: True, 5: a, 10: True}``
    where ``a`` is a 1-bit ``Signal``:
    * If ``a`` is asserted:
        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 5
        * bits 5 <= ``i`` < 10
        * bits 10 <= ``i`` < 16
    * Otherwise
        * bits 0 <= ``i`` < 1
        * bits 1 <= ``i`` < 10
        * bits 10 <= ``i`` < 16
    """

    def __init__(self, partition_points=None):
        """Create a new ``PartitionPoints``.

        :param partition_points: the input partition points to values mapping.
        """
        super().__init__()
        if partition_points is not None:
            for point, enabled in partition_points.items():
                if not isinstance(point, int):
                    raise TypeError("point must be a non-negative integer")
                if point < 0:
                    raise ValueError("point must be a non-negative integer")
                self[point] = Value.wrap(enabled)

    def like(self, name=None, src_loc_at=0):
        """Create a new ``PartitionPoints`` with ``Signal``s for all values.

        :param name: the base name for the new ``Signal``s.
        """
        if name is None:
            name = Signal(src_loc_at=1+src_loc_at).name # get variable name
        retval = PartitionPoints()
        for point, enabled in self.items():
            retval[point] = Signal(enabled.shape(), name=f"{name}_{point}")
        return retval

    def eq(self, rhs):
        """Assign ``PartitionPoints`` using ``Signal.eq``."""
        if set(self.keys()) != set(rhs.keys()):
            raise ValueError("incompatible point set")
        for point, enabled in self.items():
            yield enabled.eq(rhs[point])

    def as_mask(self, width):
        """Create a bit-mask from `self`.

        Each bit in the returned mask is clear only if the partition point at
        the same bit-index is enabled.

        :param width: the bit width of the resulting mask
        """
        bits = []
        for i in range(width):
            if i in self:
                bits.append(~self[i])
            else:
                bits.append(True)
        return Cat(*bits)

    def get_max_partition_count(self, width):
        """Get the maximum number of partitions.

        Gets the number of partitions when all partition points are enabled.
        """
        retval = 1
        for point in self.keys():
            if point < width:
                retval += 1
        return retval

    def fits_in_width(self, width):
        """Check if all partition points are smaller than `width`."""
        for point in self.keys():
            if point >= width:
                return False
        return True


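# Usage sketch for PartitionPoints (illustrative only; the signal names
# below are hypothetical):
#
#     ppoints = PartitionPoints({8: p8, 16: p16, 24: p24})
#     mask = ppoints.as_mask(32)
#
# with all three points enabled, bits 8, 16 and 24 of ``mask`` are clear
# and every other bit is set, so ANDing a carry vector with ``mask`` stops
# carries from crossing those 8-bit boundaries (this is how AddReduce,
# below, uses it).

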
class FullAdder(Elaboratable):
    """Full Adder.

    :attribute in0: the first input
    :attribute in1: the second input
    :attribute in2: the third input
    :attribute sum: the sum output
    :attribute carry: the carry output
    """

    def __init__(self, width):
        """Create a ``FullAdder``.

        :param width: the bit width of the input and output
        """
        self.in0 = Signal(width)
        self.in1 = Signal(width)
        self.in2 = Signal(width)
        self.sum = Signal(width)
        self.carry = Signal(width)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2)
        m.d.comb += self.carry.eq((self.in0 & self.in1)
                                  | (self.in1 & self.in2)
                                  | (self.in2 & self.in0))
        return m


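# note on FullAdder: per bit position it satisfies the carry-save identity
#
#     in0 + in1 + in2 == sum + (carry << 1)
#
# which is what lets AddReduce (below) compress three partial-product terms
# into two (a sum term and a shifted carry term) without propagating any
# carries inside this module.

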
class PartitionedAdder(Elaboratable):
    """Partitioned Adder.

    :attribute width: the bit width of the input and output. Read-only.
    :attribute a: the first input to the adder
    :attribute b: the second input to the adder
    :attribute output: the sum output
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, width, partition_points):
        """Create a ``PartitionedAdder``.

        :param width: the bit width of the input and output
        :param partition_points: the input partition points
        """
        self.width = width
        self.a = Signal(width)
        self.b = Signal(width)
        self.output = Signal(width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(width):
            raise ValueError("partition_points doesn't fit in width")
        expanded_width = 0
        for i in range(self.width):
            if i in self.partition_points:
                expanded_width += 1
            expanded_width += 1
        self._expanded_width = expanded_width
        # XXX these have to remain here due to some horrible nmigen
        # simulation bugs involving sync. it is *not* necessary to
        # have them here, they should (under normal circumstances)
        # be moved into elaborate, as they are entirely local
        self._expanded_a = Signal(expanded_width)
        self._expanded_b = Signal(expanded_width)
        self._expanded_output = Signal(expanded_width)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()
        expanded_index = 0
        # store bits in a list, use Cat later. graphviz is much cleaner
        al = []
        bl = []
        ol = []
        ea = []
        eb = []
        eo = []
        # partition points are "breaks" (extra zeros) in what would otherwise
        # be a massive long add.
        for i in range(self.width):
            if i in self.partition_points:
                # add extra bit set to 0 + 0 for enabled partition points
                # and 1 + 0 for disabled partition points
                ea.append(self._expanded_a[expanded_index])
                al.append(~self.partition_points[i])
                eb.append(self._expanded_b[expanded_index])
                bl.append(C(0))
                expanded_index += 1
            ea.append(self._expanded_a[expanded_index])
            al.append(self.a[i])
            eb.append(self._expanded_b[expanded_index])
            bl.append(self.b[i])
            eo.append(self._expanded_output[expanded_index])
            ol.append(self.output[i])
            expanded_index += 1
        # combine above using Cat
        m.d.comb += Cat(*ea).eq(Cat(*al))
        m.d.comb += Cat(*eb).eq(Cat(*bl))
        m.d.comb += Cat(*ol).eq(Cat(*eo))
        # use only one addition to take advantage of look-ahead carry and
        # special hardware on FPGAs
        m.d.comb += self._expanded_output.eq(
            self._expanded_a + self._expanded_b)
        return m


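# worked example for PartitionedAdder (illustrative): width == 8 with a
# single partition point at bit 4 gives _expanded_width == 9. bits 0-3 of
# ``a``/``b`` map to expanded bits 0-3, an extra "break" bit is inserted at
# expanded bit 4 (driven with ~partition_points[4] on the ``a`` side and
# constant 0 on the ``b`` side), and bits 4-7 map to expanded bits 5-8.
# when the point is enabled the break position adds 0 + 0, so a carry out
# of bit 3 is absorbed there (and discarded) instead of rippling into
# bit 4; when disabled it adds 1 + 0 and the carry propagates as normal.

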
FULL_ADDER_INPUT_COUNT = 3


class AddReduce(Elaboratable):
    """Add list of numbers together.

    :attribute inputs: input ``Signal``s to be summed. Modification not
        supported, except for by ``Signal.eq``.
    :attribute register_levels: List of nesting levels that should have
        pipeline registers.
    :attribute output: output sum.
    :attribute partition_points: the input partition points. Modification not
        supported, except for by ``Signal.eq``.
    """

    def __init__(self, inputs, output_width, register_levels, partition_points):
        """Create an ``AddReduce``.

        :param inputs: input ``Signal``s to be summed.
        :param output_width: bit-width of ``output``.
        :param register_levels: List of nesting levels that should have
            pipeline registers.
        :param partition_points: the input partition points.
        """
        self.inputs = list(inputs)
        self._resized_inputs = [
            Signal(output_width, name=f"resized_inputs[{i}]")
            for i in range(len(self.inputs))]
        self.register_levels = list(register_levels)
        self.output = Signal(output_width)
        self.partition_points = PartitionPoints(partition_points)
        if not self.partition_points.fits_in_width(output_width):
            raise ValueError("partition_points doesn't fit in output_width")
        self._reg_partition_points = self.partition_points.like()
        max_level = AddReduce.get_max_level(len(self.inputs))
        for level in self.register_levels:
            if level > max_level:
                raise ValueError(
                    "not enough adder levels for specified register levels")

    @staticmethod
    def get_max_level(input_count):
        """Get the maximum level.

        All ``register_levels`` must be less than or equal to the maximum
        level.
        """
        retval = 0
        while True:
            groups = AddReduce.full_adder_groups(input_count)
            if len(groups) == 0:
                return retval
            input_count %= FULL_ADDER_INPUT_COUNT
            input_count += 2 * len(groups)
            retval += 1

    def next_register_levels(self):
        """``Iterable`` of ``register_levels`` for next recursive level."""
        for level in self.register_levels:
            if level > 0:
                yield level - 1

    @staticmethod
    def full_adder_groups(input_count):
        """Get ``inputs`` indices for which a full adder should be built."""
        return range(0,
                     input_count - FULL_ADDER_INPUT_COUNT + 1,
                     FULL_ADDER_INPUT_COUNT)

    def elaborate(self, platform):
        """Elaborate this module."""
        m = Module()

        # resize inputs to correct bit-width and optionally add in
        # pipeline registers
        resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
                                     for i in range(len(self.inputs))]
        if 0 in self.register_levels:
            m.d.sync += resized_input_assignments
            m.d.sync += self._reg_partition_points.eq(self.partition_points)
        else:
            m.d.comb += resized_input_assignments
            m.d.comb += self._reg_partition_points.eq(self.partition_points)

        groups = AddReduce.full_adder_groups(len(self.inputs))
        # if there are no full adders to create, then we handle the base cases
        # and return, otherwise we go on to the recursive case
        if len(groups) == 0:
            if len(self.inputs) == 0:
                # use 0 as the default output value
                m.d.comb += self.output.eq(0)
            elif len(self.inputs) == 1:
                # handle single input
                m.d.comb += self.output.eq(self._resized_inputs[0])
            else:
                # base case for adding 2 or more inputs, which get recursively
                # reduced to 2 inputs
                assert len(self.inputs) == 2
                adder = PartitionedAdder(len(self.output),
                                         self._reg_partition_points)
                m.submodules.final_adder = adder
                m.d.comb += adder.a.eq(self._resized_inputs[0])
                m.d.comb += adder.b.eq(self._resized_inputs[1])
                m.d.comb += self.output.eq(adder.output)
            return m
        # go on to handle recursive case
        intermediate_terms = []

        def add_intermediate_term(value):
            intermediate_term = Signal(
                len(self.output),
                name=f"intermediate_terms[{len(intermediate_terms)}]")
            intermediate_terms.append(intermediate_term)
            m.d.comb += intermediate_term.eq(value)

        # store mask in intermediary (simplifies graph)
        part_mask = Signal(len(self.output), reset_less=True)
        mask = self._reg_partition_points.as_mask(len(self.output))
        m.d.comb += part_mask.eq(mask)

        # create full adders for this recursive level.
        # this shrinks N terms to 2 * (N // 3) plus the remainder
        for i in groups:
            adder_i = FullAdder(len(self.output))
            setattr(m.submodules, f"adder_{i}", adder_i)
            m.d.comb += adder_i.in0.eq(self._resized_inputs[i])
            m.d.comb += adder_i.in1.eq(self._resized_inputs[i + 1])
            m.d.comb += adder_i.in2.eq(self._resized_inputs[i + 2])
            add_intermediate_term(adder_i.sum)
            # mask out carry bits to prevent carries between partitions
            shifted_carry = adder_i.carry << 1
            add_intermediate_term(shifted_carry & part_mask)
        # handle the remaining inputs.
        if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1:
            add_intermediate_term(self._resized_inputs[-1])
        elif len(self.inputs) % FULL_ADDER_INPUT_COUNT == 2:
            # Just pass the terms to the next layer: a half adder would not
            # help, since there would still be 2 terms, and passing them
            # straight through saves gates.
            add_intermediate_term(self._resized_inputs[-2])
            add_intermediate_term(self._resized_inputs[-1])
        else:
            assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0
        # recursive invocation of ``AddReduce``
        next_level = AddReduce(intermediate_terms,
                               len(self.output),
                               self.next_register_levels(),
                               self._reg_partition_points)
        m.submodules.next_level = next_level
        m.d.comb += self.output.eq(next_level.output)
        return m


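# reduction sketch for AddReduce (illustrative): with 9 input terms,
# full_adder_groups(9) yields 3 groups, each contributing a sum term and a
# masked, shifted carry term, so the next level sees 6 terms; the chain is
# 9 -> 6 -> 4 -> 3 -> 2, after which the base case uses a single
# PartitionedAdder. correspondingly get_max_level(9) == 4, so
# register_levels may only contain values in the range 0..4.

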
OP_MUL_LOW = 0
OP_MUL_SIGNED_HIGH = 1
OP_MUL_SIGNED_UNSIGNED_HIGH = 2 # a is signed, b is unsigned
OP_MUL_UNSIGNED_HIGH = 3


def get_term(value, shift=0, enabled=None):
    if enabled is not None:
        value = Mux(enabled, value, 0)
    if shift > 0:
        value = Cat(Repl(C(0, 1), shift), value)
    else:
        assert shift == 0
    return value


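# note: get_term(x, shift, en) behaves like Mux(en, x, 0) << shift, except
# that the shift is done by concatenating ``shift`` zero bits below the
# value (Cat), which keeps the term width explicit for the adder tree.

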
class ProductTerm(Elaboratable):
    """ this class creates a single product term (a[..]*b[..]).
        it has a design flaw in that it is the *output* that is selected,
        while the multiplication(s) are combinatorially generated
        all the time.
    """

    def __init__(self, width, twidth, pbwid, a_index, b_index):
        self.a_index = a_index
        self.b_index = b_index
        shift = 8 * (self.a_index + self.b_index)
        self.pwidth = width
        self.twidth = twidth
        self.width = width*2
        self.shift = shift

        self.ti = Signal(self.width, reset_less=True)
        self.term = Signal(twidth, reset_less=True)
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)

        self.tl = tl = []
        min_index = min(self.a_index, self.b_index)
        max_index = max(self.a_index, self.b_index)
        for i in range(min_index, max_index):
            tl.append(self.pb_en[i])
        name = "te_%d_%d" % (self.a_index, self.b_index)
        if len(tl) > 0:
            term_enabled = Signal(name=name, reset_less=True)
        else:
            term_enabled = None
        self.enabled = term_enabled
        self.term.name = "term_%d_%d" % (a_index, b_index) # rename

    def elaborate(self, platform):

        m = Module()
        if self.enabled is not None:
            m.d.comb += self.enabled.eq(~(Cat(*self.tl).bool()))

        bsa = Signal(self.width, reset_less=True)
        bsb = Signal(self.width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        pwidth = self.pwidth
        m.d.comb += bsa.eq(self.a.bit_select(a_index * pwidth, pwidth))
        m.d.comb += bsb.eq(self.b.bit_select(b_index * pwidth, pwidth))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(get_term(self.ti, self.shift, self.enabled))
        """
        #TODO: sort out width issues, get inputs a/b switched on/off.
        #data going into Muxes is 1/2 the required width

        pwidth = self.pwidth
        width = self.width
        bsa = Signal(self.twidth//2, reset_less=True)
        bsb = Signal(self.twidth//2, reset_less=True)
        asel = Signal(width, reset_less=True)
        bsel = Signal(width, reset_less=True)
        a_index, b_index = self.a_index, self.b_index
        m.d.comb += asel.eq(self.a.bit_select(a_index * pwidth, pwidth))
        m.d.comb += bsel.eq(self.b.bit_select(b_index * pwidth, pwidth))
        m.d.comb += bsa.eq(get_term(asel, self.shift, self.enabled))
        m.d.comb += bsb.eq(get_term(bsel, self.shift, self.enabled))
        m.d.comb += self.ti.eq(bsa * bsb)
        m.d.comb += self.term.eq(self.ti)
        """

        return m


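# example for ProductTerm (illustrative): with pwidth == 8, a_index == 1
# and b_index == 2, the term multiplies a[8:16] by b[16:24] and places the
# 16-bit product at bit offset 8 * (1 + 2) == 24 of ``term``. the term is
# forced to zero (via ``enabled``) whenever the partition boundary between
# byte 1 and byte 2 is active, i.e. when the two bytes belong to different
# partitions.

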
class ProductTerms(Elaboratable):
    """ creates a bank of product terms.  also performs the actual
        bit-selection.  this class is to be wrapped with a for-loop on the
        "a" operand.  it creates a second-level for-loop on the "b" operand.
    """
    def __init__(self, width, twidth, pbwid, a_index, blen):
        self.a_index = a_index
        self.blen = blen
        self.pwidth = width
        self.twidth = twidth
        self.pbwid = pbwid
        self.a = Signal(twidth//2, reset_less=True)
        self.b = Signal(twidth//2, reset_less=True)
        self.pb_en = Signal(pbwid, reset_less=True)
        self.terms = [Signal(twidth, name="term%d" % i, reset_less=True)
                      for i in range(blen)]

    def elaborate(self, platform):

        m = Module()

        for b_index in range(self.blen):
            t = ProductTerm(self.pwidth, self.twidth, self.pbwid,
                            self.a_index, b_index)
            setattr(m.submodules, "term_%d" % b_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(self.pb_en)

            m.d.comb += self.terms[b_index].eq(t.term)

        return m


class Part(Elaboratable):
    """ determines, for a given partition size (8, 16, 32 or 64-bit view),
        which partitions are active (from the partition-point pattern in
        ``pbs``), keeps delayed copies of those selection bits, and builds
        the sign-correction terms (not_a_term/not_b_term plus the matching
        neg_lsb "+1" terms) needed when a partition is treated as signed.
    """
    def __init__(self, width, n_parts, n_levels, pbwid):

        # inputs
        self.a = Signal(64)
        self.b = Signal(64)
        self.a_signed = [Signal(name=f"a_signed_{i}") for i in range(8)]
        self.b_signed = [Signal(name=f"b_signed_{i}") for i in range(8)]
        self.pbs = Signal(pbwid, reset_less=True)

        # outputs
        self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)]
        self.delayed_parts = [
            [Signal(name=f"delayed_part_{delay}_{i}")
             for i in range(n_parts)]
            for delay in range(n_levels)]
        # XXX REALLY WEIRD BUG - have to take a copy of the last delayed_parts
        self.dplast = [Signal(name=f"dplast_{i}")
                       for i in range(n_parts)]

        self.not_a_term = Signal(width)
        self.neg_lsb_a_term = Signal(width)
        self.not_b_term = Signal(width)
        self.neg_lsb_b_term = Signal(width)

    def elaborate(self, platform):
        m = Module()

        pbs, parts, delayed_parts = self.pbs, self.parts, self.delayed_parts
        byte_count = 8 // len(parts)
        for i in range(len(parts)):
            pbl = []
            pbl.append(~pbs[i * byte_count - 1])
            for j in range(i * byte_count, (i + 1) * byte_count - 1):
                pbl.append(pbs[j])
            pbl.append(~pbs[(i + 1) * byte_count - 1])
            value = Signal(len(pbl), reset_less=True)
            m.d.comb += value.eq(Cat(*pbl))
            m.d.comb += parts[i].eq(~(value).bool())
            m.d.comb += delayed_parts[0][i].eq(parts[i])
            m.d.sync += [delayed_parts[j + 1][i].eq(delayed_parts[j][i])
                         for j in range(len(delayed_parts)-1)]
            m.d.comb += self.dplast[i].eq(delayed_parts[-1][i])

        not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = \
            self.not_a_term, self.neg_lsb_a_term, \
            self.not_b_term, self.neg_lsb_b_term

        byte_width = 8 // len(parts)
        bit_width = 8 * byte_width
        nat, nbt, nla, nlb = [], [], [], []
        for i in range(len(parts)):
            # note: each correction term is enabled by the sign of the
            # *other* operand (a negative signed ``b`` requires the (-a)
            # correction), hence the apparent a/b cross-over in ae/be here.
            be = parts[i] & self.a[(i + 1) * bit_width - 1] \
                & self.a_signed[i * byte_width]
            ae = parts[i] & self.b[(i + 1) * bit_width - 1] \
                & self.b_signed[i * byte_width]
            a_enabled = Signal(name="a_en_%d" % i, reset_less=True)
            b_enabled = Signal(name="b_en_%d" % i, reset_less=True)
            m.d.comb += a_enabled.eq(ae)
            m.d.comb += b_enabled.eq(be)

            # for 8-bit values: form a * 0xFF00 by using -a * 0x100, the
            # negation operation is split into a bitwise not and a +1.
            # likewise for 16, 32, and 64-bit values.
            nat.append(Mux(a_enabled,
                           Cat(Repl(0, bit_width),
                               ~self.a.bit_select(bit_width * i, bit_width)),
                           0))

            nla.append(Cat(Repl(0, bit_width), a_enabled,
                           Repl(0, bit_width-1)))

            nbt.append(Mux(b_enabled,
                           Cat(Repl(0, bit_width),
                               ~self.b.bit_select(bit_width * i, bit_width)),
                           0))

            nlb.append(Cat(Repl(0, bit_width), b_enabled,
                           Repl(0, bit_width-1)))

        m.d.comb += [not_a_term.eq(Cat(*nat)),
                     not_b_term.eq(Cat(*nbt)),
                     neg_lsb_a_term.eq(Cat(*nla)),
                     neg_lsb_b_term.eq(Cat(*nlb)),
                     ]

        return m


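# worked example for the Part sign-correction terms (8-bit lanes, i.e. the
# part_8 instance): if lane i of ``b`` is to be treated as signed and its
# top bit is set, then b_signed == b_unsigned - 0x100, so
#
#     a * b_signed == a * b_unsigned + ((-a) << 8)
#                  == a * b_unsigned + ((~a) << 8) + (1 << 8)
#
# the ((~a) << 8) piece is what not_a_term carries for that lane and the
# (1 << 8) piece is what neg_lsb_a_term carries; not_b_term/neg_lsb_b_term
# are the symmetric corrections for a signed, negative ``a``.  wider lanes
# use the same identity with 0x10000, 0x100000000, and so on.

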
class IntermediateOut(Elaboratable):
    """ selects the HI/LO part of the multiplication, for a given bit-width.
        the output is also reconstructed in its SIMD (partition) lanes.
    """
    def __init__(self, width, out_wid, n_parts):
        self.width = width
        self.n_parts = n_parts
        self.delayed_part_ops = [Signal(2, name="dpop%d" % i, reset_less=True)
                                 for i in range(8)]
        self.intermed = Signal(out_wid, reset_less=True)
        self.output = Signal(out_wid//2, reset_less=True)

    def elaborate(self, platform):
        m = Module()

        ol = []
        w = self.width
        sel = w // 8
        for i in range(self.n_parts):
            op = Signal(w, reset_less=True, name="op%d_%d" % (w, i))
            m.d.comb += op.eq(
                Mux(self.delayed_part_ops[sel * i] == OP_MUL_LOW,
                    self.intermed.bit_select(i * w*2, w),
                    self.intermed.bit_select(i * w*2 + w, w)))
            ol.append(op)
        m.d.comb += self.output.eq(Cat(*ol))

        return m


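# example for IntermediateOut (illustrative): the 16-bit instance
# (width == 16, n_parts == 4) treats bits [32*i, 32*i + 32) of ``intermed``
# as lane i's full product; OP_MUL_LOW selects the low 16 bits of that
# lane, any other op code selects the high 16 bits.

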
class FinalOut(Elaboratable):
    """ selects the final output based on the partitioning.

        each byte is selectable independently, i.e. it is possible
        that some partitions requested 8-bit computation whilst others
        requested 16 or 32 bit.
    """
    def __init__(self, out_wid):
        # inputs
        self.d8 = [Signal(name=f"d8_{i}", reset_less=True) for i in range(8)]
        self.d16 = [Signal(name=f"d16_{i}", reset_less=True) for i in range(4)]
        self.d32 = [Signal(name=f"d32_{i}", reset_less=True) for i in range(2)]

        self.i8 = Signal(out_wid, reset_less=True)
        self.i16 = Signal(out_wid, reset_less=True)
        self.i32 = Signal(out_wid, reset_less=True)
        self.i64 = Signal(out_wid, reset_less=True)

        # output
        self.out = Signal(out_wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        ol = []
        for i in range(8):
            # select one of the outputs: d8 selects i8, d16 selects i16
            # d32 selects i32, and the default is i64.
            # d8 and d16 are ORed together in the first Mux
            # then the 2nd selects either i8 or i16.
            # if neither d8 nor d16 are set, d32 selects either i32 or i64.
            op = Signal(8, reset_less=True, name="op_%d" % i)
            m.d.comb += op.eq(
                Mux(self.d8[i] | self.d16[i // 2],
                    Mux(self.d8[i], self.i8.bit_select(i * 8, 8),
                        self.i16.bit_select(i * 8, 8)),
                    Mux(self.d32[i // 4], self.i32.bit_select(i * 8, 8),
                        self.i64.bit_select(i * 8, 8))))
            ol.append(op)
        m.d.comb += self.out.eq(Cat(*ol))
        return m


class OrMod(Elaboratable):
    """ ORs four values together in a hierarchical tree
    """
    def __init__(self, wid):
        self.wid = wid
        self.orin = [Signal(wid, name="orin%d" % i, reset_less=True)
                     for i in range(4)]
        self.orout = Signal(wid, reset_less=True)

    def elaborate(self, platform):
        m = Module()
        or1 = Signal(self.wid, reset_less=True)
        or2 = Signal(self.wid, reset_less=True)
        m.d.comb += or1.eq(self.orin[0] | self.orin[1])
        m.d.comb += or2.eq(self.orin[2] | self.orin[3])
        m.d.comb += self.orout.eq(or1 | or2)

        return m


class Signs(Elaboratable):
    """ determines whether a and/or b need to be treated as signed,
        based on the requested operation type (OP_MUL_*)
    """

    def __init__(self):
        self.part_ops = Signal(2, reset_less=True)
        self.a_signed = Signal(reset_less=True)
        self.b_signed = Signal(reset_less=True)

    def elaborate(self, platform):

        m = Module()

        asig = self.part_ops != OP_MUL_UNSIGNED_HIGH
        bsig = (self.part_ops == OP_MUL_LOW) \
            | (self.part_ops == OP_MUL_SIGNED_HIGH)
        m.d.comb += self.a_signed.eq(asig)
        m.d.comb += self.b_signed.eq(bsig)

        return m


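# sign-selection table implied by Signs:
#
#     part_ops                       a_signed   b_signed
#     OP_MUL_LOW                        1          1
#     OP_MUL_SIGNED_HIGH                1          1
#     OP_MUL_SIGNED_UNSIGNED_HIGH       1          0
#     OP_MUL_UNSIGNED_HIGH              0          0
#
# (for OP_MUL_LOW the signs do not affect the result, so both are simply
# left enabled.)

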
class Mul8_16_32_64(Elaboratable):
    """Signed/Unsigned 8/16/32/64-bit partitioned integer multiplier.

    Supports partitioning into any combination of 8, 16, 32, and 64-bit
    partitions on naturally-aligned boundaries. Supports the operation being
    set for each partition independently.

    :attribute part_pts: the input partition points. Has a partition point at
        multiples of 8 in 0 < i < 64. Each partition point's associated
        ``Value`` is a ``Signal``. Modification not supported, except for by
        ``Signal.eq``.
    :attribute part_ops: the operation for each byte. The operation for a
        particular partition is selected by assigning the selected operation
        code to each byte in the partition. The allowed operation codes are:

        :attribute OP_MUL_LOW: the LSB half of the product. Equivalent to
            RISC-V's `mul` instruction.
        :attribute OP_MUL_SIGNED_HIGH: the MSB half of the product where both
            ``a`` and ``b`` are signed. Equivalent to RISC-V's `mulh`
            instruction.
        :attribute OP_MUL_SIGNED_UNSIGNED_HIGH: the MSB half of the product
            where ``a`` is signed and ``b`` is unsigned. Equivalent to
            RISC-V's `mulhsu` instruction.
        :attribute OP_MUL_UNSIGNED_HIGH: the MSB half of the product where
            both ``a`` and ``b`` are unsigned. Equivalent to RISC-V's `mulhu`
            instruction.
    """

    def __init__(self, register_levels=()):
        """ register_levels: specifies the points in the cascade at which
            flip-flops are to be inserted.
        """

        # parameter(s)
        self.register_levels = list(register_levels)

        # inputs
        self.part_pts = PartitionPoints()
        for i in range(8, 64, 8):
            self.part_pts[i] = Signal(name=f"part_pts_{i}")
        self.part_ops = [Signal(2, name=f"part_ops_{i}") for i in range(8)]
        self.a = Signal(64)
        self.b = Signal(64)

        # intermediates (needed for unit tests)
        self._intermediate_output = Signal(128)

        # output
        self.output = Signal(64)

    def _part_byte(self, index):
        if index == -1 or index == 7:
            return C(True, 1)
        assert index >= 0 and index < 8
        return self.part_pts[index * 8 + 8]

    def elaborate(self, platform):
        m = Module()

        # collect part-bytes
        pbs = Signal(8, reset_less=True)
        tl = []
        for i in range(8):
            pb = Signal(name="pb%d" % i, reset_less=True)
            m.d.comb += pb.eq(self._part_byte(i))
            tl.append(pb)
        m.d.comb += pbs.eq(Cat(*tl))

        # local variables
        signs = []
        for i in range(8):
            s = Signs()
            signs.append(s)
            setattr(m.submodules, "signs%d" % i, s)
            m.d.comb += s.part_ops.eq(self.part_ops[i])

        delayed_part_ops = [
            [Signal(2, name=f"_delayed_part_ops_{delay}_{i}")
             for i in range(8)]
            for delay in range(1 + len(self.register_levels))]
        for i in range(len(self.part_ops)):
            m.d.comb += delayed_part_ops[0][i].eq(self.part_ops[i])
            m.d.sync += [delayed_part_ops[j + 1][i].eq(delayed_part_ops[j][i])
                         for j in range(len(self.register_levels))]

        n_levels = len(self.register_levels)+1
        m.submodules.part_8 = part_8 = Part(128, 8, n_levels, 8)
        m.submodules.part_16 = part_16 = Part(128, 4, n_levels, 8)
        m.submodules.part_32 = part_32 = Part(128, 2, n_levels, 8)
        m.submodules.part_64 = part_64 = Part(128, 1, n_levels, 8)
        nat_l, nbt_l, nla_l, nlb_l = [], [], [], []
        for mod in [part_8, part_16, part_32, part_64]:
            m.d.comb += mod.a.eq(self.a)
            m.d.comb += mod.b.eq(self.b)
            for i in range(len(signs)):
                m.d.comb += mod.a_signed[i].eq(signs[i].a_signed)
                m.d.comb += mod.b_signed[i].eq(signs[i].b_signed)
            m.d.comb += mod.pbs.eq(pbs)
            nat_l.append(mod.not_a_term)
            nbt_l.append(mod.not_b_term)
            nla_l.append(mod.neg_lsb_a_term)
            nlb_l.append(mod.neg_lsb_b_term)

        terms = []

        for a_index in range(8):
            t = ProductTerms(8, 128, 8, a_index, 8)
            setattr(m.submodules, "terms_%d" % a_index, t)

            m.d.comb += t.a.eq(self.a)
            m.d.comb += t.b.eq(self.b)
            m.d.comb += t.pb_en.eq(pbs)

            for term in t.terms:
                terms.append(term)

        # it's fine to bitwise-or data together since they are never enabled
        # at the same time
        m.submodules.nat_or = nat_or = OrMod(128)
        m.submodules.nbt_or = nbt_or = OrMod(128)
        m.submodules.nla_or = nla_or = OrMod(128)
        m.submodules.nlb_or = nlb_or = OrMod(128)
        for l, mod in [(nat_l, nat_or),
                       (nbt_l, nbt_or),
                       (nla_l, nla_or),
                       (nlb_l, nlb_or)]:
            for i in range(len(l)):
                m.d.comb += mod.orin[i].eq(l[i])
            terms.append(mod.orout)

        expanded_part_pts = PartitionPoints()
        for i, v in self.part_pts.items():
            signal = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True)
            expanded_part_pts[i * 2] = signal
            m.d.comb += signal.eq(v)

        add_reduce = AddReduce(terms,
                               128,
                               self.register_levels,
                               expanded_part_pts)
        m.submodules.add_reduce = add_reduce
        m.d.comb += self._intermediate_output.eq(add_reduce.output)
        # create _output_64
        m.submodules.io64 = io64 = IntermediateOut(64, 128, 1)
        m.d.comb += io64.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io64.delayed_part_ops[i].eq(delayed_part_ops[-1][i])

        # create _output_32
        m.submodules.io32 = io32 = IntermediateOut(32, 128, 2)
        m.d.comb += io32.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io32.delayed_part_ops[i].eq(delayed_part_ops[-1][i])

        # create _output_16
        m.submodules.io16 = io16 = IntermediateOut(16, 128, 4)
        m.d.comb += io16.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io16.delayed_part_ops[i].eq(delayed_part_ops[-1][i])

        # create _output_8
        m.submodules.io8 = io8 = IntermediateOut(8, 128, 8)
        m.d.comb += io8.intermed.eq(self._intermediate_output)
        for i in range(8):
            m.d.comb += io8.delayed_part_ops[i].eq(delayed_part_ops[-1][i])

        # final output
        m.submodules.finalout = finalout = FinalOut(64)
        for i in range(len(part_8.delayed_parts[-1])):
            m.d.comb += finalout.d8[i].eq(part_8.dplast[i])
        for i in range(len(part_16.delayed_parts[-1])):
            m.d.comb += finalout.d16[i].eq(part_16.dplast[i])
        for i in range(len(part_32.delayed_parts[-1])):
            m.d.comb += finalout.d32[i].eq(part_32.dplast[i])
        m.d.comb += finalout.i8.eq(io8.output)
        m.d.comb += finalout.i16.eq(io16.output)
        m.d.comb += finalout.i32.eq(io32.output)
        m.d.comb += finalout.i64.eq(io64.output)
        m.d.comb += self.output.eq(finalout.out)

        return m


if __name__ == "__main__":
    m = Mul8_16_32_64()
    main(m, ports=[m.a,
                   m.b,
                   m._intermediate_output,
                   m.output,
                   *m.part_ops,
                   *m.part_pts.values()])
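
# usage sketch (not part of the design): the block above hooks into
# nmigen.cli, so, depending on the nmigen version installed, something
# along the lines of
#
#     python multiply.py generate -t il > multiply.il
#
# should emit the design as RTLIL for yosys ("-t v" selects Verilog
# instead). run "python multiply.py -h" to see what the local nmigen
# actually supports.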