add read-mask to MultiCompUnit
[soc.git] src/soc/experiment/compalu_multi.py
1 """Computation Unit (aka "ALU Manager").
2
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
9
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
11 """

from nmigen.compat.sim import run_simulation, Settle
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Repl, Array, Cat, Const
from nmigen.hdl.rec import (Record, DIR_FANIN, DIR_FANOUT)

from nmutil.latch import SRLatch, latchregister
from nmutil.iocontrol import RecordObject

from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.fu.regspec import RegSpec, RegSpecALUAPI


def go_record(n, name):
    r = Record([('go', n, DIR_FANIN),
                ('rel', n, DIR_FANOUT)], name=name)
    r.go.reset_less = True
    r.rel.reset_less = True
    return r
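
# for example (as used in CompUnitRecord below):
#     rd = go_record(n_src, name="rd")  # rd.rel requests reads, rd.go acks
#     wr = go_record(n_dst, name="wr")  # wr.rel requests writes, wr.go acks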

# see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
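#
# a "regspec" is a pair of lists of (regfile, register-name, bit-range)
# tuples, one per source and destination operand.  for example (taken from
# test_compunit_regspec1 at the bottom of this file):
#
#     inspec  = [('INT', 'a', '0:15'),
#                ('INT', 'b', '0:15')]
#     outspec = [('INT', 'o', '0:15')]
#     regspec = (inspec, outspec)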

class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    base class for Computation Units, to provide a uniform API
    and allow "record.connect" etc. to be used, particularly when
    it comes to connecting multiple Computation Units up as a block
    (very laborious)

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid: either an integer (specifies width of all regs) or a "regspec"

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
    """
    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        RegSpec.__init__(self, rwid, n_src, n_dst)
        RecordObject.__init__(self, name)
        self._subkls = subkls
        n_src, n_dst = self._n_src, self._n_dst

        # create source operands
        src = []
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            rw = self._get_srcwid(i)
            sreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, sreg)
            src.append(sreg)
        self._src_i = src

        # create dest operands
        dst = []
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            rw = self._get_dstwid(i)
            dreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, dreg)
            dst.append(dreg)
        self._dest = dst

        # operation / data input
        self.oper_i = subkls(name="oper_i")  # operand

        # create read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="rd")  # read in, req out
        self.wr = go_record(n_dst, name="wr")  # write in, req out
        self.rdmaskn = Signal(n_src, reset_less=True)  # read mask
        self.issue_i = Signal(reset_less=True)  # fn issue in
        self.shadown_i = Signal(reset=1)  # shadow function, defaults to ON
        self.go_die_i = Signal()  # go die (reset)

        # output (busy/done)
        self.busy_o = Signal(reset_less=True)  # fn busy out
        self.done_o = Signal(reset_less=True)


class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1):
        """MultiCompUnit

        * :rwid: width of register latches (TODO: allocate per regspec)
        * :alu: ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src: number of src operands
        * :n_dst: number of destination operands
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.opsubsetkls = opsubsetkls
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst)
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_o" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names
        self.rd = cu.rd
        self.wr = cu.wr
        self.rdmaskn = cu.rdmaskn
        self.go_rd_i = self.rd.go  # temporary naming
        self.go_wr_i = self.wr.go  # temporary naming
        self.rd_rel_o = self.rd.rel  # temporary naming
        self.req_rel_o = self.wr.rel  # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        self.dest = cu._dest
        self.data_o = self.dest[0]  # Dest out
        self.done_o = cu.done_o

    def _mux_op(self, m, sl, op_is_imm, imm, i):
        # select imm if opcode says so.  however also change the latch
        # to trigger *from* the opcode latch instead.
        src_or_imm = Signal(self.cu._get_srcwid(i), reset_less=True)
        src_sel = Signal(reset_less=True)
        m.d.comb += src_sel.eq(Mux(op_is_imm, self.opc_l.q, self.src_l.q[i]))
        m.d.comb += src_or_imm.eq(Mux(op_is_imm, imm, self.src_i[i]))
        # overwrite the i-th src-latch entry with the immediate-muxed version
        sl[i][0] = src_or_imm
        sl[i][2] = src_sel
        sl[i][3] = ~op_is_imm  # change rd.rel[i] gate condition

    def elaborate(self, platform):
        m = Module()
        m.submodules.alu = self.alu
        m.submodules.src_l = src_l = SRLatch(False, self.n_src, name="src")
        m.submodules.opc_l = opc_l = SRLatch(sync=False, name="opc")
        m.submodules.req_l = req_l = SRLatch(False, self.n_dst, name="req")
        m.submodules.rst_l = rst_l = SRLatch(sync=False, name="rst")
        m.submodules.rok_l = rok_l = SRLatch(sync=False, name="rdok")
        self.opc_l, self.src_l = opc_l, src_l
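
        # summary of the five latches above (assumption: roles inferred from
        # the set/reset wiring further down):
        #   opc_l - set on issue, cleared when all writes are done; drives busy_o
        #   src_l - one bit per source: read still outstanding (gates rd.rel)
        #   req_l - one bit per dest: write still outstanding (gates wr.rel)
        #   rst_l - read-phase complete, cleared again on issue/go_die
        #   rok_l - "read ok" window, from issue until the ALU result is valid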

        # ALU only proceeds when all src are ready.  rd_rel_o is delayed
        # so combine it with go_rd_i.  if all bits are set we're good
        all_rd = Signal(reset_less=True)
        m.d.comb += all_rd.eq(self.busy_o & rok_l.q &
                              (((~self.rd.rel) | self.rd.go).all()))

        # generate read-done pulse
        all_rd_dly = Signal(reset_less=True)
        all_rd_pulse = Signal(reset_less=True)
        m.d.sync += all_rd_dly.eq(all_rd)
        m.d.comb += all_rd_pulse.eq(all_rd & ~all_rd_dly)

        # create rising pulse from alu valid condition.
        alu_done = Signal(reset_less=True)
        alu_done_dly = Signal(reset_less=True)
        alu_pulse = Signal(reset_less=True)
        alu_pulsem = Signal(self.n_dst, reset_less=True)
        m.d.comb += alu_done.eq(self.alu.n.valid_o)
        m.d.sync += alu_done_dly.eq(alu_done)
        m.d.comb += alu_pulse.eq(alu_done & ~alu_done_dly)
        m.d.comb += alu_pulsem.eq(Repl(alu_pulse, self.n_dst))

        # write_requests all done
        # req_done works because any one of the last of the writes
        # is enough, when combined with the read-phase being done (rst_l.q)
        wr_any = Signal(reset_less=True)
        req_done = Signal(reset_less=True)
        m.d.comb += self.done_o.eq(self.busy_o & ~(self.wr.rel.bool()))
        m.d.comb += wr_any.eq(self.wr.go.bool())
        m.d.comb += req_done.eq(wr_any & ~self.alu.n.ready_i & (req_l.q == 0))

        # shadow/go_die
        reset = Signal(reset_less=True)
        rst_r = Signal(reset_less=True)  # reset latch off
        reset_w = Signal(self.n_dst, reset_less=True)
        reset_r = Signal(self.n_src, reset_less=True)
        m.d.comb += reset.eq(req_done | self.go_die_i)
        m.d.comb += rst_r.eq(self.issue_i | self.go_die_i)
        m.d.comb += reset_w.eq(self.wr.go | Repl(self.go_die_i, self.n_dst))
        m.d.comb += reset_r.eq(self.rd.go | Repl(self.go_die_i, self.n_src))

        # read-done, wr-proceed latch
        m.d.comb += rok_l.s.eq(self.issue_i)  # set up when issue starts
        m.d.comb += rok_l.r.eq(self.alu.n.valid_o & self.busy_o)  # ALU done

        # wr-done, back-to-start latch
        m.d.comb += rst_l.s.eq(all_rd)  # set when read-phase is fully done
        m.d.comb += rst_l.r.eq(rst_r)   # *off* on issue

        # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
        m.d.sync += opc_l.s.eq(self.issue_i)  # set on issue
        m.d.sync += opc_l.r.eq(req_done)  # reset when all writes are done

        # src operand latch (not using go_wr_i)
        m.d.sync += src_l.s.eq(Repl(self.issue_i, self.n_src))
        m.d.sync += src_l.r.eq(reset_r)

        # dest operand latch (not using issue_i)
        m.d.comb += req_l.s.eq(alu_pulsem)
        m.d.comb += req_l.r.eq(reset_w)

        # create a latch/register for the operand
        oper_r = self.opsubsetkls(name="oper_r")
        latchregister(m, self.oper_i, oper_r, self.issue_i, "oper_l")

        # and for each output from the ALU: capture when ALU output is valid
        drl = []
        for i in range(self.n_dst):
            name = "data_r%d" % i
            data_r = Signal(self.cu._get_dstwid(i), name=name, reset_less=True)
            latchregister(m, self.get_out(i), data_r, alu_pulsem, name + "_l")
            drl.append(data_r)

        # pass the operation to the ALU
        m.d.comb += self.get_op().eq(oper_r)

        # create list of src/alu-src/src-latch.  override 1st and 2nd one below.
        # in this case, for ALU and Logical pipelines, we assume RB is the
        # 2nd operand in the input "regspec".  see for example
        # soc.fu.alu.pipe_data.ALUInputData
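        # each entry of sl is [reg-port source, ALU input, latch enable,
        # rd.rel gate]; _mux_op() above replaces the source, latch enable and
        # gate when an operand comes from an immediate (or zero) instead of
        # from the register file.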
        sl = []
        print("src_i", self.src_i)
        for i in range(self.n_src):
            sl.append([self.src_i[i], self.get_in(i), src_l.q[i], Const(1, 1)])

        # if the operand subset has "zero_a" we implicitly assume that means
        # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
        # see https://bugs.libre-soc.org/show_bug.cgi?id=336
        if hasattr(oper_r, "zero_a"):
            # select zero imm if opcode says so.  however also change the latch
            # to trigger *from* the opcode latch instead.
            self._mux_op(m, sl, oper_r.zero_a, 0, 0)

        # if the operand subset has "imm_data" we implicitly assume that means
        # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
        if hasattr(oper_r, "imm_data"):
            # select immediate if opcode says so.  however also change the latch
            # to trigger *from* the opcode latch instead.
            op_is_imm = oper_r.imm_data.imm_ok
            imm = oper_r.imm_data.imm
            self._mux_op(m, sl, op_is_imm, imm, 1)

        # create a latch/register for src1/src2 (even if it is a copy of imm)
        for i in range(self.n_src):
            src, alusrc, latch, _ = sl[i]
            latchregister(m, src, alusrc, latch, name="src_r%d" % i)

        # -----
        # ALU connection / interaction
        # -----

        # once the read phase completes, present the operands to the ALU:
        # hold p.valid_i high until the ALU accepts them (p.ready_o)
        m.submodules.alui_l = alui_l = SRLatch(False, name="alui")
        m.d.comb += self.alu.p.valid_i.eq(alui_l.q)
        m.d.sync += alui_l.r.eq(self.alu.p.ready_o & alui_l.q)
        m.d.comb += alui_l.s.eq(all_rd_pulse)

        # ALU output "ready" side.  alu "ready" indication stays hi until
        # ALU says "valid".
        m.submodules.alu_l = alu_l = SRLatch(False, name="alu")
        m.d.comb += self.alu.n.ready_i.eq(alu_l.q)
        m.d.sync += alu_l.r.eq(self.alu.n.valid_o & alu_l.q)
        m.d.comb += alu_l.s.eq(all_rd_pulse)

        # -----
        # outputs
        # -----

        slg = Cat(*map(lambda x: x[3], sl))  # get req gate conditions
        # all request signals gated by busy_o.  prevents picker problems
        m.d.comb += self.busy_o.eq(opc_l.q)  # busy out

        # read-release gated by busy (and read-mask)
        bro = Repl(self.busy_o, self.n_src)
        m.d.comb += self.rd.rel.eq(src_l.q & bro & slg & ~self.rdmaskn)

        # write-release gated by busy and by shadow
        brd = Repl(self.busy_o & self.shadown_i, self.n_dst)
        m.d.comb += self.wr.rel.eq(req_l.q & brd)

        # output the data from the latch on go_write
        for i in range(self.n_dst):
            with m.If(self.wr.go[i]):
                m.d.comb += self.dest[i].eq(drl[i])

        return m

    def __iter__(self):
        yield self.rd.go
        yield self.wr.go
        yield self.issue_i
        yield self.shadown_i
        yield self.go_die_i
        yield from self.oper_i.ports()
        yield self.src1_i
        yield self.src2_i
        yield self.busy_o
        yield self.rd.rel
        yield self.wr.rel
        yield self.data_o

    def ports(self):
        return list(self)


def op_sim(dut, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
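    # drive one operation through the CompUnit: issue it, answer each rd.rel
    # with rd.go, wait for wr.rel, pulse wr.go and return the captured result
    # (a simulation process, used by scoreboard_sim / scoreboard_sim_dummy)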
    yield dut.issue_i.eq(0)
    yield
    yield dut.src_i[0].eq(a)
    yield dut.src_i[1].eq(b)
    yield dut.oper_i.insn_type.eq(op)
    yield dut.oper_i.invert_a.eq(inv_a)
    yield dut.oper_i.imm_data.imm.eq(imm)
    yield dut.oper_i.imm_data.imm_ok.eq(imm_ok)
    yield dut.oper_i.zero_a.eq(zero_a)
    yield dut.issue_i.eq(1)
    yield
    yield dut.issue_i.eq(0)
    yield
    if not imm_ok or not zero_a:
        yield dut.rd.go.eq(0b11)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)
    if len(dut.src_i) == 3:
        yield dut.rd.go.eq(0b100)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)

    req_rel_o = yield dut.wr.rel
    result = yield dut.data_o
    print("req_rel", req_rel_o, result)
    while True:
        req_rel_o = yield dut.wr.rel
        result = yield dut.data_o
        print("req_rel", req_rel_o, result)
        if req_rel_o:
            break
        yield
    yield dut.wr.go[0].eq(1)
    yield Settle()
    result = yield dut.data_o
    yield
    print("result", result)
    yield dut.wr.go[0].eq(0)
    yield
    return result


def scoreboard_sim_dummy(dut):
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 5, result

    result = yield from op_sim(dut, 9, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 9, result


def scoreboard_sim(dut):
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 13

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD)
    assert result == 7

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=1)
    assert result == 65532

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1,
                               imm=8, imm_ok=1)
    assert result == 8

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1)
    assert result == 2


def test_compunit():
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(16, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut), vcd_name='test_compunit1.vcd')


class CompUnitParallelTest:
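    # drives the MultiCompUnit with one simulation process per port: driver()
    # issues the operation, rd(0)/rd(1) answer the read-request ports and
    # wr(0) the write-request port - see run_simulation() at the end of
    # this class.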
    def __init__(self, dut):
        self.dut = dut

        # Operation cycle should not take longer than this:
        self.MAX_BUSY_WAIT = 50

        # Minimum duration in which issue_i will be kept inactive,
        # during which busy_o must remain low.
        self.MIN_BUSY_LOW = 5

        # Number of cycles to stall until the assertion of go.
        # One value, for each port. Can be zero, for no delay.
        self.RD_GO_DELAY = [0, 3]

        # store common data for the input operation of the processes
        # input operation:
        self.op = 0
        self.inv_a = self.zero_a = 0
        self.imm = self.imm_ok = 0
        # input data:
        self.a = self.b = 0

    def driver(self):
        print("Begin parallel test.")
        yield from self.operation(5, 2, InternalOp.OP_ADD, inv_a=0,
                                  imm=8, imm_ok=1)

    def operation(self, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
        # store data for the operation
        self.a = a
        self.b = b
        self.op = op
        self.inv_a = inv_a
        self.imm = imm
        self.imm_ok = imm_ok
        self.zero_a = zero_a

        # trigger operation cycle
        yield from self.issue()

    def issue(self):
        # issue_i starts inactive
        yield self.dut.issue_i.eq(0)

        for n in range(self.MIN_BUSY_LOW):
            yield
            # busy_o must remain inactive. It cannot rise on its own.
            busy_o = yield self.dut.busy_o
            assert not busy_o

        # activate issue_i to begin the operation cycle
        yield self.dut.issue_i.eq(1)

        # at the same time, present the operation
        yield self.dut.oper_i.insn_type.eq(self.op)
        yield self.dut.oper_i.invert_a.eq(self.inv_a)
        yield self.dut.oper_i.imm_data.imm.eq(self.imm)
        yield self.dut.oper_i.imm_data.imm_ok.eq(self.imm_ok)
        yield self.dut.oper_i.zero_a.eq(self.zero_a)

        # give one cycle for the CompUnit to latch the data
        yield

        # busy_o must keep being low in this cycle, because issue_i was
        # low on the previous cycle.
        # It cannot rise on its own.
        # Also, busy_o and issue_i must never be active at the same time, ever.
        busy_o = yield self.dut.busy_o
        assert not busy_o

        # Lower issue_i
        yield self.dut.issue_i.eq(0)

        # deactivate inputs along with issue_i, so we can be sure the data
        # was latched at the correct cycle
        yield self.dut.oper_i.insn_type.eq(0)
        yield self.dut.oper_i.invert_a.eq(0)
        yield self.dut.oper_i.imm_data.imm.eq(0)
        yield self.dut.oper_i.imm_data.imm_ok.eq(0)
        yield self.dut.oper_i.zero_a.eq(0)
        yield

        # wait for busy_o to lower
        # timeout after self.MAX_BUSY_WAIT cycles
        for n in range(self.MAX_BUSY_WAIT):
            # sample busy_o in the current cycle
            busy_o = yield self.dut.busy_o
            if not busy_o:
                # operation cycle ends when busy_o becomes inactive
                break
            yield

        # if busy_o is still active, a timeout has occurred
        # TODO: Uncomment this, once the test is complete:
        # assert not busy_o

        if busy_o:
            print("If you are reading this, "
                  "it's because the above test failed, as expected,\n"
                  "with a timeout. It must pass, once the test is complete.")
            return

        print("If you are reading this, "
              "it's because the above test unexpectedly passed.")

    def rd(self, rd_idx):
        # wait for issue_i to rise
        while True:
            issue_i = yield self.dut.issue_i
            if issue_i:
                break
            # issue_i has not risen yet, so rd.rel must stay low
            rel = yield self.dut.rd.rel[rd_idx]
            assert not rel
            yield

        # we do not want rd.rel to rise on an immediate operand
        # if it is immediate, exit the process
        # TODO: don't exit the process, monitor rd.rel instead to ensure it
        # doesn't rise on its own
        if (self.zero_a and rd_idx == 0) or (self.imm_ok and rd_idx == 1):
            return

        # issue_i has risen. rel must rise on the next cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel

        # stall for additional cycles. Check that rel doesn't fall on its own
        for n in range(self.RD_GO_DELAY[rd_idx]):
            yield
            rel = yield self.dut.rd.rel[rd_idx]
            assert rel

        # Before asserting "go", make sure "rel" has risen.
        # The use of Settle allows "go" to be set combinatorially,
        # rising on the same cycle as "rel".
        yield Settle()
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # assert go for one cycle
        yield self.dut.rd.go[rd_idx].eq(1)
        yield

        # rel must keep high, since go was inactive in the last cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # finish the go one-clock pulse
        yield self.dut.rd.go[rd_idx].eq(0)
        yield

        # rel must have gone low in response to go being high
        # on the previous cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel

        # TODO: also when dut.rd.go is set, put the expected value into
        # the src_i.  use dut.get_in[rd_idx] to do so

    def wr(self, wr_idx):
        # monitor self.dut.wr.rel[wr_idx] and set dut.wr.go[wr_idx] for one cycle
        yield
        # TODO: also when dut.wr.go is set, check the output against the
        # self.expected_o and assert.  use dut.get_out(wr_idx) to do so.

    def run_simulation(self, vcd_name):
        run_simulation(self.dut, [self.driver(),
                                  self.rd(0),  # one read port (a)
                                  self.rd(1),  # one read port (b)
                                  self.wr(0),  # one write port (o)
                                  ],
                       vcd_name=vcd_name)


def test_compunit_regspec3():
    from alu_hier import DummyALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15'),
              ('INT', 'c', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = DummyALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    run_simulation(m, scoreboard_sim_dummy(dut),
                   vcd_name='test_compunit_regspec3.vcd')


def test_compunit_regspec1():
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit_regspec1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut),
                   vcd_name='test_compunit_regspec1.vcd')

    test = CompUnitParallelTest(dut)
    test.run_simulation("test_compunit_parallel.vcd")


if __name__ == '__main__':
    test_compunit()
    test_compunit_regspec1()
    test_compunit_regspec3()