# create a write-mask, anything with an "ok" in the Record fields
# [soc.git] / src / soc / experiment / compalu_multi.py
1 """Computation Unit (aka "ALU Manager").
2
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
9
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
11 """
12
13 from nmigen.compat.sim import run_simulation, Settle
14 from nmigen.cli import verilog, rtlil
15 from nmigen import Module, Signal, Mux, Elaboratable, Repl, Array, Cat, Const
16 from nmigen.hdl.rec import (Record, DIR_FANIN, DIR_FANOUT)
17
18 from nmutil.latch import SRLatch, latchregister
19 from nmutil.iocontrol import RecordObject
20
21 from soc.decoder.power_decoder2 import Data
22 from soc.decoder.power_enums import InternalOp
23 from soc.fu.regspec import RegSpec, RegSpecALUAPI
24
25
def go_record(n, name):
    """Build an n-wide go/rel handshake Record.

    The "go" field fans in (driven by the scoreboard); the "rel" field
    fans out (request lines driven by the Computation Unit).  Both are
    marked reset_less.
    """
    layout = [('go', n, DIR_FANIN),
              ('rel', n, DIR_FANOUT)]
    rec = Record(layout, name=name)
    for field in (rec.go, rec.rel):
        field.reset_less = True
    return rec
32
33 # see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
34
class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    Base class for Computation Units: bundles every scoreboard-facing
    signal into a single Record, providing a uniform API and allowing
    "record.connect" etc. to be used, particularly when connecting
    multiple Computation Units up as a block (very laborious otherwise).

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires.

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid: either an integer (specifies width of all regs) or a "regspec"

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on
    regspecs
    """
    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        RegSpec.__init__(self, rwid, n_src, n_dst)
        RecordObject.__init__(self, name)
        self._subkls = subkls
        n_src, n_dst = self._n_src, self._n_dst

        # source operands: src1_i, src2_i, ... widths from the regspec
        self._src_i = []
        for idx in range(n_src):
            attrname = "src%d_i" % (idx + 1)  # numbered from 1
            sig = Signal(self._get_srcwid(idx), name=attrname,
                         reset_less=True)
            setattr(self, attrname, sig)
            self._src_i.append(sig)

        # destination operands: dest1_o, dest2_o, ...
        self._dest = []
        for idx in range(n_dst):
            attrname = "dest%d_o" % (idx + 1)  # numbered from 1
            sig = Signal(self._get_dstwid(idx), name=attrname,
                         reset_less=True)
            setattr(self, attrname, sig)
            self._dest.append(sig)

        # operation / data input
        self.oper_i = subkls(name="oper_i")  # operand

        # read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="rd")  # read in, req out
        self.wr = go_record(n_dst, name="wr")  # write in, req out
        self.rdmaskn = Signal(n_src, reset_less=True)  # read mask
        self.issue_i = Signal(reset_less=True)  # fn issue in
        self.shadown_i = Signal(reset=1)  # shadow function, defaults to ON
        self.go_die_i = Signal()  # go die (reset)

        # output (busy/done)
        self.busy_o = Signal(reset_less=True)  # fn busy out
        self.done_o = Signal(reset_less=True)
93
94
class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    """Generic multi-source/multi-destination Computation Unit.

    Wraps an ALU (pipeline or FSM conforming to the nmutil Pipe API) with
    the SR-latch handshaking needed to interoperate with the scoreboard
    Dependency Matrices: operand read-request/go, write-request/go,
    issue, shadow and go_die.  See the module docstring and
    http://libre-soc.org/3d_gpu/architecture/compunit
    """
    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1):
        """MultiCompUnit

        * :rwid: width of register latches (TODO: allocate per regspec)
        * :alu: ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src: number of src operands
        * :n_dst: number of destination operands
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.opsubsetkls = opsubsetkls
        # single Record bundling all scoreboard-facing signals
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst)
        # n_src/n_dst may have come from a regspec: take the record's count
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print ("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands
        for i in range(n_src):
            j = i + 1 # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands
        for i in range(n_dst):
            j = i + 1 # name numbering to match dest1/2...
            name = "dest%d_o" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names (aliases straight onto the record)
        self.rd = cu.rd
        self.wr = cu.wr
        self.rdmaskn = cu.rdmaskn
        self.go_rd_i = self.rd.go # temporary naming
        self.go_wr_i = self.wr.go # temporary naming
        self.rd_rel_o = self.rd.rel # temporary naming
        self.req_rel_o = self.wr.rel # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        self.dest = cu._dest
        self.data_o = self.dest[0] # Dest out
        self.done_o = cu.done_o


    def _mux_op(self, m, sl, op_is_imm, imm, i):
        """Overwrite source-latch entry i so that an immediate (or zero)
        from the opcode may be muxed in instead of the regfile operand.

        sl[i] is [src, alusrc, latch, gate] (see elaborate); this swaps
        in the immediate-muxed source, re-routes the latch enable, and
        gates off the read-request when the immediate is selected.
        """
        # select imm if opcode says so. however also change the latch
        # to trigger *from* the opcode latch instead.
        src_or_imm = Signal(self.cu._get_srcwid(i), reset_less=True)
        src_sel = Signal(reset_less=True)
        m.d.comb += src_sel.eq(Mux(op_is_imm, self.opc_l.q, self.src_l.q[i]))
        m.d.comb += src_or_imm.eq(Mux(op_is_imm, imm, self.src_i[i]))
        # overwrite 1st src-latch with immediate-muxed stuff
        sl[i][0] = src_or_imm
        sl[i][2] = src_sel
        sl[i][3] = ~op_is_imm # change rd.rel[i] gate condition

    def elaborate(self, platform):
        m = Module()
        m.submodules.alu = self.alu
        # SR latches controlling the operation phases:
        #   src_l: one bit per src operand, "read still outstanding"
        #   opc_l: opcode captured / unit busy
        #   req_l: one bit per dest, "write request outstanding"
        #   rst_l: read-phase fully done
        #   rok_l: ok-to-read window (issue .. ALU result valid)
        m.submodules.src_l = src_l = SRLatch(False, self.n_src, name="src")
        m.submodules.opc_l = opc_l = SRLatch(sync=False, name="opc")
        m.submodules.req_l = req_l = SRLatch(False, self.n_dst, name="req")
        m.submodules.rst_l = rst_l = SRLatch(sync=False, name="rst")
        m.submodules.rok_l = rok_l = SRLatch(sync=False, name="rdok")
        self.opc_l, self.src_l = opc_l, src_l

        # ALU only proceeds when all src are ready. rd_rel_o is delayed
        # so combine it with go_rd_i. if all bits are set we're good
        all_rd = Signal(reset_less=True)
        m.d.comb += all_rd.eq(self.busy_o & rok_l.q &
                              (((~self.rd.rel) | self.rd.go).all()))

        # generate read-done pulse (one cycle, on the rising edge of all_rd)
        all_rd_dly = Signal(reset_less=True)
        all_rd_pulse = Signal(reset_less=True)
        m.d.sync += all_rd_dly.eq(all_rd)
        m.d.comb += all_rd_pulse.eq(all_rd & ~all_rd_dly)

        # create rising pulse from alu valid condition.
        alu_done = Signal(reset_less=True)
        alu_done_dly = Signal(reset_less=True)
        alu_pulse = Signal(reset_less=True)
        alu_pulsem = Signal(self.n_dst, reset_less=True)  # pulse per dest
        m.d.comb += alu_done.eq(self.alu.n.valid_o)
        m.d.sync += alu_done_dly.eq(alu_done)
        m.d.comb += alu_pulse.eq(alu_done & ~alu_done_dly)
        m.d.comb += alu_pulsem.eq(Repl(alu_pulse, self.n_dst))

        # write_requests all done
        # req_done works because any one of the last of the writes
        # is enough, when combined with when read-phase is done (rst_l.q)
        wr_any = Signal(reset_less=True)
        req_done = Signal(reset_less=True)
        m.d.comb += self.done_o.eq(self.busy_o & ~(self.wr.rel.bool()))
        m.d.comb += wr_any.eq(self.wr.go.bool())
        m.d.comb += req_done.eq(wr_any & ~self.alu.n.ready_i & (req_l.q == 0))

        # shadow/go_die: go_die forces every reset path simultaneously
        reset = Signal(reset_less=True)
        rst_r = Signal(reset_less=True) # reset latch off
        reset_w = Signal(self.n_dst, reset_less=True)  # per-dest req reset
        reset_r = Signal(self.n_src, reset_less=True)  # per-src latch reset
        m.d.comb += reset.eq(req_done | self.go_die_i)
        m.d.comb += rst_r.eq(self.issue_i | self.go_die_i)
        m.d.comb += reset_w.eq(self.wr.go | Repl(self.go_die_i, self.n_dst))
        m.d.comb += reset_r.eq(self.rd.go | Repl(self.go_die_i, self.n_src))

        # read-done,wr-proceed latch
        m.d.comb += rok_l.s.eq(self.issue_i) # set up when issue starts
        m.d.comb += rok_l.r.eq(self.alu.n.valid_o & self.busy_o) # ALU done

        # wr-done, back-to-start latch
        m.d.comb += rst_l.s.eq(all_rd) # set when read-phase is fully done
        m.d.comb += rst_l.r.eq(rst_r) # *off* on issue

        # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
        m.d.sync += opc_l.s.eq(self.issue_i) # set on issue
        m.d.sync += opc_l.r.eq(req_done) # reset on ALU

        # src operand latch (not using go_wr_i)
        m.d.sync += src_l.s.eq(Repl(self.issue_i, self.n_src))
        m.d.sync += src_l.r.eq(reset_r)

        # dest operand latch (not using issue_i)
        m.d.comb += req_l.s.eq(alu_pulsem)
        m.d.comb += req_l.r.eq(reset_w)

        # create a latch/register for the operand, captured on issue
        oper_r = self.opsubsetkls(name="oper_r")
        latchregister(m, self.oper_i, oper_r, self.issue_i, "oper_l")

        # and for each output from the ALU: capture when ALU output is valid
        drl = []
        wrok = []
        for i in range(self.n_dst):
            name = "data_r%d" % i
            lro = self.get_out(i)
            # default: output always valid (constant 1 write-mask bit)
            ok = Const(1, 1)
            if isinstance(lro, Record):
                data_r = Record.like(lro, name=name)
                if hasattr(data_r, "ok"): # bye-bye abstract interface design..
                    ok = data_r.ok
            else:
                data_r = Signal.like(lro, name=name, reset_less=True)
            wrok.append(ok)
            latchregister(m, lro, data_r, alu_pulsem, name + "_l")
            drl.append(data_r)

        # ok, above we collated anything with an "ok" on the output side
        # now actually use those to create a write-mask. this basically
        # is now the Function Unit API tells the Comp Unit "do not request
        # a regfile port because this particular output is not valid"
        wrmask = Signal(self.n_dst, reset_less=True)
        m.d.comb += wrmask.eq(Cat(*wrok))

        # pass the operation to the ALU
        m.d.comb += self.get_op().eq(oper_r)

        # create list of src/alu-src/src-latch. override 1st and 2nd one below.
        # in the case, for ALU and Logical pipelines, we assume RB is the
        # 2nd operand in the input "regspec". see for example
        # soc.fu.alu.pipe_data.ALUInputData
        sl = []
        print ("src_i", self.src_i)
        for i in range(self.n_src):
            # each entry: [src signal, alu input, latch enable, req gate]
            sl.append([self.src_i[i], self.get_in(i), src_l.q[i], Const(1,1)])

        # if the operand subset has "zero_a" we implicitly assume that means
        # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
        # see https://bugs.libre-soc.org/show_bug.cgi?id=336
        if hasattr(oper_r, "zero_a"):
            # select zero imm if opcode says so. however also change the latch
            # to trigger *from* the opcode latch instead.
            self._mux_op(m, sl, oper_r.zero_a, 0, 0)

        # if the operand subset has "imm_data" we implicitly assume that means
        # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
        if hasattr(oper_r, "imm_data"):
            # select immediate if opcode says so. however also change the latch
            # to trigger *from* the opcode latch instead.
            op_is_imm = oper_r.imm_data.imm_ok
            imm = oper_r.imm_data.imm
            self._mux_op(m, sl, op_is_imm, imm, 1)

        # create a latch/register for src1/src2 (even if it is a copy of imm)
        for i in range(self.n_src):
            src, alusrc, latch, _ = sl[i]
            latchregister(m, src, alusrc, latch, name="src_r%d" % i)

        # -----
        # ALU connection / interaction
        # -----

        # on a go_read, tell the ALU we're accepting data.
        m.submodules.alui_l = alui_l = SRLatch(False, name="alui")
        m.d.comb += self.alu.p.valid_i.eq(alui_l.q)
        m.d.sync += alui_l.r.eq(self.alu.p.ready_o & alui_l.q)
        m.d.comb += alui_l.s.eq(all_rd_pulse)

        # ALU output "ready" side. alu "ready" indication stays hi until
        # ALU says "valid".
        m.submodules.alu_l = alu_l = SRLatch(False, name="alu")
        m.d.comb += self.alu.n.ready_i.eq(alu_l.q)
        m.d.sync += alu_l.r.eq(self.alu.n.valid_o & alu_l.q)
        m.d.comb += alu_l.s.eq(all_rd_pulse)

        # -----
        # outputs
        # -----

        slg = Cat(*map(lambda x: x[3], sl)) # get req gate conditions
        # all request signals gated by busy_o. prevents picker problems
        m.d.comb += self.busy_o.eq(opc_l.q) # busy out

        # read-release gated by busy (and read-mask)
        bro = Repl(self.busy_o, self.n_src)
        m.d.comb += self.rd.rel.eq(src_l.q & bro & slg & ~self.rdmaskn)

        # write-release gated by busy and by shadow (and write-mask)
        brd = Repl(self.busy_o & self.shadown_i, self.n_dst)
        m.d.comb += self.wr.rel.eq(req_l.q & brd & wrmask)

        # output the data from the latch on go_write
        for i in range(self.n_dst):
            with m.If(self.wr.go[i]):
                m.d.comb += self.dest[i].eq(drl[i])

        return m

    def __iter__(self):
        # yield all externally-visible ports, for rtlil/verilog conversion.
        # NOTE(review): src1_i/src2_i are hard-coded here even though
        # n_src may differ (see __init__) - confirm whether units with
        # n_src != 2 need this list generalised.
        yield self.rd.go
        yield self.wr.go
        yield self.issue_i
        yield self.shadown_i
        yield self.go_die_i
        yield from self.oper_i.ports()
        yield self.src1_i
        yield self.src2_i
        yield self.busy_o
        yield self.rd.rel
        yield self.wr.rel
        yield self.data_o

    def ports(self):
        """Return the port list (see __iter__) as a list."""
        return list(self)
346
347
def op_sim(dut, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
    """Simulation process: drive one operation through a MultiCompUnit.

    Presents operands a/b and the opcode, pulses issue_i, performs the
    read-go handshake(s), waits for the write-request, pulses wr.go and
    returns the captured result.  Generator: must be run under
    run_simulation.
    """
    yield dut.issue_i.eq(0)
    yield
    yield dut.src_i[0].eq(a)
    yield dut.src_i[1].eq(b)
    yield dut.oper_i.insn_type.eq(op)
    yield dut.oper_i.invert_a.eq(inv_a)
    yield dut.oper_i.imm_data.imm.eq(imm)
    yield dut.oper_i.imm_data.imm_ok.eq(imm_ok)
    yield dut.oper_i.zero_a.eq(zero_a)
    # one-cycle issue pulse to latch the operation
    yield dut.issue_i.eq(1)
    yield
    yield dut.issue_i.eq(0)
    yield
    # read handshake needed unless *both* operands are muxed away
    # (zero_a replaces src1, imm_ok replaces src2)
    if not imm_ok or not zero_a:
        # assert both go bits; rd.rel gating ignores muxed-out operands
        yield dut.rd.go.eq(0b11)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print ("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)
    # third read port, if the regspec declared one
    if len(dut.src_i) == 3:
        yield dut.rd.go.eq(0b100)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print ("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)

    # wait for the write-request to be raised
    req_rel_o = yield dut.wr.rel
    result = yield dut.data_o
    print ("req_rel", req_rel_o, result)
    while True:
        req_rel_o = yield dut.wr.rel
        result = yield dut.data_o
        print ("req_rel", req_rel_o, result)
        if req_rel_o:
            break
        yield
    # pulse wr.go and sample the (combinatorially muxed) result
    yield dut.wr.go[0].eq(1)
    yield Settle()
    result = yield dut.data_o
    yield
    print ("result", result)
    yield dut.wr.go[0].eq(0)
    yield
    return result
399
400
def scoreboard_sim_dummy(dut):
    """Simulation process for the DummyALU: OP_NOP must pass operand
    "a" straight through to the result, regardless of the immediate."""
    for a_val in (5, 9):
        result = yield from op_sim(dut, a_val, 2, InternalOp.OP_NOP,
                                   inv_a=0, imm=8, imm_ok=1)
        assert result == a_val, result
409
410
def scoreboard_sim(dut):
    """Simulation process: run a series of OP_ADD operations through the
    MultiCompUnit, covering the invert_a, zero_a and immediate (imm_ok)
    operand-muxing combinations, and check each result.

    Fix: include the actual result in each assert message, consistent
    with scoreboard_sim_dummy, so a failure reports the wrong value.
    """
    # 5 + imm(8): RB replaced by the immediate
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 13, result

    # plain 5 + 2
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD)
    assert result == 7, result

    # (~5 + 2) mod 2^16: RA inverted, 16-bit wrap
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=1)
    assert result == 65532, result

    # 0 + imm(8): RA zeroed, RB replaced by the immediate
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1,
                               imm=8, imm_ok=1)
    assert result == 8, result

    # 0 + 2: RA zeroed only
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1)
    assert result == 2, result
428
429
def test_compunit():
    """Build a 16-bit ALU MultiCompUnit, dump its RTLIL netlist and run
    the scoreboard simulation against it."""
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    top = Module()
    cu = MultiCompUnit(16, ALU(16), CompALUOpSubset)
    top.submodules.cu = cu

    # write out the netlist for inspection
    with open("test_compunit1.il", "w") as il_file:
        il_file.write(rtlil.convert(cu, ports=cu.ports()))

    run_simulation(top, scoreboard_sim(cu), vcd_name='test_compunit1.vcd')
444
445
class CompUnitParallelTest:
    """Work-in-progress parallel-process testbench for MultiCompUnit.

    Runs one driver process plus one monitor process per read/write
    port, each independently checking the handshake protocol
    (rel must not rise before issue, must hold until go, etc.).
    """
    def __init__(self, dut):
        self.dut = dut

        # Operation cycle should not take longer than this:
        self.MAX_BUSY_WAIT = 50

        # Minimum duration in which issue_i will be kept inactive,
        # during which busy_o must remain low.
        self.MIN_BUSY_LOW = 5

        # Number of cycles to stall until the assertion of go.
        # One value, for each port. Can be zero, for no delay.
        self.RD_GO_DELAY = [0, 3]

        # store common data for the input operation of the processes
        # input operation:
        self.op = 0
        self.inv_a = self.zero_a = 0
        self.imm = self.imm_ok = 0
        # input data:
        self.a = self.b = 0

    def driver(self):
        """Main process: runs the operation sequence."""
        print("Begin parallel test.")
        yield from self.operation(5, 2, InternalOp.OP_ADD, inv_a=0,
                                  imm=8, imm_ok=1)

    def operation(self, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
        """Record the operation parameters (so the port monitor
        processes can see them) and trigger one operation cycle."""
        # store data for the operation
        self.a = a
        self.b = b
        self.op = op
        self.inv_a = inv_a
        self.imm = imm
        self.imm_ok = imm_ok
        self.zero_a = zero_a

        # trigger operation cycle
        yield from self.issue()

    def issue(self):
        """Pulse issue_i for one cycle, presenting the operation, then
        wait (with timeout) for busy_o to drop again."""
        # issue_i starts inactive
        yield self.dut.issue_i.eq(0)

        for n in range(self.MIN_BUSY_LOW):
            yield
            # busy_o must remain inactive. It cannot rise on its own.
            busy_o = yield self.dut.busy_o
            assert not busy_o

        # activate issue_i to begin the operation cycle
        yield self.dut.issue_i.eq(1)

        # at the same time, present the operation
        yield self.dut.oper_i.insn_type.eq(self.op)
        yield self.dut.oper_i.invert_a.eq(self.inv_a)
        yield self.dut.oper_i.imm_data.imm.eq(self.imm)
        yield self.dut.oper_i.imm_data.imm_ok.eq(self.imm_ok)
        yield self.dut.oper_i.zero_a.eq(self.zero_a)

        # give one cycle for the CompUnit to latch the data
        yield

        # busy_o must keep being low in this cycle, because issue_i was
        # low on the previous cycle.
        # It cannot rise on its own.
        # Also, busy_o and issue_i must never be active at the same time, ever.
        busy_o = yield self.dut.busy_o
        assert not busy_o

        # Lower issue_i
        yield self.dut.issue_i.eq(0)

        # deactivate inputs along with issue_i, so we can be sure the data
        # was latched at the correct cycle
        yield self.dut.oper_i.insn_type.eq(0)
        yield self.dut.oper_i.invert_a.eq(0)
        yield self.dut.oper_i.imm_data.imm.eq(0)
        yield self.dut.oper_i.imm_data.imm_ok.eq(0)
        yield self.dut.oper_i.zero_a.eq(0)
        yield

        # wait for busy_o to lower
        # timeout after self.MAX_BUSY_WAIT cycles
        for n in range(self.MAX_BUSY_WAIT):
            # sample busy_o in the current cycle
            busy_o = yield self.dut.busy_o
            if not busy_o:
                # operation cycle ends when busy_o becomes inactive
                break
            yield

        # if busy_o is still active, a timeout has occurred
        # TODO: Uncomment this, once the test is complete:
        # assert not busy_o

        if busy_o:
            print("If you are reading this, "
                  "it's because the above test failed, as expected,\n"
                  "with a timeout. It must pass, once the test is complete.")
            return

        print("If you are reading this, "
              "it's because the above test unexpectedly passed.")

    def rd(self, rd_idx):
        """Monitor process for one read port: checks rd.rel protocol
        and pulses rd.go (after RD_GO_DELAY[rd_idx] stall cycles)."""
        # wait for issue_i to rise
        while True:
            issue_i = yield self.dut.issue_i
            if issue_i:
                break
            # issue_i has not risen yet, so rd must keep low
            rel = yield self.dut.rd.rel[rd_idx]
            assert not rel
            yield

        # we do not want rd to rise on an immediate operand
        # if it is immediate, exit the process
        # TODO: don't exit the process, monitor rd instead to ensure it
        # doesn't rise on its own
        if (self.zero_a and rd_idx == 0) or (self.imm_ok and rd_idx == 1):
            return

        # issue_i has risen. rel must rise on the next cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel

        # stall for additional cycles. Check that rel doesn't fall on its own
        for n in range(self.RD_GO_DELAY[rd_idx]):
            yield
            rel = yield self.dut.rd.rel[rd_idx]
            assert rel

        # Before asserting "go", make sure "rel" has risen.
        # The use of Settle allows "go" to be set combinatorially,
        # rising on the same cycle as "rel".
        yield Settle()
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # assert go for one cycle
        yield self.dut.rd.go[rd_idx].eq(1)
        yield

        # rel must keep high, since go was inactive in the last cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert rel

        # finish the go one-clock pulse
        yield self.dut.rd.go[rd_idx].eq(0)
        yield

        # rel must have gone low in response to go being high
        # on the previous cycle
        rel = yield self.dut.rd.rel[rd_idx]
        assert not rel

        # TODO: also when dut.rd.go is set, put the expected value into
        # the src_i. use dut.get_in[rd_idx] to do so

    def wr(self, wr_idx):
        """Monitor process for one write port (stub)."""
        # monitor self.dut.wr.req[rd_idx] and sets dut.wr.go[idx] for one cycle
        yield
        # TODO: also when dut.wr.go is set, check the output against the
        # self.expected_o and assert. use dut.get_out(wr_idx) to do so.

    def run_simulation(self, vcd_name):
        """Run driver plus per-port monitor processes in parallel.
        (Intentionally shadows the module-level run_simulation name,
        which it calls.)"""
        run_simulation(self.dut, [self.driver(),
                                  self.rd(0),  # one read port (a)
                                  self.rd(1),  # one read port (b)
                                  self.wr(0),  # one write port (o)
                                  ],
                       vcd_name=vcd_name)
620
621
def test_compunit_regspec3():
    """Run the DummyALU pass-through simulation on a 3-source,
    1-destination regspec-based MultiCompUnit."""
    from alu_hier import DummyALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    regspec = ([('INT', 'a', '0:15'),
                ('INT', 'b', '0:15'),
                ('INT', 'c', '0:15')],
               [('INT', 'o', '0:15')])

    top = Module()
    cu = MultiCompUnit(regspec, DummyALU(16), CompALUOpSubset)
    top.submodules.cu = cu

    run_simulation(top, scoreboard_sim_dummy(cu),
                   vcd_name='test_compunit_regspec3.vcd')
641
642
def test_compunit_regspec1():
    """2-source, 1-destination regspec ALU unit: dump the RTLIL netlist,
    run the scoreboard simulation, then the parallel-process test."""
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    regspec = ([('INT', 'a', '0:15'),
                ('INT', 'b', '0:15')],
               [('INT', 'o', '0:15')])

    top = Module()
    cu = MultiCompUnit(regspec, ALU(16), CompALUOpSubset)
    top.submodules.cu = cu

    # write out the netlist for inspection
    with open("test_compunit_regspec1.il", "w") as il_file:
        il_file.write(rtlil.convert(cu, ports=cu.ports()))

    run_simulation(top, scoreboard_sim(cu),
                   vcd_name='test_compunit_regspec1.vcd')

    CompUnitParallelTest(cu).run_simulation("test_compunit_parallel.vcd")
668
669
if __name__ == '__main__':
    # run all three test scenarios when invoked as a script
    test_compunit()
    test_compunit_regspec1()
    test_compunit_regspec3()