1 """Computation Unit (aka "ALU Manager").
2
3 Manages a Pipeline or FSM, ensuring that the start and end time are 100%
4 monitored. At no time may the ALU proceed without this module notifying
5 the Dependency Matrices. At no time is a result production "abandoned".
6 This module blocks (indicates busy) starting from when it first receives
7 an opcode until it receives notification that
8 its result(s) have been successfully stored in the regfile(s)
9
10 Documented at http://libre-soc.org/3d_gpu/architecture/compunit
11 """

from nmigen.compat.sim import run_simulation
from nmigen.cli import verilog, rtlil
from nmigen import Module, Signal, Mux, Elaboratable, Repl, Array, Cat, Const
from nmigen.hdl.rec import (Record, DIR_FANIN, DIR_FANOUT)

from nmutil.latch import SRLatch, latchregister
from nmutil.iocontrol import RecordObject

from soc.decoder.power_decoder2 import Data
from soc.decoder.power_enums import InternalOp
from soc.fu.regspec import RegSpec, RegSpecALUAPI


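# helper: build the go/rel handshake Record used for the read and write ports:
# 'rel' is the request raised by the Computation Unit (fan-out), 'go' is the
# acknowledgement driven back by the scoreboard (fan-in)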
def go_record(n, name):
    r = Record([('go', n, DIR_FANIN),
                ('rel', n, DIR_FANOUT)], name=name)
    r.go.reset_less = True
    r.rel.reset_less = True
    return r

# see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs

class CompUnitRecord(RegSpec, RecordObject):
    """CompUnitRecord

    base class for Computation Units, to provide a uniform API
    and allow "record.connect" etc. to be used, particularly when
    it comes to connecting multiple Computation Units up as a block
    (very laborious)

    LDSTCompUnitRecord should derive from this class and add the
    additional signals it requires

    :subkls: the class (not an instance) needed to construct the opcode
    :rwid: either an integer (specifies width of all regs) or a "regspec"

    see https://libre-soc.org/3d_gpu/architecture/regfile/ section on regspecs
    """
    def __init__(self, subkls, rwid, n_src=None, n_dst=None, name=None):
        RegSpec.__init__(self, rwid, n_src, n_dst)
        RecordObject.__init__(self, name)
        self._subkls = subkls
        n_src, n_dst = self._n_src, self._n_dst

        # create source operands
        src = []
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            rw = self._get_srcwid(i)
            sreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, sreg)
            src.append(sreg)
        self._src_i = src

        # create dest operands
        dst = []
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_i" % j
            rw = self._get_dstwid(i)
            dreg = Signal(rw, name=name, reset_less=True)
            setattr(self, name, dreg)
            dst.append(dreg)
        self._dest = dst

        # operation / data input
        self.oper_i = subkls(name="oper_i")  # operation (opcode subset)

        # create read/write and other scoreboard signalling
        self.rd = go_record(n_src, name="rd")  # read in, req out
        self.wr = go_record(n_dst, name="wr")  # write in, req out
        self.issue_i = Signal(reset_less=True)  # fn issue in
        self.shadown_i = Signal(reset=1)  # shadow function, defaults to ON
        self.go_die_i = Signal()  # go die (reset)

        # output (busy/done)
        self.busy_o = Signal(reset_less=True)  # fn busy out
        self.done_o = Signal(reset_less=True)


class MultiCompUnit(RegSpecALUAPI, Elaboratable):
    def __init__(self, rwid, alu, opsubsetkls, n_src=2, n_dst=1):
        """MultiCompUnit

        * :rwid: width of register latches (TODO: allocate per regspec)
        * :alu: ALU (pipeline, FSM) - must conform to nmutil Pipe API
        * :opsubsetkls: subset of Decode2ExecuteType
        * :n_src: number of src operands
        * :n_dst: number of destination operands
        """
        RegSpecALUAPI.__init__(self, rwid, alu)
        self.opsubsetkls = opsubsetkls
        self.cu = cu = CompUnitRecord(opsubsetkls, rwid, n_src, n_dst)
        n_src, n_dst = self.n_src, self.n_dst = cu._n_src, cu._n_dst
        print("n_src %d n_dst %d" % (self.n_src, self.n_dst))

        # convenience names for src operands
        for i in range(n_src):
            j = i + 1  # name numbering to match src1/src2
            name = "src%d_i" % j
            setattr(self, name, getattr(cu, name))

        # convenience names for dest operands
        for i in range(n_dst):
            j = i + 1  # name numbering to match dest1/2...
            name = "dest%d_i" % j
            setattr(self, name, getattr(cu, name))

        # more convenience names
        self.rd = cu.rd
        self.wr = cu.wr
        self.go_rd_i = self.rd.go  # temporary naming
        self.go_wr_i = self.wr.go  # temporary naming
        self.rd_rel_o = self.rd.rel  # temporary naming
        self.req_rel_o = self.wr.rel  # temporary naming
        self.issue_i = cu.issue_i
        self.shadown_i = cu.shadown_i
        self.go_die_i = cu.go_die_i

        # operation / data input
        self.oper_i = cu.oper_i
        self.src_i = cu._src_i

        self.busy_o = cu.busy_o
        self.dest = cu._dest
        self.data_o = self.dest[0]  # Dest out
        self.done_o = cu.done_o


    def _mux_op(self, m, sl, op_is_imm, imm, i):
        # select imm if opcode says so. however also change the latch
        # to trigger *from* the opcode latch instead.
        src_or_imm = Signal(self.cu._get_srcwid(i), reset_less=True)
        src_sel = Signal(reset_less=True)
        m.d.comb += src_sel.eq(Mux(op_is_imm, self.opc_l.q, self.src_l.q[i]))
        m.d.comb += src_or_imm.eq(Mux(op_is_imm, imm, self.src_i[i]))
        # overwrite 1st src-latch with immediate-muxed stuff
        sl[i][0] = src_or_imm
        sl[i][2] = src_sel
        sl[i][3] = ~op_is_imm  # change rd.rel[i] gate condition

    def elaborate(self, platform):
        m = Module()
        m.submodules.alu = self.alu
        m.submodules.src_l = src_l = SRLatch(False, self.n_src, name="src")
        m.submodules.opc_l = opc_l = SRLatch(sync=False, name="opc")
        m.submodules.req_l = req_l = SRLatch(False, self.n_dst, name="req")
        m.submodules.rst_l = rst_l = SRLatch(sync=False, name="rst")
        m.submodules.rok_l = rok_l = SRLatch(sync=False, name="rdok")
        self.opc_l, self.src_l = opc_l, src_l
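        # latch roles (descriptive summary of the set/reset terms below):
        #   opc_l - opcode/busy latch: set on issue_i, cleared once the ALU
        #           result is valid and the write requests are done
        #   src_l - one bit per source: set on issue_i, cleared by rd.go
        #   req_l - one bit per dest: set when all reads done, cleared by wr.go
        #   rst_l - read-phase-done marker: set by all_rd, cleared on issue_i
        #   rok_l - "read ok": set on issue_i, cleared when the ALU accepts
        #           its input (alu.p.ready_o)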

        # ALU only proceeds when all src are ready. rd_rel_o is delayed
        # so combine it with go_rd_i. if all bits are set we're good
        all_rd = Signal(reset_less=True)
        m.d.comb += all_rd.eq(self.busy_o & rok_l.q &
                              (((~self.rd.rel) | self.rd.go).all()))

        # write_requests all done
        # req_done works because any one of the last of the writes
        # is enough, when combined with when read-phase is done (rst_l.q)
        wr_any = Signal(reset_less=True)
        req_done = Signal(reset_less=True)
        m.d.comb += self.done_o.eq(self.busy_o & ~(self.wr.rel.bool()))
        m.d.comb += wr_any.eq(self.wr.go.bool())
        m.d.comb += req_done.eq(rst_l.q & wr_any)

        # shadow/go_die
        reset = Signal(reset_less=True)
        rst_r = Signal(reset_less=True)  # reset latch off
        reset_w = Signal(self.n_dst, reset_less=True)
        reset_r = Signal(self.n_src, reset_less=True)
        m.d.comb += reset.eq(req_done | self.go_die_i)
        m.d.comb += rst_r.eq(self.issue_i | self.go_die_i)
        m.d.comb += reset_w.eq(self.wr.go | Repl(self.go_die_i, self.n_dst))
        m.d.comb += reset_r.eq(self.rd.go | Repl(self.go_die_i, self.n_src))

        # read-done, wr-proceed latch
        m.d.comb += rok_l.s.eq(self.issue_i)  # set up when issue starts
        m.d.comb += rok_l.r.eq(self.alu.p.ready_o)  # off when ALU acknowledges

        # wr-done, back-to-start latch
        m.d.comb += rst_l.s.eq(all_rd)  # set when read-phase is fully done
        m.d.comb += rst_l.r.eq(rst_r)   # *off* on issue

        # opcode latch (not using go_rd_i) - inverted so that busy resets to 0
        m.d.sync += opc_l.s.eq(self.issue_i)  # set on issue
        m.d.sync += opc_l.r.eq(self.alu.n.valid_o & req_done)  # reset on ALU

        # src operand latch (not using go_wr_i)
        m.d.sync += src_l.s.eq(Repl(self.issue_i, self.n_src))
        m.d.sync += src_l.r.eq(reset_r)

        # dest operand latch (not using issue_i)
        m.d.sync += req_l.s.eq(Repl(all_rd, self.n_dst))
        m.d.sync += req_l.r.eq(reset_w)

        # create a latch/register for the operand
        oper_r = self.opsubsetkls(name="oper_r")
        latchregister(m, self.oper_i, oper_r, self.issue_i, "oper_l")

        # and for each output from the ALU
        drl = []
        for i in range(self.n_dst):
            name = "data_r%d" % i
            data_r = Signal(self.cu._get_dstwid(i), name=name, reset_less=True)
            latchregister(m, self.get_out(i), data_r, req_l.q[i], name + "_l")
            drl.append(data_r)

        # pass the operation to the ALU
        m.d.comb += self.get_op().eq(oper_r)

        # create a list of src/alu-src/src-latch entries; the 1st and 2nd are
        # overridden below. in the case of the ALU and Logical pipelines, we
        # assume RB is the 2nd operand in the input "regspec". see for example
        # soc.fu.alu.pipe_data.ALUInputData
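        # each sl entry is [latch data input, ALU operand, latch enable,
        #                   rd.rel gate]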
        sl = []
        print("src_i", self.src_i)
        for i in range(self.n_src):
            sl.append([self.src_i[i], self.get_in(i), src_l.q[i], Const(1, 1)])

        # if the operand subset has "zero_a" we implicitly assume that means
        # src_i[0] is an INT reg type where zero can be multiplexed in, instead.
        # see https://bugs.libre-soc.org/show_bug.cgi?id=336
        if hasattr(oper_r, "zero_a"):
            # select zero imm if opcode says so. however also change the latch
            # to trigger *from* the opcode latch instead.
            self._mux_op(m, sl, oper_r.zero_a, 0, 0)

        # if the operand subset has "imm_data" we implicitly assume that means
        # "this is an INT ALU/Logical FU jobbie, RB is muxed with the immediate"
        if hasattr(oper_r, "imm_data"):
            # select immediate if opcode says so. however also change the latch
            # to trigger *from* the opcode latch instead.
            op_is_imm = oper_r.imm_data.imm_ok
            imm = oper_r.imm_data.imm
            self._mux_op(m, sl, op_is_imm, imm, 1)

        # create a latch/register for src1/src2 (even if it is a copy of imm)
        for i in range(self.n_src):
            src, alusrc, latch, _ = sl[i]
            latchregister(m, src, alusrc, latch, name="src_r%d" % i)

        # -----
        # outputs
        # -----

        slg = Cat(*map(lambda x: x[3], sl))  # get req gate conditions
        # all request signals gated by busy_o. prevents picker problems
        m.d.comb += self.busy_o.eq(opc_l.q)  # busy out
        bro = Repl(self.busy_o, self.n_src)
        m.d.comb += self.rd.rel.eq(src_l.q & bro & slg)  # src1/src2 req rel

        # on a go_read, tell the ALU we're accepting data.
        # NOTE: this spells TROUBLE if the ALU isn't ready!
        # go_read is only valid for one clock!
        with m.If(all_rd):  # src operands ready, GO!
            with m.If(~self.alu.p.ready_o):  # no ACK yet
                m.d.comb += self.alu.p.valid_i.eq(1)  # so indicate valid

        brd = Repl(self.busy_o & self.shadown_i, self.n_dst)
        # only proceed if the ALU says its output is valid
        with m.If(self.alu.n.valid_o):
            # when ALU ready, write req release out. waits for shadow
            m.d.comb += self.wr.rel.eq(req_l.q & brd)
            # when output latch is ready, and ALU says ready, accept ALU output
            with m.If(reset):
                m.d.comb += self.alu.n.ready_i.eq(1)  # tells ALU "got it"

        # output the data from the latch on go_write
        for i in range(self.n_dst):
            with m.If(self.wr.go[i]):
                m.d.comb += self.dest[i].eq(drl[i])

        return m

    def __iter__(self):
        yield self.rd.go
        yield self.wr.go
        yield self.issue_i
        yield self.shadown_i
        yield self.go_die_i
        yield from self.oper_i.ports()
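        # note: the explicit src1_i / src2_i / data_o yields below assume the
        # default two-source, one-destination configuration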
        yield self.src1_i
        yield self.src2_i
        yield self.busy_o
        yield self.rd.rel
        yield self.wr.rel
        yield self.data_o

    def ports(self):
        return list(self)


def op_sim(dut, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
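    """Drive one operation through the DUT: pulse issue_i with the operands
    and opcode, assert rd.go and wait for rd.rel, then wait for wr.rel,
    pulse wr.go and return the result sampled from data_o.
    """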
    yield dut.issue_i.eq(0)
    yield
    yield dut.src_i[0].eq(a)
    yield dut.src_i[1].eq(b)
    yield dut.oper_i.insn_type.eq(op)
    yield dut.oper_i.invert_a.eq(inv_a)
    yield dut.oper_i.imm_data.imm.eq(imm)
    yield dut.oper_i.imm_data.imm_ok.eq(imm_ok)
    yield dut.oper_i.zero_a.eq(zero_a)
    yield dut.issue_i.eq(1)
    yield
    yield dut.issue_i.eq(0)
    yield
    if not imm_ok or not zero_a:
        yield dut.rd.go.eq(0b11)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)
    if len(dut.src_i) == 3:
        yield dut.rd.go.eq(0b100)
        while True:
            yield
            rd_rel_o = yield dut.rd.rel
            print("rd_rel", rd_rel_o)
            if rd_rel_o:
                break
        yield dut.rd.go.eq(0)

    req_rel_o = yield dut.wr.rel
    result = yield dut.data_o
    print("req_rel", req_rel_o, result)
    while True:
        req_rel_o = yield dut.wr.rel
        result = yield dut.data_o
        print("req_rel", req_rel_o, result)
        if req_rel_o:
            break
        yield
    yield dut.wr.go[0].eq(1)
    yield
    result = yield dut.data_o
    print("result", result)
    yield dut.wr.go[0].eq(0)
    yield
    return result


def scoreboard_sim_dummy(dut):
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 5, result

    result = yield from op_sim(dut, 9, 2, InternalOp.OP_NOP, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 9, result


def scoreboard_sim(dut):
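    # expected results, given the 16-bit ALU: imm_ok muxes the immediate in
    # place of b (5 + 8 = 13), inv_a inverts a ((~5 + 2) & 0xffff = 65532),
    # and zero_a replaces a with zero (0 + 8 = 8, 0 + 2 = 2)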
    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=0,
                               imm=8, imm_ok=1)
    assert result == 13

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD)
    assert result == 7

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, inv_a=1)
    assert result == 65532

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1,
                               imm=8, imm_ok=1)
    assert result == 8

    result = yield from op_sim(dut, 5, 2, InternalOp.OP_ADD, zero_a=1)
    assert result == 2


def test_compunit():
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(16, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut), vcd_name='test_compunit1.vcd')


class CompUnitParallelTest:
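    """Exercise a MultiCompUnit with separate driver, rd and wr processes
    running in parallel, checking the busy_o / issue_i / rel / go handshake
    cycle by cycle (work in progress - see the TODOs below).
    """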
    def __init__(self, dut):
        self.dut = dut

        # Operation cycle should not take longer than this:
        self.MAX_BUSY_WAIT = 50

        # Minimum duration in which issue_i will be kept inactive,
        # during which busy_o must remain low.
        self.MIN_BUSY_LOW = 5

        # Number of cycles to stall before asserting go:
        # one positive, non-zero value per read port.
        self.RD_GO_DELAY = [3, 1]

        # store common data for the input operation of the processes
        # input operation:
        self.op = 0
        self.inv_a = self.zero_a = 0
        self.imm = self.imm_ok = 0
        # input data:
        self.a = self.b = 0

    def driver(self):
        print("Begin parallel test.")
        yield from self.operation(5, 2, InternalOp.OP_ADD, inv_a=0,
                                  imm=8, imm_ok=1)

    def operation(self, a, b, op, inv_a=0, imm=0, imm_ok=0, zero_a=0):
        # store data for the operation
        self.a = a
        self.b = b
        self.op = op
        self.inv_a = inv_a
        self.imm = imm
        self.imm_ok = imm_ok
        self.zero_a = zero_a

        # trigger operation cycle
        yield from self.issue()

    def issue(self):
        # issue_i starts inactive
        yield self.dut.issue_i.eq(0)

        for n in range(self.MIN_BUSY_LOW):
            yield
            # busy_o must remain inactive. It cannot rise on its own.
            busy_o = yield self.dut.busy_o
            assert not busy_o

        # activate issue_i to begin the operation cycle
        yield self.dut.issue_i.eq(1)

        # at the same time, present the operation
        yield self.dut.oper_i.insn_type.eq(self.op)
        yield self.dut.oper_i.invert_a.eq(self.inv_a)
        yield self.dut.oper_i.imm_data.imm.eq(self.imm)
        yield self.dut.oper_i.imm_data.imm_ok.eq(self.imm_ok)
        yield self.dut.oper_i.zero_a.eq(self.zero_a)

        # give one cycle for the CompUnit to latch the data
        yield

        # busy_o must still be low in this cycle, because issue_i was
        # low on the previous cycle.
        # It cannot rise on its own.
        # Also, busy_o and issue_i must never be active at the same time, ever.
        busy_o = yield self.dut.busy_o
        assert not busy_o

        # Lower issue_i
        yield self.dut.issue_i.eq(0)

        # deactivate inputs along with issue_i, so we can be sure the data
        # was latched at the correct cycle
        yield self.dut.oper_i.insn_type.eq(0)
        yield self.dut.oper_i.invert_a.eq(0)
        yield self.dut.oper_i.imm_data.imm.eq(0)
        yield self.dut.oper_i.imm_data.imm_ok.eq(0)
        yield self.dut.oper_i.zero_a.eq(0)
        yield

        # wait for busy_o to lower
        # timeout after self.MAX_BUSY_WAIT cycles
        for n in range(self.MAX_BUSY_WAIT):
            # sample busy_o in the current cycle
            busy_o = yield self.dut.busy_o
            if not busy_o:
                # operation cycle ends when busy_o becomes inactive
                break
            yield

        # if busy_o is still active, a timeout has occurred
        # TODO: Uncomment this, once the test is complete:
        # assert not busy_o

        if busy_o:
            print("If you are reading this, "
                  "it's because the above test failed, as expected,\n"
                  "with a timeout. It must pass, once the test is complete.")
            return

        print("If you are reading this, "
              "it's because the above test unexpectedly passed.")

    def rd(self, rd_idx):
        # wait for issue_i to rise
        while True:
            issue_i = yield self.dut.issue_i
            if issue_i:
                break
            # issue_i has not risen yet, so rd must keep low
            rd = yield self.dut.rd.rel[rd_idx]
            assert not rd
            yield

        # we do not want rd to rise on an immediate operand
        # if it is immediate, exit the process
        # TODO: don't exit the process, monitor rd instead to ensure it
        # doesn't rise on its own
        if (self.zero_a and rd_idx == 0) or (self.imm_ok and rd_idx == 1):
            return

        # issue_i has risen. rd must rise on the next cycle
        rd = yield self.dut.rd.rel[rd_idx]
        assert not rd

        # stall for additional cycles. Check that rel doesn't fall on its own
        for n in range(self.RD_GO_DELAY[rd_idx]):
            yield
            rd = yield self.dut.rd.rel[rd_idx]
            assert rd

        # assert go for one cycle
        yield self.dut.rd.go[rd_idx].eq(1)
        yield

        # rel must keep high, since go was inactive in the last cycle
        rd = yield self.dut.rd.rel[rd_idx]
        assert rd

        # finish the go one-clock pulse
        yield self.dut.rd.go[rd_idx].eq(0)
        yield

        # rel must have gone low in response to go being high
        # on the previous cycle
        rd = yield self.dut.rd.rel[rd_idx]
        assert not rd

        # TODO: also when dut.rd.go is set, put the expected value into
        # the src_i. use dut.get_in(rd_idx) to do so

    def wr(self, wr_idx):
        # monitor self.dut.wr.rel[wr_idx] and set dut.wr.go[wr_idx] for one cycle
        yield
        # TODO: also when dut.wr.go is set, check the output against the
        # self.expected_o and assert. use dut.get_out(wr_idx) to do so.

    def run_simulation(self, vcd_name):
        run_simulation(self.dut, [self.driver(),
                                  self.rd(0),  # one read port (a)
                                  self.rd(1),  # one read port (b)
                                  self.wr(0),  # one write port (o)
                                  ],
                       vcd_name=vcd_name)


def test_compunit_regspec3():
    from alu_hier import DummyALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

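    # regspec format (see the regspec page linked above): each entry is
    # (regfile, operand name, bit-range), i.e. three 16-bit INT sources
    # and one 16-bit INT destination here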
    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15'),
              ('INT', 'c', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = DummyALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    run_simulation(m, scoreboard_sim_dummy(dut),
                   vcd_name='test_compunit_regspec3.vcd')


def test_compunit_regspec1():
    from alu_hier import ALU
    from soc.fu.alu.alu_input_record import CompALUOpSubset

    inspec = [('INT', 'a', '0:15'),
              ('INT', 'b', '0:15')]
    outspec = [('INT', 'o', '0:15'),
               ]

    regspec = (inspec, outspec)

    m = Module()
    alu = ALU(16)
    dut = MultiCompUnit(regspec, alu, CompALUOpSubset)
    m.submodules.cu = dut

    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_compunit_regspec1.il", "w") as f:
        f.write(vl)

    run_simulation(m, scoreboard_sim(dut),
                   vcd_name='test_compunit_regspec1.vcd')

    test = CompUnitParallelTest(dut)
    test.run_simulation("test_compunit_parallel.vcd")


if __name__ == '__main__':
    test_compunit()
    test_compunit_regspec1()
    test_compunit_regspec3()