1 """ Pipeline and BufferedPipeline implementation, conforming to the same API.
2
3 eq:
4 --
5
6     a strategically very important function that is identical in purpose
7     to nmigen's Signal.eq function, except that it may take objects, or a
8     list of objects, or a tuple of objects, and where the objects may
9     also be Records.
10
11 Stage API:
12 ---------
13
14     stage requires compliance with a strict API that may be
15     implemented in several ways, including as a static class.
16 the methods of a stage instance must be as follows:
17
18 * ispec() - Input data format specification
19 returns an object or a list or tuple of objects, or
20 a Record, each object having an "eq" function which
21 takes responsibility for copying by assignment all
22 sub-objects
23 * ospec() - Output data format specification
24                  requirements as for ispec
25 * process(m, i) - Processes an ispec-formatted object
26 returns a combinatorial block of a result that
27 may be assigned to the output, by way of the "eq"
28 function
29 * setup(m, i) - Optional function for setting up submodules
30 may be used for more complex stages, to link
31 the input (i) to submodules. must take responsibility
32 for adding those submodules to the module (m).
33 the submodules must be combinatorial blocks and
34 must have their inputs and output linked combinatorially.
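
    a minimal stage conforming to this API might look roughly as follows
    (an illustrative sketch only: the name "DoubleStage" is hypothetical;
    see ExampleAddStage and ExampleStageCls further below for the stages
    actually exercised in this file):

        class DoubleStage:
            def ispec(self):  return Signal(16)
            def ospec(self):  return Signal(17)
            def process(self, i): return i * 2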
35
36 StageChain:
37 ----------
38
39 A useful combinatorial wrapper around stages that chains them together
40 and then presents a Stage-API-conformant interface.
41
42 UnbufferedPipeline:
43 ------------------
44
45 A simple stalling clock-synchronised pipeline that has no buffering
46     (unlike BufferedPipeline).  A stall anywhere along the line will
47     result in the stall propagating back up the entire chain.
48
49 The BufferedPipeline by contrast will buffer incoming data, allowing
50 previous stages one clock cycle's grace before also having to stall.
51
52 BufferedPipeline:
53 ----------------
54
55 nmigen implementation of buffered pipeline stage, based on zipcpu:
56 https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
57
58 this module requires quite a bit of thought to understand how it works
59 (and why it is needed in the first place). reading the above is
60 *strongly* recommended.
61
62 unlike john dawson's IEEE754 FPU STB/ACK signalling, which requires
63 the STB / ACK signals to raise and lower (on separate clocks) before
64     data may proceed (thus only allowing one piece of data to proceed
65 on *ALTERNATE* cycles), the signalling here is a true pipeline
66 where data will flow on *every* clock when the conditions are right.
67
68     input data is accepted when:
69         * incoming previous-stage strobe (p.i_valid) is HIGH
70         * outgoing previous-stage ready (p.o_ready) is HIGH
71
72     output data is transferred to the next stage when:
73         * outgoing next-stage strobe (n.o_valid) is HIGH
74         * incoming next-stage ready (n.i_ready) is HIGH
75
76 the tricky bit is when the input has valid data and the output is not
77 ready to accept it. if it wasn't for the clock synchronisation, it
78 would be possible to tell the input "hey don't send that data, we're
79 not ready". unfortunately, it's not possible to "change the past":
80 the previous stage *has no choice* but to pass on its data.
81
82 therefore, the incoming data *must* be accepted - and stored: that
83 is the responsibility / contract that this stage *must* accept.
84 on the same clock, it's possible to tell the input that it must
85 not send any more data. this is the "stall" condition.
86
87 we now effectively have *two* possible pieces of data to "choose" from:
88 the buffered data, and the incoming data. the decision as to which
89 to process and output is based on whether we are in "stall" or not.
90     i.e. once the next stage is ready again, the output comes from
91 the buffer if a stall had previously occurred, otherwise it comes
92 direct from processing the input.
93
94 this allows us to respect a synchronous "travelling STB" with what
95 dan calls a "buffered handshake".
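
    as a rough worked example: if the next stage deasserts n.i_ready for one
    cycle while items A and B arrive back-to-back, A is already on the output;
    B's processed result is captured into the buffer register on the clock at
    which the stall is noticed, and p.o_ready is deasserted to hold off a
    third item.  when n.i_ready is reasserted, the buffered result (B) is
    flushed to the output, p.o_ready is raised again, and the next item may
    then be accepted.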
96
97 it's quite a complex state machine!
98 """
99
100 from nmigen import Signal, Cat, Const, Mux, Module, Array
101 from nmigen.cli import verilog, rtlil
102 from nmigen.hdl.rec import Record, Layout
103
104 from collections.abc import Sequence
105
106
107 class PrevControl:
108 """ contains signals that come *from* the previous stage (both in and out)
109 * i_valid: previous stage indicating all incoming data is valid.
110 may be a multi-bit signal, where all bits are required
111 to be asserted to indicate "valid".
112 * o_ready: output to next stage indicating readiness to accept data
113 * i_data : an input - added by the user of this class
114 """
115
116 def __init__(self, i_width=1):
117 self.i_valid = Signal(i_width, name="p_i_valid") # prev >>in self
118 self.o_ready = Signal(name="p_o_ready") # prev <<out self
119
120 def connect_in(self, prev):
121 """ helper function to connect stage to an input source. do not
122 use to connect stage-to-stage!
123 """
124 return [self.i_valid.eq(prev.i_valid),
125 prev.o_ready.eq(self.o_ready),
126 eq(self.i_data, prev.i_data),
127 ]
128
129 def i_valid_logic(self):
130 vlen = len(self.i_valid)
131 if vlen > 1: # multi-bit case: valid only when i_valid is all 1s
132 all1s = Const(-1, (len(self.i_valid), False))
133 return self.i_valid == all1s
134 # single-bit i_valid case
135 return self.i_valid
136
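# illustrative note (a sketch): with PrevControl(i_width=2), i_valid_logic()
# is equivalent to the test (i_valid == 0b11), i.e. both incoming valid bits
# must be asserted before the incoming data is treated as valid.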
137
138 class NextControl:
139 """ contains the signals that go *to* the next stage (both in and out)
140 * o_valid: output indicating to next stage that data is valid
141 * i_ready: input from next stage indicating that it can accept data
142 * o_data : an output - added by the user of this class
143 """
144 def __init__(self):
145 self.o_valid = Signal(name="n_o_valid") # self out>> next
146 self.i_ready = Signal(name="n_i_ready") # self <<in next
147
148 def connect_to_next(self, nxt):
149 """ helper function to connect to the next stage data/valid/ready.
150 data/valid is passed *TO* nxt, and ready comes *IN* from nxt.
151 """
152 return [nxt.i_valid.eq(self.o_valid),
153 self.i_ready.eq(nxt.o_ready),
154 eq(nxt.i_data, self.o_data),
155 ]
156
157 def connect_out(self, nxt):
158 """ helper function to connect stage to an output source. do not
159 use to connect stage-to-stage!
160 """
161 return [nxt.o_valid.eq(self.o_valid),
162 self.i_ready.eq(nxt.i_ready),
163 eq(nxt.o_data, self.o_data),
164 ]
165
166
167 def eq(o, i):
168 """ makes signals equal: a helper routine which identifies if it is being
169 passed a list (or tuple) of objects, or signals, or Records, and calls
170 the objects' eq function.
171
172 complex objects (classes) can be used: they must follow the
173 convention of having an eq member function, which takes the
174 responsibility of further calling eq and returning a list of
175 eq assignments
176
177 Record is a special (unusual, recursive) case, where the input may be
178 specified as a dictionary (which may contain further dictionaries,
179 recursively), where the field names of the dictionary must match
180 the Record's field spec. Alternatively, an object with the same
181 member names as the Record may be assigned: it does not have to
182 *be* a Record.
183 """
184 if not isinstance(o, list) and not isinstance(o, tuple):
185 o, i = [o], [i]
186 res = []
187 for (ao, ai) in zip(o, i):
188 #print ("eq ao", repr(ao), "ai:", repr(ai))
189 if isinstance(ao, Record):
190 for idx, (field_name, field_shape, _) in enumerate(ao.layout):
191 if isinstance(field_shape, Layout):
192 val = ai.fields
193 else:
194 val = ai
195 if hasattr(val, field_name): # check for attribute
196 val = getattr(val, field_name)
197 else:
198 val = val[field_name] # dictionary-style specification
199 rres = eq(ao.fields[field_name], val)
200 res += rres
201 else:
202 rres = ao.eq(ai)
203 if not isinstance(rres, Sequence):
204 rres = [rres]
205 res += rres
206 return res
207
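# illustrative use of eq() with a Record (a sketch: the layout, field names
# and values below are hypothetical, for demonstration only):
#
#   layout = [("op", 16), ("flag", 1)]
#   rec = Record(layout)
#   m.d.comb += eq(rec, {"op": 0x1234, "flag": 1})   # dictionary-style
#
# alternatively any object with matching member names (op, flag) may be
# passed as the second argument: it does not have to be a Record.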
208
209 class StageChain:
210 """ pass in a list of stages, and they will automatically be
211 chained together via their input and output specs into a
212 combinatorial chain.
213
214 * input to this class will be the input of the first stage
215 * output of first stage goes into input of second
216 * output of second goes into input into third (etc. etc.)
217 * the output of this class will be the output of the last stage
218 """
219 def __init__(self, chain):
220 self.chain = chain
221
222 def ispec(self):
223 return self.chain[0].ispec()
224
225 def ospec(self):
226 return self.chain[-1].ospec()
227
228 def setup(self, m, i):
229 for (idx, c) in enumerate(self.chain):
230 if hasattr(c, "setup"):
231 c.setup(m, i) # stage may have some module stuff
232 o = self.chain[idx].ospec() # only the last assignment survives
233 m.d.comb += eq(o, c.process(i)) # process input into "o"
234 if idx != len(self.chain)-1:
235 ni = self.chain[idx+1].ispec() # becomes new input on next loop
236 m.d.comb += eq(ni, o) # assign output to next input
237 i = ni
238 self.o = o # last loop is the output
239
240 def process(self, i):
241 return self.o
242
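# illustrative use of StageChain (a sketch: ExampleStageCls is defined
# further below; the chain shown here is not used elsewhere in this file):
#
#   chain = StageChain([ExampleStageCls(), ExampleStageCls()])
#   i = chain.ispec()                     # input of the first stage
#   chain.setup(m, i)                     # links the stages combinatorially
#   o = chain.ospec()                     # output of the last stage
#   m.d.comb += eq(o, chain.process(i))   # overall result: i + 2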
243
244 class PipelineBase:
245 """ Common functions for Pipeline API
246 """
247 def __init__(self, stage, in_multi=None, p_len=1, n_len=1):
248 """ pass in a "stage" which may be either a static class or a class
249 instance, which has four functions (one optional):
250 * ispec: returns input signals according to the input specification
251             * ospec: returns output signals according to the output specification
252 * process: takes an input instance and returns processed data
253 * setup: performs any module linkage if the stage uses one.
254
255 User must also:
256 * add i_data member to PrevControl and
257 * add o_data member to NextControl
258 """
259 self.stage = stage
260
261 # set up input and output IO ACK (prev/next ready/valid)
262 p = []
263 n = []
264 for i in range(p_len):
265 p.append(PrevControl(in_multi))
266 for i in range(n_len):
267 n.append(NextControl())
268 if p_len > 1:
269 self.p = Array(p)
270 else:
271 self.p = p
272 if n_len > 1:
273 self.n = Array(n)
274 else:
275 self.n = n
276
277 def connect_to_next(self, nxt, p_idx=0, n_idx=0):
278 """ helper function to connect to the next stage data/valid/ready.
279 """
280 return self.n[n_idx].connect_to_next(nxt.p[p_idx])
281
282 def connect_in(self, prev, idx=0, prev_idx=None):
283 """ helper function to connect stage to an input source. do not
284 use to connect stage-to-stage!
285 """
286 if prev_idx is None:
287 return self.p[idx].connect_in(prev.p)
288 return self.p[idx].connect_in(prev.p[prev_idx])
289
290 def connect_out(self, nxt, idx=0, nxt_idx=None):
291 """ helper function to connect stage to an output source. do not
292 use to connect stage-to-stage!
293 """
294 if nxt_idx is None:
295 return self.n[idx].connect_out(nxt.n)
296         return self.n[idx].connect_out(nxt.n[nxt_idx])
297
298 def set_input(self, i, idx=0):
299 """ helper function to set the input data
300 """
301 return eq(self.p[idx].i_data, i)
302
303 def ports(self):
304 res = []
305 for i in range(len(self.p)):
306 res += [self.p[i].i_valid, self.p[i].o_ready,
307                     self.p[i].i_data]  # XXX need flattening!
308 for i in range(len(self.n)):
309 res += [self.n[i].i_ready, self.n[i].o_valid,
310                     self.n[i].o_data]  # XXX need flattening!
311 return res
312
313
314 class BufferedPipeline(PipelineBase):
315 """ buffered pipeline stage. data and strobe signals travel in sync.
316         if ever the input has valid data and the next stage is not ready,
317         the processed data is stored in a temporary register.
318
319 stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
320 stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
321 stage-1 p.i_data >>in stage n.o_data out>> stage+1
322 | |
323 process --->----^
324 | |
325 +-- r_data ->-+
326
327 input data p.i_data is read (only), is processed and goes into an
328 intermediate result store [process()]. this is updated combinatorially.
329
330 in a non-stall condition, the intermediate result will go into the
331 output (update_output). however if ever there is a stall, it goes
332 into r_data instead [update_buffer()].
333
334 when the non-stall condition is released, r_data is the first
335 to be transferred to the output [flush_buffer()], and the stall
336 condition cleared.
337
338 on the next cycle (as long as stall is not raised again) the
339 input may begin to be processed and transferred directly to output.
340 """
341
342 def __init__(self, stage, n_len=1, p_len=1, p_mux=None, n_mux=None):
343 """ set up a BufferedPipeline (multi-input, multi-output)
344 NOTE: n_len > 1 and p_len > 1 is NOT supported
345
346 Arguments:
347
348 * stage: see Stage API above
349 * p_len: number of inputs (PrevControls + data)
350 * n_len: number of outputs (NextControls + data)
351 * p_mux: optional multiplex selector for incoming data
352 * n_mux: optional multiplex router for outgoing data
353 """
354         PipelineBase.__init__(self, stage, p_len=p_len, n_len=n_len)
355 self.p_mux = p_mux
356 self.n_mux = n_mux
357
358 # set up the input and output data
359 for i in range(p_len):
360 self.p[i].i_data = stage.ispec() # input type
361 for i in range(n_len):
362 self.n[i].o_data = stage.ospec()
363
364 def elaborate(self, platform):
365 m = Module()
366
367 result = self.stage.ospec() # output data
368
369 # need an array of buffer registers conforming to *output* spec
370 r_data = []
371 p_len = len(self.p)
372 for i in range(len(self.p)):
373 r = self.stage.ospec() # output type
374 r_data.append(r)
375 if hasattr(self.stage, "setup"):
376 self.stage.setup(m, self.p[i].i_data)
377 if len(r_data) > 1:
378 r_data = Array(r_data)
379
380 pi = 0 # TODO: use p_mux to decide which to select
381         ni = 0 # TODO: use n_mux to decide which to select
382
383 # establish some combinatorial temporaries
384 o_n_validn = Signal(reset_less=True)
385 i_p_valid_o_p_ready = Signal(reset_less=True)
386 p_i_valid = Signal(reset_less=True)
387 m.d.comb += [p_i_valid.eq(self.p[pi].i_valid_logic()),
388 o_n_validn.eq(~self.n[ni].o_valid),
389 i_p_valid_o_p_ready.eq(p_i_valid & self.p[pi].o_ready),
390 ]
391
392 # store result of processing in combinatorial temporary
393 m.d.comb += eq(result, self.stage.process(self.p[pi].i_data))
394
395 # if not in stall condition, update the temporary register
396 with m.If(self.p[pi].o_ready): # not stalled
397 m.d.sync += eq(r_data[ni], result) # update buffer
398
399 with m.If(self.n[ni].i_ready): # next stage is ready
400 with m.If(self.p[pi].o_ready): # not stalled
401 # nothing in buffer: send (processed) input direct to output
402 m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
403 eq(self.n[ni].o_data, result), # update output
404 ]
405 with m.Else(): # p.o_ready is false, and something is in buffer.
406 # Flush the [already processed] buffer to the output port.
407                 m.d.sync += [self.n[ni].o_valid.eq(1), # output now valid (from buffer)
408 eq(self.n[ni].o_data, r_data[ni]), # flush buffer
409 ]
410 for i in range(p_len):
411 m.d.sync += self.p[i].o_ready.eq(1) # clear stall
412 # ignore input, since p.o_ready is false (in current clock)
413
414         # (n.i_ready) is false here: next stage is NOT ready
415         with m.Elif(o_n_validn): # our output is not yet valid
416 m.d.sync += [self.n[ni].o_valid.eq(p_i_valid),
417 self.p[pi].o_ready.eq(1),
418 eq(self.n[ni].o_data, result), # set output data
419 ]
420 for i in range(p_len):
421 m.d.sync += self.p[i].o_ready.eq(1) # Keep the buffer empty
422
423 # (n.i_ready) false and (n.o_valid) true:
424 with m.Elif(i_p_valid_o_p_ready):
425             # next stage is not ready and output is valid: the buffer (above)
426             # has captured the processed input, so stall the previous stage
426 for i in range(p_len):
427 piv = Signal(reset_less=True)
428 pnv = Signal(reset_less=True)
429                 m.d.comb += [piv.eq(self.p[i].i_valid_logic()),
430                              pnv.eq(~(piv & self.n[ni].o_valid))
431 ]
432 m.d.sync += self.p[i].o_ready.eq(pnv)
433
434 return m
435
436
437 class ExampleAddStage:
438 """ an example of how to use the buffered pipeline, as a class instance
439 """
440
441 def ispec(self):
442 """ returns a tuple of input signals which will be the incoming data
443 """
444 return (Signal(16), Signal(16))
445
446 def ospec(self):
447 """ returns an output signal which will happen to contain the sum
448 of the two inputs
449 """
450 return Signal(16)
451
452 def process(self, i):
453 """ process the input data (sums the values in the tuple) and returns it
454 """
455 return i[0] + i[1]
456
457
458 class ExampleBufPipeAdd(BufferedPipeline):
459 """ an example of how to use the buffered pipeline, using a class instance
460 """
461
462 def __init__(self):
463 addstage = ExampleAddStage()
464 BufferedPipeline.__init__(self, addstage)
465
466
467 class ExampleStage:
468 """ an example of how to use the buffered pipeline, in a static class
469 fashion
470 """
471
472 def ispec():
473 return Signal(16, name="example_input_signal")
474
475 def ospec():
476 return Signal(16, name="example_output_signal")
477
478 def process(i):
479 """ process the input data and returns it (adds 1)
480 """
481 return i + 1
482
483
484 class ExampleStageCls:
485 """ an example of how to use the buffered pipeline, in a static class
486 fashion
487 """
488
489 def ispec(self):
490 return Signal(16, name="example_input_signal")
491
492 def ospec(self):
493 return Signal(16, name="example_output_signal")
494
495 def process(self, i):
496 """ process the input data and returns it (adds 1)
497 """
498 return i + 1
499
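# the class below is an illustrative sketch (hypothetical, not used elsewhere
# in this file): it shows how the optional setup() method of the Stage API
# can add a combinatorial submodule and link it to the stage input.

class ExampleSetupStageCls:
    """ an example of a stage with the optional setup() method: the result
        is produced by a combinatorial submodule rather than directly in
        process().
    """

    class _CombAdder:
        """ trivial combinatorial submodule: o = i + 1 """
        def __init__(self):
            self.i = Signal(16)
            self.o = Signal(16)

        def elaborate(self, platform):
            m = Module()
            m.d.comb += self.o.eq(self.i + 1)
            return m

    def __init__(self):
        self.adder = self._CombAdder()

    def ispec(self):
        return Signal(16, name="setup_stage_in")

    def ospec(self):
        return Signal(16, name="setup_stage_out")

    def setup(self, m, i):
        m.submodules.setup_adder = self.adder  # add the submodule to m
        m.d.comb += self.adder.i.eq(i)         # link input combinatorially

    def process(self, i):
        return self.adder.o                    # result comes from the submodule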
500
501 class ExampleBufPipe(BufferedPipeline):
502 """ an example of how to use the buffered pipeline.
503 """
504
505 def __init__(self):
506 BufferedPipeline.__init__(self, ExampleStage)
507
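# illustrative sketch of chaining two buffered stages inside an enclosing
# module (the names m, stage1 and stage2 are hypothetical): connect_to_next()
# wires stage1's next-stage interface (o_valid / i_ready / o_data) to
# stage2's previous-stage interface:
#
#   m.submodules.stage1 = stage1 = ExampleBufPipe()
#   m.submodules.stage2 = stage2 = ExampleBufPipe()
#   m.d.comb += stage1.connect_to_next(stage2)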
508
509 class UnbufferedPipeline(PipelineBase):
510 """ A simple pipeline stage with single-clock synchronisation
511 and two-way valid/ready synchronised signalling.
512
513 Note that a stall in one stage will result in the entire pipeline
514 chain stalling.
515
516         Note also that, unlike BufferedPipeline, the valid/ready signalling
517         does NOT travel synchronously with the data: it combines in a
518         *combinatorial* fashion.  Therefore, a long pipeline chain will
519         lengthen propagation delays.
520
521 Argument: stage. see Stage API, above
522
523 stage-1 p.i_valid >>in stage n.o_valid out>> stage+1
524 stage-1 p.o_ready <<out stage n.i_ready <<in stage+1
525 stage-1 p.i_data >>in stage n.o_data out>> stage+1
526 | |
527 r_data |
528 | |
529 +--process ->-+
530
531 Attributes:
532 -----------
533 p.i_data : StageInput, shaped according to ispec
534 The pipeline input
535         n.o_data : StageOutput, shaped according to ospec
536             The pipeline output
537         r_data : shaped according to ispec
538 A temporary (buffered) copy of a prior (valid) input.
539 This is HELD if the output is not ready. It is updated
540 SYNCHRONOUSLY.
541 """
542
543 def __init__(self, stage, p_len=1, n_len=1, p_mux=None, n_mux=None):
544         PipelineBase.__init__(self, stage, p_len=p_len, n_len=n_len)
545 self.p_mux = p_mux
546 self.n_mux = n_mux
547
548 # set up the input and output data
549 for i in range(p_len):
550 self.p[i].i_data = stage.ispec() # input type
551 for i in range(n_len):
552 self.n[i].o_data = stage.ospec()
553
554 def elaborate(self, platform):
555 m = Module()
556
557 if self.p_mux:
558 m.submodules += self.p_mux
559
560 # need an array of buffer registers conforming to *input* spec
561 r_data = []
562 data_valid = []
563 p_i_valid = []
564 n_i_readyn = []
565 p_len = len(self.p)
566 for i in range(p_len):
567 r = self.stage.ispec() # input type
568 r_data.append(r)
569 data_valid.append(Signal(name="data_valid"))
570 p_i_valid.append(Signal(name="p_i_valid", reset_less=True))
571 n_i_readyn.append(Signal(name="n_i_readyn", reset_less=True))
572 if hasattr(self.stage, "setup"):
573 self.stage.setup(m, r)
574 if len(r_data) > 1:
575 r_data = Array(r_data)
576 p_i_valid = Array(p_i_valid)
577 n_i_readyn = Array(n_i_readyn)
578 data_valid = Array(data_valid)
579
580 ni = 0 # TODO: use n_mux to decide which to select
581
582 if self.p_mux:
583 mid = self.p_mux.m_id
584 for i in range(p_len):
585 m.d.sync += data_valid[i].eq(0)
586 m.d.comb += n_i_readyn[i].eq(1)
587 m.d.comb += p_i_valid[i].eq(0)
588 m.d.comb += self.p[i].o_ready.eq(0)
589 m.d.comb += p_i_valid[mid].eq(self.p_mux.active)
590 m.d.comb += self.p[mid].o_ready.eq(~data_valid[mid] | \
591 self.n[ni].i_ready)
592 m.d.comb += n_i_readyn[mid].eq(~self.n[ni].i_ready & \
593 data_valid[mid])
594             # anyvalid: concatenation of all per-input data_valid bits
595 av = []
596 for i in range(p_len):
597 av.append(data_valid[i])
598 anyvalid = Cat(*av)
599 m.d.comb += self.n[ni].o_valid.eq(anyvalid.bool())
600 m.d.sync += data_valid[mid].eq(p_i_valid[mid] | \
601 (n_i_readyn[mid] & data_valid[mid]))
602
603 for i in range(p_len):
604 with m.If(self.p[i].i_valid & self.p[i].o_ready):
605 m.d.sync += eq(r_data[i], self.p[i].i_data)
606
607 m.d.comb += eq(self.n[ni].o_data,
608 self.stage.process(r_data[mid]))
609 else:
610 for i in range(p_len):
611 m.d.comb += p_i_valid[i].eq(self.p[i].i_valid_logic())
612 m.d.comb += self.p[i].o_ready.eq(~data_valid[i] | \
613 self.n[ni].i_ready)
614 m.d.comb += self.n[ni].o_valid.eq(data_valid[i])
615
616 m.d.comb += n_i_readyn[i].eq(~self.n[ni].i_ready & \
617 data_valid[i])
618 m.d.sync += data_valid[i].eq(p_i_valid[i] | \
619 (n_i_readyn[i] & data_valid[i]))
620 with m.If(self.p[i].i_valid & self.p[i].o_ready):
621 m.d.sync += eq(r_data[i], self.p[i].i_data)
622
623 m.d.comb += eq(self.n[ni].o_data, self.stage.process(r_data[i]))
624
625 return m
626
627
628 class ExamplePipeline(UnbufferedPipeline):
629 """ an example of how to use the combinatorial pipeline.
630 """
631
632 def __init__(self):
633 UnbufferedPipeline.__init__(self, ExampleStage)
634
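# the class below is an illustrative sketch (hypothetical, not used in the
# conversions that follow): two ExampleStageCls instances are chained
# combinatorially via StageChain and wrapped in a single UnbufferedPipeline
# stage, so the whole stage adds 2 in one clock-synchronised step.

class ExampleChainPipeline(UnbufferedPipeline):
    """ an example of wrapping a StageChain in an UnbufferedPipeline """

    def __init__(self):
        chain = StageChain([ExampleStageCls(), ExampleStageCls()])
        UnbufferedPipeline.__init__(self, chain)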
635
636 if __name__ == '__main__':
637 dut = ExampleBufPipe()
638 vl = rtlil.convert(dut, ports=dut.ports())
639 with open("test_bufpipe.il", "w") as f:
640 f.write(vl)
641
642 dut = ExamplePipeline()
643 vl = rtlil.convert(dut, ports=dut.ports())
644 with open("test_combpipe.il", "w") as f:
645 f.write(vl)
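
    # illustrative sketch: convert the class-instance example as well.
    # ports() does not yet flatten the (Signal, Signal) input tuple (see the
    # XXX note in PipelineBase.ports), so the port list is built by hand here.
    dut = ExampleBufPipeAdd()
    ports = [dut.p[0].i_valid, dut.p[0].o_ready,
             dut.n[0].o_valid, dut.n[0].i_ready] + \
            list(dut.p[0].i_data) + [dut.n[0].o_data]
    vl = rtlil.convert(dut, ports=ports)
    with open("test_bufpipeadd.il", "w") as f:
        f.write(vl)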