1 """ Pipeline and BufferedPipeline implementation, conforming to the same API.
6 a strategically very important function that is identical in function
7 to nmigen's Signal.eq function, except it may take objects, or a list
8 of objects, or a tuple of objects, and where objects may also be
14 stage requires compliance with a strict API that may be
15 implemented in several means, including as a static class.
16 the methods of a stage instance must be as follows:
18 * ispec() - Input data format specification
19 returns an object or a list or tuple of objects, or
20 a Record, each object having an "eq" function which
21 takes responsibility for copying by assignment all
23 * ospec() - Output data format specification
24 requirements as for ispec
25 * process(m, i) - Processes an ispec-formatted object
26 returns a combinatorial block of a result that
27 may be assigned to the output, by way of the "eq"
29 * setup(m, i) - Optional function for setting up submodules
30 may be used for more complex stages, to link
31 the input (i) to submodules. must take responsibility
32 for adding those submodules to the module (m).
33 the submodules must be combinatorial blocks and
34 must have their inputs and output linked combinatorially.
39 A useful combinatorial wrapper around stages that chains them together
40 and then presents a Stage-API-conformant interface.
45 A simple stalling clock-synchronised pipeline that has no buffering
46 (unlike BufferedPipeline). A stall anywhere along the line will
47 result in a stall back-propagating down the entire chain.
49 The BufferedPipeline by contrast will buffer incoming data, allowing
50 previous stages one clock cycle's grace before also having to stall.
55 nmigen implementation of buffered pipeline stage, based on zipcpu:
56 https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
58 this module requires quite a bit of thought to understand how it works
59 (and why it is needed in the first place). reading the above is
60 *strongly* recommended.
62 unlike john dawson's IEEE754 FPU STB/ACK signalling, which requires
63 the STB / ACK signals to raise and lower (on separate clocks) before
64 data may proceed (thus only allowing one piece of data to proceed
65 on *ALTERNATE* cycles), the signalling here is a true pipeline
66 where data will flow on *every* clock when the conditions are right.
68 input acceptance conditions are when:
69 * incoming previous-stage strobe (p.i_valid) is HIGH
70 * outgoing previous-stage ready (p.o_ready) is LOW
72 output transmission conditions are when:
73 * outgoing next-stage strobe (n.o_valid) is HIGH
74 * outgoing next-stage ready (n.i_ready) is LOW
76 the tricky bit is when the input has valid data and the output is not
77 ready to accept it. if it wasn't for the clock synchronisation, it
78 would be possible to tell the input "hey don't send that data, we're
79 not ready". unfortunately, it's not possible to "change the past":
80 the previous stage *has no choice* but to pass on its data.
82 therefore, the incoming data *must* be accepted - and stored: that
83 is the responsibility / contract that this stage *must* accept.
84 on the same clock, it's possible to tell the input that it must
85 not send any more data. this is the "stall" condition.
87 we now effectively have *two* possible pieces of data to "choose" from:
88 the buffered data, and the incoming data. the decision as to which
89 to process and output is based on whether we are in "stall" or not.
90 i.e. when the next stage is no longer ready, the output comes from
91 the buffer if a stall had previously occurred, otherwise it comes
92 direct from processing the input.
94 this allows us to respect a synchronous "travelling STB" with what
95 dan calls a "buffered handshake".
97 it's quite a complex state machine!
from collections.abc import Sequence

from nmigen import Signal, Cat, Const, Mux, Module
from nmigen.cli import verilog, rtlil
from nmigen.hdl.rec import Record, Layout
class PrevControl:
    """ contains signals that come *from* the previous stage (both in and out)
        * i_valid: previous stage indicating all incoming data is valid.
                   may be a multi-bit signal, where all bits are required
                   to be asserted to indicate "valid".
        * o_ready: output to next stage indicating readiness to accept data
        * i_data : an input - added by the user of this class
    """

    def __init__(self, i_width=1):
        self.i_valid = Signal(i_width, name="p_i_valid") # prev   >>in  self
        self.o_ready = Signal(name="p_o_ready")          # prev   <<out self

    def connect_in(self, prev):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!
        """
        return [self.i_valid.eq(prev.i_valid),
                prev.o_ready.eq(self.o_ready),
                eq(self.i_data, prev.i_data),
               ]

    def i_valid_logic(self):
        """ returns an expression that is true when the input is "valid":
            for a multi-bit i_valid, *all* bits must be asserted.
        """
        vlen = len(self.i_valid)
        if vlen > 1: # multi-bit case: valid only when i_valid is all 1s
            all1s = Const(-1, (len(self.i_valid), False))
            return self.i_valid == all1s
        # single-bit i_valid case
        return self.i_valid
class NextControl:
    """ contains the signals that go *to* the next stage (both in and out)
        * o_valid: output indicating to next stage that data is valid
        * i_ready: input from next stage indicating that it can accept data
        * o_data : an output - added by the user of this class
    """

    def __init__(self):
        self.o_valid = Signal(name="n_o_valid") # self out>>  next
        self.i_ready = Signal(name="n_i_ready") # self <<in   next

    def connect_to_next(self, nxt):
        """ helper function to connect to the next stage data/valid/ready.
            data/valid is passed *TO* nxt, and ready comes *IN* from nxt.
        """
        return [nxt.i_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.o_ready),
                eq(nxt.i_data, self.o_data),
               ]

    def connect_out(self, nxt):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!
        """
        return [nxt.o_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.i_ready),
                eq(nxt.o_data, self.o_data),
               ]
def eq(o, i):
    """ makes signals equal: a helper routine which identifies if it is being
        passed a list (or tuple) of objects, or signals, or Records, and calls
        the objects' eq function.

        complex objects (classes) can be used: they must follow the
        convention of having an eq member function, which takes the
        responsibility of further calling eq and returning a list of
        eq assignments.

        Record is a special (unusual, recursive) case, where the input
        is specified as a dictionary (which may contain further dictionaries,
        recursively), where the field names of the dictionary must match
        the Record's field spec.
    """
    if not isinstance(o, Sequence):
        # promote single objects to one-element lists so the loop below
        # handles both cases uniformly
        o, i = [o], [i]
    res = []
    for (ao, ai) in zip(o, i):
        #print ("eq", ao, ai)
        if isinstance(ao, Record):
            # recurse into each field of the Record's layout
            for idx, (field_name, field_shape, _) in enumerate(ao.layout):
                if isinstance(field_shape, Layout):
                    val = ai.fields   # sub-record: recurse into its fields
                else:
                    val = ai
                if hasattr(val, field_name):
                    # attribute-style access on the input object
                    val = getattr(val, field_name)
                else:
                    # dictionary-style specification
                    val = val[field_name]
                rres = eq(ao.fields[field_name], val)
                res += rres
            continue
        # non-Record: delegate to the object's own eq function
        rres = ao.eq(ai)
        if not isinstance(rres, Sequence):
            rres = [rres]
        res += rres
    return res
class StageChain:
    """ pass in a list of stages, and they will automatically be
        chained together via their input and output specs into a
        combinatorial chain.

        * input to this class will be the input of the first stage
        * output of first stage goes into input of second
        * output of second goes into input into third (etc. etc.)
        * the output of this class will be the output of the last stage
    """
    def __init__(self, chain):
        self.chain = chain

    def ispec(self):
        return self.chain[0].ispec()

    def ospec(self):
        return self.chain[-1].ospec()

    def setup(self, m, i):
        for (idx, c) in enumerate(self.chain):
            if hasattr(c, "setup"):
                c.setup(m, i)               # stage may have some module stuff
            o = self.chain[idx].ospec()     # only the last assignment survives
            m.d.comb += eq(o, c.process(i)) # process input into "o"
            if idx != len(self.chain)-1:
                ni = self.chain[idx+1].ispec() # becomes new input on next loop
                m.d.comb += eq(ni, o)          # assign output to next input
                i = ni                         # NOTE(review): rebind presumed;
                                               # required for chaining to work
        self.o = o # last loop is the output

    def process(self, i):
        # input is not used: setup() has already wired the chain; the
        # result of the last stage is held in self.o
        return self.o
class PipelineBase:
    """ Common functions for Pipeline API
    """
    def __init__(self, stage, in_multi=None):
        """ pass in a "stage" which may be either a static class or a class
            instance, which has four functions (one optional):
            * ispec: returns input signals according to the input specification
            * ospec: returns output signals to the output specification
            * process: takes an input instance and returns processed data
            * setup: performs any module linkage if the stage uses one.

            User must also:
            * add i_data member to PrevControl and
            * add o_data member to NextControl
        """
        self.stage = stage

        # set up input and output IO ACK (prev/next ready/valid)
        self.p = PrevControl(in_multi)
        self.n = NextControl()

    def connect_to_next(self, nxt):
        """ helper function to connect to the next stage data/valid/ready.
        """
        return self.n.connect_to_next(nxt.p)

    def connect_in(self, prev):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!
        """
        return self.p.connect_in(prev.p)

    def connect_out(self, nxt):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!
        """
        return self.n.connect_out(nxt.n)

    def set_input(self, i):
        """ helper function to set the input data
        """
        return eq(self.p.i_data, i)

    def ports(self):
        return [self.p.i_valid, self.n.i_ready,
                self.n.o_valid, self.p.o_ready,
                self.p.i_data, self.n.o_data   # XXX need flattening!
               ]
class BufferedPipeline(PipelineBase):
    """ buffered pipeline stage.  data and strobe signals travel in sync.
        if ever the input is ready and the output is not, processed data
        is stored in a temporary register.

        Argument: stage.  see Stage API above

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        input data p.i_data is read (only), is processed and goes into an
        intermediate result store [process()].  this is updated
        combinatorially.

        in a non-stall condition, the intermediate result will go into the
        output (update_output).  however if ever there is a stall, it goes
        into r_data instead [update_buffer()].

        when the non-stall condition is released, r_data is the first
        to be transferred to the output [flush_buffer()], and the stall
        condition cleared.

        on the next cycle (as long as stall is not raised again) the
        input may begin to be processed and transferred directly to output.
    """
    def __init__(self, stage):
        PipelineBase.__init__(self, stage)

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        result = self.stage.ospec()
        r_data = self.stage.ospec()
        if hasattr(self.stage, "setup"):
            self.stage.setup(m, self.p.i_data)

        # establish some combinatorial temporaries
        o_n_validn = Signal(reset_less=True)
        i_p_valid_o_p_ready = Signal(reset_less=True)
        p_i_valid = Signal(reset_less=True)
        m.d.comb += [p_i_valid.eq(self.p.i_valid_logic()),
                     o_n_validn.eq(~self.n.o_valid),
                     i_p_valid_o_p_ready.eq(p_i_valid & self.p.o_ready),
        ]

        # store result of processing in combinatorial temporary
        m.d.comb += eq(result, self.stage.process(self.p.i_data))

        # if not in stall condition, update the temporary register
        with m.If(self.p.o_ready): # not stalled
            m.d.sync += eq(r_data, result) # update buffer

        with m.If(self.n.i_ready): # next stage is ready
            with m.If(self.p.o_ready): # not stalled
                # nothing in buffer: send (processed) input direct to output
                m.d.sync += [self.n.o_valid.eq(p_i_valid),
                             eq(self.n.o_data, result), # update output
                            ]
            with m.Else(): # p.o_ready is false, and something is in buffer.
                # Flush the [already processed] buffer to the output port.
                m.d.sync += [self.n.o_valid.eq(1), # declare reg empty
                             eq(self.n.o_data, r_data), # flush buffer
                             self.p.o_ready.eq(1), # clear stall condition
                            ]
                # ignore input, since p.o_ready is also false.

        # (n.i_ready) is false here: next stage is *not* ready
        with m.Elif(o_n_validn): # next stage being told "ready"
            m.d.sync += [self.n.o_valid.eq(p_i_valid),
                         self.p.o_ready.eq(1), # Keep the buffer empty
                         eq(self.n.o_data, result), # set output data
                        ]

        # (n.i_ready) false and (n.o_valid) true:
        with m.Elif(i_p_valid_o_p_ready):
            # If next stage *is* ready, and not stalled yet, accept input
            m.d.sync += self.p.o_ready.eq(~(p_i_valid & self.n.o_valid))

        return m
class ExampleAddStage:
    """ an example of how to use the buffered pipeline, as a class instance
    """
    def ispec(self):
        """ returns a tuple of input signals which will be the incoming data
        """
        return (Signal(16), Signal(16))

    def ospec(self):
        """ returns an output signal which will happen to contain the sum
            of the two inputs
        """
        return Signal(16)

    def process(self, i):
        """ process the input data (sums the values in the tuple) and
            returns it
        """
        return i[0] + i[1]
class ExampleBufPipeAdd(BufferedPipeline):
    """ an example of how to use the buffered pipeline, using a class instance
        as the stage
    """
    def __init__(self):
        addstage = ExampleAddStage()
        BufferedPipeline.__init__(self, addstage)
class ExampleStage:
    """ an example of how to use the buffered pipeline, in a static class
        fashion (no instance is ever created; methods take no self)
    """
    def ispec():
        return Signal(16, name="example_input_signal")

    def ospec():
        return Signal(16, name="example_output_signal")

    def process(i):
        """ process the input data and returns it (adds 1)
        """
        return i + 1
class ExampleStageCls:
    """ an example of how to use the buffered pipeline, as a class
        instance (methods take self)
    """
    def ispec(self):
        return Signal(16, name="example_input_signal")

    def ospec(self):
        return Signal(16, name="example_output_signal")

    def process(self, i):
        """ process the input data and returns it (adds 1)
        """
        return i + 1
class ExampleBufPipe(BufferedPipeline):
    """ an example of how to use the buffered pipeline.
    """
    def __init__(self):
        BufferedPipeline.__init__(self, ExampleStage)
class UnbufferedPipeline(PipelineBase):
    """ A simple pipeline stage with single-clock synchronisation
        and two-way valid/ready synchronised signalling.

        Note that a stall in one stage will result in the entire pipeline
        chain stalling.

        Also that unlike BufferedPipeline, the valid/ready signalling does NOT
        travel synchronously with the data: the valid/ready signalling
        combines in a *combinatorial* fashion.  Therefore, a long pipeline
        chain will lengthen propagation delays.

        Argument: stage.  see Stage API, above

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        Attributes:
        -----------
        p.i_data : StageInput, shaped according to ispec
            The pipeline input
        n.o_data : StageOutput, shaped according to ospec
            The pipeline output
        r_data : input_shape according to ispec
            A temporary (buffered) copy of a prior (valid) input.
            This is HELD if the output is not ready.  It is updated
            SYNCHRONOUSLY.
        result: output_shape according to ospec
            The output of the combinatorial logic.  it is updated
            COMBINATORIALLY (no clock dependence).
    """

    def __init__(self, stage):
        PipelineBase.__init__(self, stage)
        # registered flag: true when r_data holds data not yet accepted
        # by the next stage
        self._data_valid = Signal()

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        r_data = self.stage.ispec() # input type
        result = self.stage.ospec() # output data
        if hasattr(self.stage, "setup"):
            self.stage.setup(m, r_data)

        p_i_valid = Signal(reset_less=True)
        m.d.comb += p_i_valid.eq(self.p.i_valid_logic())
        m.d.comb += eq(result, self.stage.process(r_data))
        m.d.comb += self.n.o_valid.eq(self._data_valid)
        # ready to accept when empty, or when the next stage will drain us
        m.d.comb += self.p.o_ready.eq(~self._data_valid | self.n.i_ready)
        m.d.sync += self._data_valid.eq(p_i_valid | \
                                        (~self.n.i_ready & self._data_valid))
        with m.If(self.p.i_valid & self.p.o_ready):
            m.d.sync += eq(r_data, self.p.i_data)
        m.d.comb += eq(self.n.o_data, result)
        return m
class ExamplePipeline(UnbufferedPipeline):
    """ an example of how to use the combinatorial pipeline.
    """
    def __init__(self):
        UnbufferedPipeline.__init__(self, ExampleStage)
if __name__ == '__main__':
    # convert both example pipelines to ILANG for inspection / simulation
    dut = ExampleBufPipe()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_bufpipe.il", "w") as f:
        f.write(vl)

    dut = ExamplePipeline()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_combpipe.il", "w") as f:
        f.write(vl)