1 """ nmigen implementation of buffered pipeline stage, based on zipcpu:
2 https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
4 this module requires quite a bit of thought to understand how it works
5 (and why it is needed in the first place). reading the above is
6 *strongly* recommended.
8 unlike john dawson's IEEE754 FPU STB/ACK signalling, which requires
9 the STB / ACK signals to raise and lower (on separate clocks) before
10 data may proceed (thus only allowing one piece of data to proceed
11 on *ALTERNATE* cycles), the signalling here is a true pipeline
12 where data will flow on *every* clock when the conditions are right.
14 input acceptance conditions are when:
15 * incoming previous-stage strobe (p.i_valid) is HIGH
16 * outgoing previous-stage ready (p.o_ready) is LOW
18 output transmission conditions are when:
19 * outgoing next-stage strobe (n.o_valid) is HIGH
20 * outgoing next-stage ready (n.i_ready) is LOW
22 the tricky bit is when the input has valid data and the output is not
23 ready to accept it. if it wasn't for the clock synchronisation, it
24 would be possible to tell the input "hey don't send that data, we're
25 not ready". unfortunately, it's not possible to "change the past":
26 the previous stage *has no choice* but to pass on its data.
28 therefore, the incoming data *must* be accepted - and stored: that
29 is the responsibility / contract that this stage *must* accept.
30 on the same clock, it's possible to tell the input that it must
31 not send any more data. this is the "stall" condition.
33 we now effectively have *two* possible pieces of data to "choose" from:
34 the buffered data, and the incoming data. the decision as to which
35 to process and output is based on whether we are in "stall" or not.
36 i.e. when the next stage is no longer ready, the output comes from
37 the buffer if a stall had previously occurred, otherwise it comes
38 direct from processing the input.
40 this allows us to respect a synchronous "travelling STB" with what
41 dan calls a "buffered handshake".
43 it's quite a complex state machine!
from nmigen import Signal, Cat, Const, Mux, Module
from nmigen.cli import verilog, rtlil
from nmigen.hdl.rec import Record, Layout

from collections.abc import Sequence
class PrevControl:
    """ contains signals that come *from* the previous stage (both in and out)
        * i_valid: input from previous stage indicating incoming data is valid
        * o_ready: output to previous stage indicating readiness to accept data
        * i_data : an input - added by the user of this class
    """
    def __init__(self):
        self.i_valid = Signal(name="p_i_valid") # prev   >>in  self
        self.o_ready = Signal(name="p_o_ready") # prev   <<out self

    def connect_in(self, prev):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!
        """
        return [self.i_valid.eq(prev.i_valid),
                prev.o_ready.eq(self.o_ready),
                eq(self.i_data, prev.i_data),
               ]
class NextControl:
    """ contains the signals that go *to* the next stage (both in and out)
        * o_valid: output indicating to next stage that data is valid
        * i_ready: input from next stage indicating that it can accept data
        * o_data : an output - added by the user of this class
    """
    def __init__(self):
        self.o_valid = Signal(name="n_o_valid") # self out>>  next
        self.i_ready = Signal(name="n_i_ready") # self <<in   next

    def connect_to_next(self, nxt):
        """ helper function to connect to the next stage data/valid/ready.
            data/valid is passed *TO* nxt, and ready comes *IN* from nxt.
        """
        return [nxt.i_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.o_ready),
                eq(nxt.i_data, self.o_data),
               ]

    def connect_out(self, nxt):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!
        """
        return [nxt.o_valid.eq(self.o_valid),
                self.i_ready.eq(nxt.i_ready),
                eq(nxt.o_data, self.o_data),
               ]
def eq(o, i):
    """ makes signals equal: a helper routine which identifies if it is being
        passed a list (or tuple) of objects, and calls the objects' eq
        function.

        Record is a special (unusual, recursive) case, where the input
        is specified as a dictionary (which may contain further dictionaries,
        recursively), where the field names of the dictionary must match
        the Record's field spec.
    """
    # normalise a single object into a one-element sequence
    if not isinstance(o, Sequence):
        o, i = [o], [i]
    res = []
    for (ao, ai) in zip(o, i):
        #print ("eq", ao, ai)
        if isinstance(ao, Record):
            # walk the Record layout, recursing into sub-Layouts
            for idx, (field_name, field_shape, _) in enumerate(ao.layout):
                if isinstance(field_shape, Layout):
                    rres = eq(ao.fields[field_name], ai.fields[field_name])
                else:
                    rres = eq(ao.fields[field_name], ai[field_name])
                res += rres
        else:
            res.append(ao.eq(ai))
    return res
class PipelineBase:
    """ Common functions for Pipeline API
    """
    def __init__(self, stage):
        """ pass in a "stage" which may be either a static class or a class
            instance, which has three functions:
            * ispec: returns input signals according to the input specification
            * ospec: returns output signals to the output specification
            * process: takes an input instance and returns processed data

            user must also:
            * add i_data member to PrevControl and
            * add o_data member to NextControl
        """
        self.stage = stage

        # set up input and output IO ACK (prev/next ready/valid)
        self.p = PrevControl()
        self.n = NextControl()

    def connect_to_next(self, nxt):
        """ helper function to connect to the next stage data/valid/ready
        """
        return self.n.connect_to_next(nxt.p)

    def connect_in(self, prev):
        """ helper function to connect stage to an input source.  do not
            use to connect stage-to-stage!
        """
        return self.p.connect_in(prev.p)

    def connect_out(self, nxt):
        """ helper function to connect stage to an output source.  do not
            use to connect stage-to-stage!
        """
        return self.n.connect_out(nxt.n)

    def set_input(self, i):
        """ helper function to set the input data
        """
        return eq(self.p.i_data, i)

    def ports(self):
        """ returns the signals for this stage's external interface """
        return [self.p.i_valid, self.n.i_ready,
                self.n.o_valid, self.p.o_ready,
                self.p.i_data, self.n.o_data   # XXX need flattening!
               ]
class BufferedPipeline(PipelineBase):
    """ buffered pipeline stage.  data and strobe signals travel in sync.
        if ever the input is ready and the output is not, processed data
        is stored in a temporary register.

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        input data p.i_data is read (only), is processed and goes into an
        intermediate result store [process()].  this is updated combinatorially.

        in a non-stall condition, the intermediate result will go into the
        output (update_output).  however if ever there is a stall, it goes
        into r_data instead [update_buffer()].

        when the non-stall condition is released, r_data is the first
        to be transferred to the output [flush_buffer()], and the stall
        condition is cleared.

        on the next cycle (as long as stall is not raised again) the
        input may begin to be processed and transferred directly to output.
    """
    def __init__(self, stage):
        PipelineBase.__init__(self, stage)

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        result = self.stage.ospec()
        r_data = self.stage.ospec()
        if hasattr(self.stage, "setup"):
            self.stage.setup(m, self.p.i_data)

        # establish some combinatorial temporaries
        o_n_validn = Signal(reset_less=True)
        i_p_valid_o_p_ready = Signal(reset_less=True)
        m.d.comb += [o_n_validn.eq(~self.n.o_valid),
                     i_p_valid_o_p_ready.eq(self.p.i_valid & self.p.o_ready),
                    ]

        # store result of processing in combinatorial temporary
        #with m.If(self.p.i_valid): # input is valid: process it
        m.d.comb += eq(result, self.stage.process(self.p.i_data))

        # if not in stall condition, update the temporary register
        with m.If(self.p.o_ready): # not stalled
            m.d.sync += eq(r_data, result) # update buffer

        #with m.If(self.p.i_rst): # reset
        #    m.d.sync += self.n.o_valid.eq(0)
        #    m.d.sync += self.p.o_ready.eq(0)

        with m.If(self.n.i_ready): # next stage is ready
            with m.If(self.p.o_ready): # not stalled
                # nothing in buffer: send (processed) input direct to output
                m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
                             eq(self.n.o_data, result), # update output
                            ]
            with m.Else(): # p.o_ready is false, and something is in buffer.
                # Flush the [already processed] buffer to the output port.
                m.d.sync += [self.n.o_valid.eq(1),
                             eq(self.n.o_data, r_data), # flush buffer
                             # clear stall condition, declare register empty.
                             self.p.o_ready.eq(1),
                            ]
                # ignore input, since p.o_ready is also false.

        # (n.i_ready) is false here: next stage is *not* ready
        with m.Elif(o_n_validn): # next stage being told "ready"
            m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
                         self.p.o_ready.eq(1), # Keep the buffer empty
                         # set the output data (from comb result)
                         eq(self.n.o_data, result),
                        ]

        # (n.i_ready) false and (n.o_valid) true:
        with m.Elif(i_p_valid_o_p_ready):
            # If next stage *is* ready, and not stalled yet, accept input
            m.d.sync += self.p.o_ready.eq(~(self.p.i_valid & self.n.o_valid))

        return m
class ExampleAddStage:
    """ an example of how to use the buffered pipeline, as a class instance
    """
    def ispec(self):
        """ returns a tuple of input signals which will be the incoming data
        """
        return (Signal(16), Signal(16))

    def ospec(self):
        """ returns an output signal which will happen to contain the sum
            of the two inputs
        """
        return Signal(16)

    def process(self, i):
        """ process the input data (sums the values in the tuple) and returns it
        """
        return i[0] + i[1]
class ExampleBufPipeAdd(BufferedPipeline):
    """ an example of how to use the buffered pipeline, using a class instance
    """
    def __init__(self):
        addstage = ExampleAddStage()
        BufferedPipeline.__init__(self, addstage)
class ExampleStage:
    """ an example of how to use the buffered pipeline, in a static class
        fashion
    """
    def ispec():
        return Signal(16)

    def ospec():
        return Signal(16)

    def process(i):
        """ process the input data and returns it (adds 1)
        """
        return i + 1
class ExampleBufPipe(BufferedPipeline):
    """ an example of how to use the buffered pipeline.
    """
    def __init__(self):
        BufferedPipeline.__init__(self, ExampleStage)
class CombPipe(PipelineBase):
    """A simple pipeline stage containing combinational logic that can execute
    completely in one clock cycle.

    Parameters:
    -----------
    input_shape : int or tuple or None
        the shape of ``input.data`` and ``comb_input``
    output_shape : int or tuple or None
        the shape of ``output.data`` and ``comb_output``

    Attributes:
    -----------
    comb_input : Signal, input_shape
        The input to the combinatorial logic
    comb_output: Signal, output_shape
        The output of the combinatorial logic
    """
    def __init__(self, stage):
        PipelineBase.__init__(self, stage)
        self._data_valid = Signal()

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        r_data = self.stage.ispec() # input type
        result = self.stage.ospec() # output data
        if hasattr(self.stage, "setup"):
            self.stage.setup(m, r_data)

        # combinatorially process the registered input into the result
        m.d.comb += eq(result, self.stage.process(r_data))
        m.d.comb += self.n.o_valid.eq(self._data_valid)
        m.d.comb += self.p.o_ready.eq(~self._data_valid | self.n.i_ready)
        # data remains valid until the next stage accepts it
        m.d.sync += self._data_valid.eq(self.p.i_valid | \
                                        (~self.n.i_ready & self._data_valid))
        with m.If(self.p.i_valid & self.p.o_ready):
            m.d.sync += eq(r_data, self.p.i_data)
        m.d.comb += eq(self.n.o_data, result)
        return m
class ExampleCombPipe(CombPipe):
    """ an example of how to use the combinatorial pipeline.
    """
    def __init__(self):
        CombPipe.__init__(self, ExampleStage)
if __name__ == '__main__':
    # emit RTLIL for both example pipelines
    dut = ExampleBufPipe()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_bufpipe.il", "w") as f:
        f.write(vl)

    dut = ExampleCombPipe()
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_combpipe.il", "w") as f:
        f.write(vl)