1 """ nmigen implementation of buffered pipeline stage, based on zipcpu:
2 https://zipcpu.com/blog/2017/08/14/strategies-for-pipelining.html
4 this module requires quite a bit of thought to understand how it works
5 (and why it is needed in the first place). reading the above is
6 *strongly* recommended.
8 unlike john dawson's IEEE754 FPU STB/ACK signalling, which requires
9 the STB / ACK signals to raise and lower (on separate clocks) before
10 data may proceeed (thus only allowing one piece of data to proceed
11 on *ALTERNATE* cycles), the signalling here is a true pipeline
12 where data will flow on *every* clock when the conditions are right.
    input acceptance conditions are when:
        * incoming previous-stage strobe (p.i_valid) is HIGH
        * outgoing previous-stage ready (p.o_ready) is HIGH

    output transmission conditions are when:
        * outgoing next-stage strobe (n.o_valid) is HIGH
        * incoming next-stage ready (n.i_ready) is HIGH
    the tricky bit is when the input has valid data and the output is not
    ready to accept it.  if it wasn't for the clock synchronisation, it
    would be possible to tell the input "hey don't send that data, we're
    not ready".  unfortunately, it's not possible to "change the past":
    the previous stage *has no choice* but to pass on its data.

    therefore, the incoming data *must* be accepted - and stored: that
    is the responsibility / contract that this stage *must* accept.
    on the same clock, it's possible to tell the input that it must
    not send any more data.  this is the "stall" condition.

    we now effectively have *two* possible pieces of data to "choose" from:
    the buffered data, and the incoming data.  the decision as to which
    to process and output is based on whether we are in "stall" or not.
    i.e. when the next stage is no longer ready, the output comes from
    the buffer if a stall had previously occurred, otherwise it comes
    direct from processing the input.

    this allows us to respect a synchronous "travelling STB" with what
    dan calls a "buffered handshake".

    it's quite a complex state machine!
"""
from collections.abc import Sequence

from nmigen import Signal, Cat, Const, Mux, Module
from nmigen.cli import verilog, rtlil
from nmigen.hdl.rec import Record, Layout
54 """ contains signals that come *from* the previous stage (both in and out)
55 * i_valid: input from previous stage indicating incoming data is valid
56 * o_ready: output to next stage indicating readiness to accept data
57 * i_data : an input - added by the user of this class
61 self
.i_valid
= Signal(name
="p_i_valid") # prev >>in self
62 self
.o_ready
= Signal(name
="p_o_ready") # prev <<out self
64 def connect_in(self
, prev
):
65 """ helper function to connect stage to an input source. do not
66 use to connect stage-to-stage!
68 return [self
.i_valid
.eq(prev
.i_valid
),
69 prev
.o_ready
.eq(self
.o_ready
),
70 eq(self
.i_data
, prev
.i_data
),
75 """ contains the signals that go *to* the next stage (both in and out)
76 * o_valid: output indicating to next stage that data is valid
77 * i_ready: input from next stage indicating that it can accept data
78 * o_data : an output - added by the user of this class
81 self
.o_valid
= Signal(name
="n_o_valid") # self out>> next
82 self
.i_ready
= Signal(name
="n_i_ready") # self <<in next
84 def connect_to_next(self
, nxt
):
85 """ helper function to connect to the next stage data/valid/ready.
86 data/valid is passed *TO* nxt, and ready comes *IN* from nxt.
88 return [nxt
.i_valid
.eq(self
.o_valid
),
89 self
.i_ready
.eq(nxt
.o_ready
),
90 eq(nxt
.i_data
, self
.o_data
),
93 def connect_out(self
, nxt
):
94 """ helper function to connect stage to an output source. do not
95 use to connect stage-to-stage!
97 return [nxt
.o_valid
.eq(self
.o_valid
),
98 self
.i_ready
.eq(nxt
.i_ready
),
99 eq(nxt
.o_data
, self
.o_data
),
104 """ makes signals equal: a helper routine which identifies if it is being
105 passsed a list (or tuple) of objects, and calls the objects' eq
108 complex objects (classes) can be used: they must follow the
109 convention of having an eq member function, which takes the
110 responsibility of further calling eq and returning a list of
113 Record is a special (unusual, recursive) case, where the input
114 is specified as a dictionary (which may contain further dictionaries,
115 recursively), where the field names of the dictionary must match
116 the Record's field spec.
118 if not isinstance(o
, Sequence
):
121 for (ao
, ai
) in zip(o
, i
):
122 #print ("eq", ao, ai)
123 if isinstance(ao
, Record
):
124 for idx
, (field_name
, field_shape
, _
) in enumerate(ao
.layout
):
125 if isinstance(field_shape
, Layout
):
126 rres
= eq(ao
.fields
[field_name
], ai
.fields
[field_name
])
128 rres
= eq(ao
.fields
[field_name
], ai
[field_name
])
131 res
.append(ao
.eq(ai
))
136 """ Common functions for Pipeline API
138 def __init__(self
, stage
):
139 """ pass in a "stage" which may be either a static class or a class
140 instance, which has four functions (one optional):
141 * ispec: returns input signals according to the input specification
142 * ispec: returns output signals to the output specification
143 * process: takes an input instance and returns processed data
144 * setup: performs any module linkage if the stage uses one.
147 * add i_data member to PrevControl and
148 * add o_data member to NextControl
152 # set up input and output IO ACK (prev/next ready/valid)
153 self
.p
= PrevControl()
154 self
.n
= NextControl()
156 def connect_to_next(self
, nxt
):
157 """ helper function to connect to the next stage data/valid/ready.
159 return self
.n
.connect_to_next(nxt
.p
)
161 def connect_in(self
, prev
):
162 """ helper function to connect stage to an input source. do not
163 use to connect stage-to-stage!
165 return self
.p
.connect_in(prev
.p
)
167 def connect_out(self
, nxt
):
168 """ helper function to connect stage to an output source. do not
169 use to connect stage-to-stage!
171 return self
.n
.connect_out(nxt
.n
)
173 def set_input(self
, i
):
174 """ helper function to set the input data
176 return eq(self
.p
.i_data
, i
)
179 return [self
.p
.i_valid
, self
.n
.i_ready
,
180 self
.n
.o_valid
, self
.p
.o_ready
,
181 self
.p
.i_data
, self
.n
.o_data
# XXX need flattening!
class BufferedPipeline(PipelineBase):
    """ buffered pipeline stage.  data and strobe signals travel in sync.
        if ever the input is ready and the output is not, processed data
        is stored in a temporary register.

        stage-1   p.i_valid >>in   stage   n.o_valid out>>   stage+1
        stage-1   p.o_ready <<out  stage   n.i_ready <<in    stage+1
        stage-1   p.i_data  >>in   stage   n.o_data  out>>   stage+1

        input data p.i_data is read (only), is processed and goes into an
        intermediate result store [process()].  this is updated
        combinatorially.

        in a non-stall condition, the intermediate result will go into the
        output (update_output).  however if ever there is a stall, it goes
        into r_data instead [update_buffer()].

        when the non-stall condition is released, r_data is the first
        to be transferred to the output [flush_buffer()], and the stall
        condition is cleared.

        on the next cycle (as long as stall is not raised again) the
        input may begin to be processed and transferred directly to output.
    """
    def __init__(self, stage):
        PipelineBase.__init__(self, stage)

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        # combinatorial result of process(), and the stall-time buffer.
        # both use the stage's *output* spec.
        result = self.stage.ospec()
        r_data = self.stage.ospec()
        if hasattr(self.stage, "setup"):
            # optional hook: lets the stage link a submodule to the input
            self.stage.setup(m, self.p.i_data)

        # establish some combinatorial temporaries
        o_n_validn = Signal(reset_less=True)           # ~n.o_valid
        i_p_valid_o_p_ready = Signal(reset_less=True)  # input accepted
        m.d.comb += [o_n_validn.eq(~self.n.o_valid),
                     i_p_valid_o_p_ready.eq(self.p.i_valid & self.p.o_ready),
        ]

        # store result of processing in combinatorial temporary
        #with m.If(self.p.i_valid): # input is valid: process it
        m.d.comb += eq(result, self.stage.process(self.p.i_data))

        # if not in stall condition, update the temporary register
        with m.If(self.p.o_ready): # not stalled
            m.d.sync += eq(r_data, result) # update buffer

        #with m.If(self.p.i_rst): # reset
        #    m.d.sync += self.n.o_valid.eq(0)
        #    m.d.sync += self.p.o_ready.eq(0)

        with m.If(self.n.i_ready): # next stage is ready
            with m.If(self.p.o_ready): # not stalled
                # nothing in buffer: send (processed) input direct to output
                m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
                             eq(self.n.o_data, result), # update output
                            ]
            with m.Else(): # p.o_ready is false, and something is in buffer.
                # Flush the [already processed] buffer to the output port.
                m.d.sync += [self.n.o_valid.eq(1),
                             eq(self.n.o_data, r_data), # flush buffer
                             # clear stall condition, declare register empty.
                             self.p.o_ready.eq(1),
                            ]
                # ignore input, since p.o_ready is also false.

        # from here down, (n.i_ready) is false: next stage is NOT ready
        with m.Elif(o_n_validn): # ... but our output register is empty
            # output not valid yet, so it is safe to load it directly
            m.d.sync += [self.n.o_valid.eq(self.p.i_valid),
                         self.p.o_ready.eq(1), # Keep the buffer empty
                         # set the output data (from comb result)
                         eq(self.n.o_data, result),
                        ]

        # (n.i_ready) false and (n.o_valid) true: output is blocked.
        with m.Elif(i_p_valid_o_p_ready):
            # input was accepted this cycle while the output is blocked:
            # r_data now holds it, so raise the stall (drop o_ready) when
            # both the input and the output register are occupied.
            m.d.sync += self.p.o_ready.eq(~(self.p.i_valid & self.n.o_valid))

        return m
274 class ExampleAddStage
:
275 """ an example of how to use the buffered pipeline, as a class instance
279 """ returns a tuple of input signals which will be the incoming data
281 return (Signal(16), Signal(16))
284 """ returns an output signal which will happen to contain the sum
289 def process(self
, i
):
290 """ process the input data (sums the values in the tuple) and returns it
class ExampleBufPipeAdd(BufferedPipeline):
    """ an example of how to use the buffered pipeline, using a class instance
        as the stage
    """
    def __init__(self):
        # the stage is an *instance* here (contrast with ExampleBufPipe,
        # which passes a bare class)
        BufferedPipeline.__init__(self, ExampleAddStage())
305 """ an example of how to use the buffered pipeline, in a static class
316 """ process the input data and returns it (adds 1)
class ExampleBufPipe(BufferedPipeline):
    """ an example of how to use the buffered pipeline, passing a static
        class as the stage.
    """
    def __init__(self):
        super().__init__(ExampleStage)
class CombPipe(PipelineBase):
    """A simple pipeline stage containing combinational logic that can execute
    completely in one clock cycle.

    Attributes:
    -----------
    r_data : Signal, input_shape
        A temporary (buffered) copy of a prior (valid) input
    result: Signal, output_shape
        The output of the combinatorial logic
    """
    def __init__(self, stage):
        PipelineBase.__init__(self, stage)
        # registered flag: the stage currently holds valid data
        self._data_valid = Signal()

        # set up the input and output data
        self.p.i_data = stage.ispec() # input type
        self.n.o_data = stage.ospec() # output type

    def elaborate(self, platform):
        m = Module()

        r_data = self.stage.ispec() # input type
        result = self.stage.ospec() # output data
        if hasattr(self.stage, "setup"):
            # optional hook: lets the stage link a submodule to r_data
            self.stage.setup(m, r_data)

        # process the registered input combinatorially
        m.d.comb += eq(result, self.stage.process(r_data))
        # output is valid whenever we hold data
        m.d.comb += self.n.o_valid.eq(self._data_valid)
        # we can accept input when empty, or when the next stage drains us
        m.d.comb += self.p.o_ready.eq(~self._data_valid | self.n.i_ready)
        # hold the valid flag while the next stage is not ready
        m.d.sync += self._data_valid.eq(self.p.i_valid | \
                                        (~self.n.i_ready & self._data_valid))
        with m.If(self.p.i_valid & self.p.o_ready):
            # handshake complete: capture the incoming data
            m.d.sync += eq(r_data, self.p.i_data)
        m.d.comb += eq(self.n.o_data, result)
        return m
class ExampleCombPipe(CombPipe):
    """ an example of how to use the combinatorial pipeline, passing a
        static class as the stage.
    """
    def __init__(self):
        super().__init__(ExampleStage)
def _write_il(dut, fname):
    """ convert dut to RTLIL and write it to fname.

        factored out: the two __main__ cases below were identical apart
        from the DUT and the output filename.
    """
    vl = rtlil.convert(dut, ports=dut.ports())
    with open(fname, "w") as f:
        f.write(vl)


if __name__ == '__main__':
    _write_il(ExampleBufPipe(), "test_bufpipe.il")
    _write_il(ExampleCombPipe(), "test_combpipe.il")