b77f3dcabe22d09f211ca3fdc5396f83c665159d
1 # This file is Copyright (c) 2019 David Shah <dave@ds0.me>
2 # This file is Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
3 # This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
6 # 1:2 frequency-ratio DDR3 PHY for Lattice's ECP5
12 from nmigen
.lib
.cdc
import FFSynchronizer
13 from nmigen
.utils
import log2_int
15 from lambdasoc
.periph
import Peripheral
17 import gram
.stream
as stream
18 from gram
.common
import *
19 from gram
.phy
.dfi
import Interface
20 from gram
.compat
import Timeline
22 # Lattice ECP5 DDR PHY Initialization --------------------------------------------------------------
25 class ECP5DDRPHYInit(Elaboratable
):
32 def elaborate(self
, platform
):
39 # DDRDLLA instance -------------------------------------------------------------------------
42 m
.submodules
+= Instance("DDRDLLA",
43 i_CLK
=ClockSignal("sync2x"),
44 i_RST
=ResetSignal("init"),
51 m
.submodules
+= FFSynchronizer(_lock
, lock
, o_domain
="init")
52 m
.d
.init
+= lock_d
.eq(lock
)
53 m
.d
.sync
+= new_lock
.eq(lock
& ~lock_d
)
55 # DDRDLLA/DDQBUFM/ECLK initialization sequence ---------------------------------------------
58 (1*t
, [freeze
.eq(1)]), # Freeze DDRDLLA
59 (2*t
, [self
.stop
.eq(1)]), # Stop ECLK domain
60 (3*t
, [self
.reset
.eq(1)]), # Reset ECLK domain
61 (4*t
, [self
.reset
.eq(0)]), # Release ECLK domain reset
62 (5*t
, [self
.stop
.eq(0)]), # Release ECLK domain stop
63 (6*t
, [freeze
.eq(0)]), # Release DDRDLLA freeze
64 (7*t
, [self
.pause
.eq(1)]), # Pause DQSBUFM
65 (8*t
, [update
.eq(1)]), # Update DDRDLLA
66 (9*t
, [update
.eq(0)]), # Release DDRDLLA update
67 (10*t
, [self
.pause
.eq(0)]), # Release DQSBUFM pause
71 m
.d
.comb
+= tl
.trigger
.eq(new_lock
)
73 m
.d
.comb
+= self
.delay
.eq(delay
)
77 # Lattice ECP5 DDR PHY -----------------------------------------------------------------------------
80 class ECP5DDRPHY(Peripheral
, Elaboratable
):
81 def __init__(self
, pads
, sys_clk_freq
=100e6
):
82 super().__init
__(name
="phy")
85 self
._sys
_clk
_freq
= sys_clk_freq
87 databits
= len(self
.pads
.dq
.io
)
88 assert databits
% 8 == 0
91 bank
= self
.csr_bank()
93 self
.burstdet
= bank
.csr(databits
//8, "rw")
96 self
.rdly
+= [bank
.csr(3, "rw", name
="rdly_p0")]
97 self
.rdly
+= [bank
.csr(3, "rw", name
="rdly_p1")]
99 self
._bridge
= self
.bridge(data_width
=32, granularity
=8, alignment
=2)
100 self
.bus
= self
._bridge
.bus
102 addressbits
= len(self
.pads
.a
.o0
)
103 bankbits
= len(self
.pads
.ba
.o0
)
104 nranks
= 1 if not hasattr(self
.pads
, "cs") else len(self
.pads
.cs
.o0
)
105 databits
= len(self
.pads
.dq
.io
)
106 self
.dfi
= Interface(addressbits
, bankbits
, nranks
, 4*databits
, 4)
108 # PHY settings -----------------------------------------------------------------------------
109 tck
= 1/(2*self
._sys
_clk
_freq
)
111 databits
= len(self
.pads
.dq
.io
)
112 nranks
= 1 if not hasattr(self
.pads
, "cs") else len(self
.pads
.cs
.o0
)
113 cl
, cwl
= get_cl_cw("DDR3", tck
)
114 cl_sys_latency
= get_sys_latency(nphases
, cl
)
115 cwl_sys_latency
= get_sys_latency(nphases
, cwl
)
116 rdcmdphase
, rdphase
= get_sys_phases(nphases
, cl_sys_latency
, cl
)
117 wrcmdphase
, wrphase
= get_sys_phases(nphases
, cwl_sys_latency
, cwl
)
118 self
.settings
= PhySettings(
119 phytype
="ECP5DDRPHY",
122 dfi_databits
=4*databits
,
127 rdcmdphase
=rdcmdphase
,
128 wrcmdphase
=wrcmdphase
,
131 read_latency
=2 + cl_sys_latency
+ 2 + log2_int(4//nphases
) + 4,
132 write_latency
=cwl_sys_latency
135 def elaborate(self
, platform
):
138 m
.submodules
.bridge
= self
._bridge
140 tck
= 1/(2*self
._sys
_clk
_freq
)
142 databits
= len(self
.pads
.dq
.io
)
144 burstdet_reg
= Signal(databits
//8)
145 m
.d
.comb
+= self
.burstdet
.r_data
.eq(burstdet_reg
)
148 with m
.If(self
.burstdet
.w_stb
):
149 m
.d
.sync
+= burstdet_reg
.eq(0)
151 # Init -------------------------------------------------------------------------------------
152 m
.submodules
.init
= init
= ECP5DDRPHYInit()
154 # Parameters -------------------------------------------------------------------------------
155 cl
, cwl
= get_cl_cw("DDR3", tck
)
156 cl_sys_latency
= get_sys_latency(nphases
, cl
)
157 cwl_sys_latency
= get_sys_latency(nphases
, cwl
)
159 # DFI Interface ----------------------------------------------------------------------------
164 # Clock --------------------------------------------------------------------------------
166 self
.pads
.clk
.o_clk
.eq(ClockSignal("dramsync")),
167 self
.pads
.clk
.o_fclk
.eq(ClockSignal("sync2x")),
169 for i
in range(len(self
.pads
.clk
.o0
)):
171 self
.pads
.clk
.o0
[i
].eq(0),
172 self
.pads
.clk
.o1
[i
].eq(1),
173 self
.pads
.clk
.o2
[i
].eq(0),
174 self
.pads
.clk
.o3
[i
].eq(1),
177 # Addresses and Commands ---------------------------------------------------------------
179 self
.pads
.a
.o_clk
.eq(ClockSignal("dramsync")),
180 self
.pads
.a
.o_fclk
.eq(ClockSignal("sync2x")),
181 self
.pads
.ba
.o_clk
.eq(ClockSignal("dramsync")),
182 self
.pads
.ba
.o_fclk
.eq(ClockSignal("sync2x")),
184 for i
in range(len(self
.pads
.a
.o0
)):
186 self
.pads
.a
.o0
[i
].eq(dfi
.phases
[0].address
[i
]),
187 self
.pads
.a
.o1
[i
].eq(dfi
.phases
[0].address
[i
]),
188 self
.pads
.a
.o2
[i
].eq(dfi
.phases
[1].address
[i
]),
189 self
.pads
.a
.o3
[i
].eq(dfi
.phases
[1].address
[i
]),
191 for i
in range(len(self
.pads
.ba
.o0
)):
193 self
.pads
.ba
.o0
[i
].eq(dfi
.phases
[0].bank
[i
]),
194 self
.pads
.ba
.o1
[i
].eq(dfi
.phases
[0].bank
[i
]),
195 self
.pads
.ba
.o2
[i
].eq(dfi
.phases
[1].bank
[i
]),
196 self
.pads
.ba
.o3
[i
].eq(dfi
.phases
[1].bank
[i
]),
200 controls
= ["ras", "cas", "we", "clk_en", "odt"]
201 if hasattr(self
.pads
, "reset"):
202 controls
.append("reset")
203 if hasattr(self
.pads
, "cs"):
204 controls
.append("cs")
205 for name
in controls
:
207 getattr(self
.pads
, name
).o_clk
.eq(ClockSignal("dramsync")),
208 getattr(self
.pads
, name
).o_fclk
.eq(ClockSignal("sync2x")),
210 for i
in range(len(getattr(self
.pads
, name
).o0
)):
212 getattr(self
.pads
, name
).o0
[i
].eq(getattr(dfi
.phases
[0], name
)[i
]),
213 getattr(self
.pads
, name
).o1
[i
].eq(getattr(dfi
.phases
[0], name
)[i
]),
214 getattr(self
.pads
, name
).o2
[i
].eq(getattr(dfi
.phases
[1], name
)[i
]),
215 getattr(self
.pads
, name
).o3
[i
].eq(getattr(dfi
.phases
[1], name
)[i
]),
218 # DQ ---------------------------------------------------------------------------------------
222 dqs_postamble
= Signal()
223 dqs_preamble
= Signal()
224 for i
in range(databits
//8):
234 m
.submodules
+= Instance("DQSBUFM",
235 p_DQS_LI_DEL_ADJ
="MINUS",
237 p_DQS_LO_DEL_ADJ
="MINUS",
251 i_SCLK
=ClockSignal("sync"),
252 i_ECLK
=ClockSignal("sync2x"),
253 i_RST
=ResetSignal("dramsync"),
255 i_PAUSE
=init
.pause | self
.rdly
[i
].w_stb
,
258 # Assert LOADNs to use DDRDEL control
266 # Reads (generate shifted DQS clock for reads)
269 i_READCLKSEL0
=self
.rdly
[i
].w_data
[0],
270 i_READCLKSEL1
=self
.rdly
[i
].w_data
[1],
271 i_READCLKSEL2
=self
.rdly
[i
].w_data
[2],
282 # Writes (generate shifted ECLK clock for writes)
287 m
.d
.sync
+= burstdet_reg
[i
].eq(1)
289 # DQS and DM ---------------------------------------------------------------------------
290 dm_o_data
= Signal(8)
291 dm_o_data_d
= Signal(8)
292 dm_o_data_muxed
= Signal(4)
293 m
.d
.comb
+= dm_o_data
.eq(Cat(
294 dfi
.phases
[0].wrdata_mask
[0*databits
//8+i
],
295 dfi
.phases
[0].wrdata_mask
[1*databits
//8+i
],
296 dfi
.phases
[0].wrdata_mask
[2*databits
//8+i
],
297 dfi
.phases
[0].wrdata_mask
[3*databits
//8+i
],
299 dfi
.phases
[1].wrdata_mask
[0*databits
//8+i
],
300 dfi
.phases
[1].wrdata_mask
[1*databits
//8+i
],
301 dfi
.phases
[1].wrdata_mask
[2*databits
//8+i
],
302 dfi
.phases
[1].wrdata_mask
[3*databits
//8+i
]),
304 m
.d
.sync
+= dm_o_data_d
.eq(dm_o_data
)
306 with m
.If(bl8_chunk
):
307 m
.d
.sync
+= dm_o_data_muxed
.eq(dm_o_data_d
[4:])
309 m
.d
.sync
+= dm_o_data_muxed
.eq(dm_o_data
[:4])
311 m
.submodules
+= Instance("ODDRX2DQA",
312 i_RST
=ResetSignal("dramsync"),
313 i_ECLK
=ClockSignal("sync2x"),
314 i_SCLK
=ClockSignal("dramsync"),
316 i_D0
=dm_o_data_muxed
[0],
317 i_D1
=dm_o_data_muxed
[1],
318 i_D2
=dm_o_data_muxed
[2],
319 i_D3
=dm_o_data_muxed
[3],
320 o_Q
=self
.pads
.dm
.o
[i
])
325 Instance("ODDRX2DQSB",
326 i_RST
=ResetSignal("dramsync"),
327 i_ECLK
=ClockSignal("sync2x"),
328 i_SCLK
=ClockSignal(),
335 Instance("TSHX2DQSA",
336 i_RST
=ResetSignal("dramsync"),
337 i_ECLK
=ClockSignal("sync2x"),
338 i_SCLK
=ClockSignal(),
340 i_T0
=~
(dqs_oe | dqs_postamble
),
341 i_T1
=~
(dqs_oe | dqs_preamble
),
347 io_B
=self
.pads
.dqs
.p
[i
]),
350 for j
in range(8*i
, 8*(i
+1)):
354 dq_i_delayed
= Signal()
355 dq_i_data
= Signal(4)
356 dq_o_data
= Signal(8)
357 dq_o_data_d
= Signal(8)
358 dq_o_data_muxed
= Signal(4)
359 m
.d
.comb
+= dq_o_data
.eq(Cat(
360 dfi
.phases
[0].wrdata
[0*databits
+j
],
361 dfi
.phases
[0].wrdata
[1*databits
+j
],
362 dfi
.phases
[0].wrdata
[2*databits
+j
],
363 dfi
.phases
[0].wrdata
[3*databits
+j
],
364 dfi
.phases
[1].wrdata
[0*databits
+j
],
365 dfi
.phases
[1].wrdata
[1*databits
+j
],
366 dfi
.phases
[1].wrdata
[2*databits
+j
],
367 dfi
.phases
[1].wrdata
[3*databits
+j
])
370 m
.d
.sync
+= dq_o_data_d
.eq(dq_o_data
)
371 with m
.If(bl8_chunk
):
372 m
.d
.sync
+= dq_o_data_muxed
.eq(dq_o_data_d
[4:])
374 m
.d
.sync
+= dq_o_data_muxed
.eq(dq_o_data
[:4])
377 Instance("ODDRX2DQA",
378 i_RST
=ResetSignal("dramsync"),
379 i_ECLK
=ClockSignal("sync2x"),
380 i_SCLK
=ClockSignal(),
382 i_D0
=dq_o_data_muxed
[0],
383 i_D1
=dq_o_data_muxed
[1],
384 i_D2
=dq_o_data_muxed
[2],
385 i_D3
=dq_o_data_muxed
[3],
388 p_DEL_MODE
="DQS_ALIGNED_X2",
394 Instance("IDDRX2DQA",
395 i_RST
=ResetSignal("dramsync"),
396 i_ECLK
=ClockSignal("sync2x"),
397 i_SCLK
=ClockSignal(),
413 i_RST
=ResetSignal("dramsync"),
414 i_ECLK
=ClockSignal("sync2x"),
415 i_SCLK
=ClockSignal(),
424 io_B
=self
.pads
.dq
.io
[j
])
427 dfi
.phases
[1].rddata
[j
].eq(dq_i_data
[0]),
428 dfi
.phases
[1].rddata
[1*databits
+j
].eq(dq_i_data
[1]),
429 dfi
.phases
[1].rddata
[2*databits
+j
].eq(dq_i_data
[2]),
430 dfi
.phases
[1].rddata
[3*databits
+j
].eq(dq_i_data
[3]),
433 dfi
.phases
[0].rddata
.eq(dfi
.phases
[1].rddata
),
436 # Read Control Path ------------------------------------------------------------------------
437 # Creates a shift register of read commands coming from the DFI interface. This shift register
438 # is used to control DQS read (internal read pulse of the DQSBUF) and to indicate to the
439 # DFI interface that the read data is valid.
441 # The DQS read must be asserted for 2 sys_clk cycles before the read data is coming back from
442 # the DRAM (see 6.2.4 READ Pulse Positioning Optimization of FPGA-TN-02035-1.2)
444 # The read data valid is asserted for 1 sys_clk cycle when the data is available on the DFI
445 # interface, the latency is the sum of the ODDRX2DQA, CAS, IDDRX2DQA latencies.
446 rddata_en
= Signal(self
.settings
.read_latency
)
447 rddata_en_last
= Signal
.like(rddata_en
)
448 m
.d
.comb
+= rddata_en
.eq(Cat(dfi
.phases
[self
.settings
.rdphase
].rddata_en
, rddata_en_last
))
449 m
.d
.sync
+= rddata_en_last
.eq(rddata_en
)
450 m
.d
.sync
+= [phase
.rddata_valid
.eq(rddata_en
[-1]) for phase
in dfi
.phases
]
451 m
.d
.comb
+= dqs_re
.eq(rddata_en
[cl_sys_latency
+ 1] | rddata_en
[cl_sys_latency
+ 2])
453 # Write Control Path -----------------------------------------------------------------------
454 # Creates a shift register of write commands coming from the DFI interface. This shift register
455 # is used to control DQ/DQS tristates and to select write data of the DRAM burst from the DFI
456 # interface: The PHY is operating in halfrate mode (so provide 4 datas every sys_clk cycles:
457 # 2x for DDR, 2x for halfrate) but DDR3 requires a burst of 8 datas (BL8) for best efficiency.
458 # Writes are then performed in 2 sys_clk cycles and data needs to be selected for each cycle.
459 # FIXME: understand +2
460 wrdata_en
= Signal(cwl_sys_latency
+ 4)
461 wrdata_en_last
= Signal
.like(wrdata_en
)
462 m
.d
.comb
+= wrdata_en
.eq(Cat(dfi
.phases
[self
.settings
.wrphase
].wrdata_en
, wrdata_en_last
))
463 m
.d
.sync
+= wrdata_en_last
.eq(wrdata_en
)
464 m
.d
.comb
+= dq_oe
.eq(wrdata_en
[cwl_sys_latency
+ 1] | wrdata_en
[cwl_sys_latency
+ 2])
465 m
.d
.comb
+= bl8_chunk
.eq(wrdata_en
[cwl_sys_latency
+ 1])
466 m
.d
.comb
+= dqs_oe
.eq(dq_oe
)
468 # Write DQS Postamble/Preamble Control Path ------------------------------------------------
469 # Generates DQS Preamble 1 cycle before the first write and Postamble 1 cycle after the last
470 # write. During writes, DQS tristate is configured as output for at least 4 sys_clk cycles:
471 # 1 for Preamble, 2 for the Write and 1 for the Postamble.
472 m
.d
.comb
+= dqs_preamble
.eq(wrdata_en
[cwl_sys_latency
+ 0] & ~wrdata_en
[cwl_sys_latency
+ 1])
473 m
.d
.comb
+= dqs_postamble
.eq(wrdata_en
[cwl_sys_latency
+ 3] & ~wrdata_en
[cwl_sys_latency
+ 2])