not in any way intended for production use.  this runs a FSM that:

* reads the Program Counter from StateRegs
* reads an instruction from a fixed-size Test Memory
* issues it to the Simple Core
* waits for it to complete
* does it all over again

the purpose of this module is to verify the functional correctness
of the Function Units in the absolute simplest and clearest possible
way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Return the 32-bit instruction word for *pc* from a fetched row.

    f_instr_o: data returned by instruction memory.  Its ``width``
               attribute is either 32 (one instruction per row) or
               64 (two instructions per row).
    pc:        program counter; only bit 2 is inspected, and only in
               the 64-bit case, to pick the upper or lower word.
    """
    if f_instr_o.width == 32:
        # row is exactly one instruction wide: return it unchanged
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
63 # gets state input or reads from state regfile
# gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """Drive *res* from an incoming override or from the state regfile.

    When ``state_i.ok`` is set, the caller is overriding the value and
    ``state_i.data`` is forwarded to *res* combinatorially.  Otherwise a
    read of row *regnum* of *regfile* is initiated; because regfile reads
    take one clock, the result is only forwarded on the following cycle
    (tracked by ``res_ok_delay``).  Nothing is driven while held in reset.

    NOTE(review): the ``comb``/``sync`` bindings, the ``~core_rst`` guard
    and the ``m.Else()`` arm were lost in the source extraction and have
    been reconstructed here - confirm against the repository original.
    """
    comb, sync = m.d.comb, m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert

    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)     # "_N": use the bit-inverted register
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)     # "_N": use the bit-inverted register
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)     # "_N": use the bit-inverted register
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status

    this is identical to _get_predcr in ISACaller

    * idx selects which bit of each CR field to test (LT/GT/EQ/SO)
    * invert indicates the tested CR bit is to be inverted before use

    NOTE(review): the per-case ``invert`` assignments were lost in the
    source extraction.  Each pair below tests the same CR bit with
    opposite polarity (e.g. "LT" passes when CR.LT is set, "GE" when it
    is clear) - confirm the exact polarity against _get_predcr in
    ISACaller.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test if microwatt compatibility is to be enabled
169 self
.microwatt_compat
= (hasattr(pspec
, "microwatt_compat") and
170 (pspec
.microwatt_compat
== True))
171 self
.alt_reset
= Signal(reset_less
=True) # not connected yet (microwatt)
173 # test is SVP64 is to be enabled
174 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
176 # and if regfiles are reduced
177 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
178 (pspec
.regreduce
== True))
180 # and if overlap requested
181 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
182 (pspec
.allow_overlap
== True))
184 # and get the core domain
185 self
.core_domain
= "coresync"
186 if (hasattr(pspec
, "core_domain") and
187 isinstance(pspec
.core_domain
, str)):
188 self
.core_domain
= pspec
.core_domain
190 # JTAG interface. add this right at the start because if it's
191 # added it *modifies* the pspec, by adding enable/disable signals
192 # for parts of the rest of the core
193 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
194 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
195 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
197 # XXX MUST keep this up-to-date with litex, and
198 # soc-cocotb-sim, and err.. all needs sorting out, argh
201 'eint', 'gpio', 'mspi0',
202 # 'mspi1', - disabled for now
203 # 'pwm', 'sd0', - disabled for now
205 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
206 domain
=self
.dbg_domain
)
207 # add signals to pspec to enable/disable icache and dcache
208 # (or data and intstruction wishbone if icache/dcache not included)
209 # https://bugs.libre-soc.org/show_bug.cgi?id=520
210 # TODO: do we actually care if these are not domain-synchronised?
211 # honestly probably not.
212 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
213 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
214 self
.wb_sram_en
= self
.jtag
.wb_sram_en
216 self
.wb_sram_en
= Const(1)
218 # add 4k sram blocks?
219 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
220 pspec
.sram4x4kblock
== True)
224 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
228 # add interrupt controller?
229 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
231 self
.xics_icp
= XICS_ICP()
232 self
.xics_ics
= XICS_ICS()
233 self
.int_level_i
= self
.xics_ics
.int_level_i
235 self
.ext_irq
= Signal()
237 # add GPIO peripheral?
238 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
240 self
.simple_gpio
= SimpleGPIO()
241 self
.gpio_o
= self
.simple_gpio
.gpio_o
243 # main instruction core. suitable for prototyping / demo only
244 self
.core
= core
= NonProductionCore(pspec
)
245 self
.core_rst
= ResetSignal(self
.core_domain
)
247 # instruction decoder. goes into Trap Record
248 #pdecode = create_pdecode()
249 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
250 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
251 opkls
=IssuerDecode2ToOperand
,
252 svp64_en
=self
.svp64_en
,
253 regreduce_en
=self
.regreduce_en
)
254 pdecode
= self
.pdecode2
.dec
257 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
259 self
.update_svstate
= Signal() # set this if updating svstate
260 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
262 # Test Instruction memory
263 if hasattr(core
, "icache"):
264 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
265 # truly dreadful. needs a huge reorg.
266 pspec
.icache
= core
.icache
267 self
.imem
= ConfigFetchUnit(pspec
).fu
270 self
.dbg
= CoreDebug()
271 self
.dbg_rst_i
= Signal(reset_less
=True)
273 # instruction go/monitor
274 self
.pc_o
= Signal(64, reset_less
=True)
275 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
276 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
277 self
.svstate_i
= Data(64, "svstate_i") # ditto
278 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
279 self
.busy_o
= Signal(reset_less
=True)
280 self
.memerr_o
= Signal(reset_less
=True)
282 # STATE regfile read /write ports for PC, MSR, SVSTATE
283 staterf
= self
.core
.regs
.rf
['state']
284 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
285 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
286 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
288 self
.state_w_msr
= staterf
.w_ports
['d_wr2'] # MSR wr
289 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
290 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
292 # DMI interface access
293 intrf
= self
.core
.regs
.rf
['int']
294 fastrf
= self
.core
.regs
.rf
['fast']
295 crrf
= self
.core
.regs
.rf
['cr']
296 xerrf
= self
.core
.regs
.rf
['xer']
297 self
.int_r
= intrf
.r_ports
['dmi'] # INT DMI read
298 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR DMI read
299 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER DMI read
300 self
.fast_r
= fastrf
.r_ports
['dmi'] # FAST DMI read
304 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
305 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
307 # hack method of keeping an eye on whether branch/trap set the PC
308 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
309 self
.state_nia
.wen
.name
= 'state_nia_wen'
310 # and whether SPR pipeline sets DEC or TB
311 self
.state_spr
= self
.core
.regs
.rf
['state'].w_ports
['state1']
313 # pulse to synchronize the simulator at instruction end
314 self
.insn_done
= Signal()
316 # indicate any instruction still outstanding, in execution
317 self
.any_busy
= Signal()
320 # store copies of predicate masks
321 self
.srcmask
= Signal(64)
322 self
.dstmask
= Signal(64)
324 # sigh, the wishbone addresses are not wishbone-compliant in microwatt
325 if self
.microwatt_compat
:
326 self
.ibus_adr
= Signal(32, name
='wishbone_insn_out.adr')
327 self
.dbus_adr
= Signal(32, name
='wishbone_data_out.adr')
329 # add an output of the PC and instruction, and whether it was requested
330 # this is for verilator debug purposes
331 if self
.microwatt_compat
:
332 self
.nia
= Signal(64)
333 self
.msr_o
= Signal(64)
334 self
.nia_req
= Signal(1)
335 self
.insn
= Signal(32)
336 self
.ldst_req
= Signal(1)
337 self
.ldst_addr
= Signal(1)
339 # for pausing dec/tb during an SPR pipeline event, this
340 # ensures that an SPR write (mtspr) to TB or DEC does not
341 # get overwritten by the DEC/TB FSM
342 self
.pause_dec_tb
= Signal()
344 def setup_peripherals(self
, m
):
345 comb
, sync
= m
.d
.comb
, m
.d
.sync
347 # okaaaay so the debug module must be in coresync clock domain
348 # but NOT its reset signal. to cope with this, set every single
349 # submodule explicitly in coresync domain, debug and JTAG
350 # in their own one but using *external* reset.
351 csd
= DomainRenamer(self
.core_domain
)
352 dbd
= DomainRenamer(self
.dbg_domain
)
354 if self
.microwatt_compat
:
355 m
.submodules
.core
= core
= self
.core
357 m
.submodules
.core
= core
= csd(self
.core
)
359 # this _so_ needs sorting out. ICache is added down inside
360 # LoadStore1 and is already a submodule of LoadStore1
361 if not isinstance(self
.imem
, ICache
):
362 m
.submodules
.imem
= imem
= csd(self
.imem
)
364 # set up JTAG Debug Module (in correct domain)
365 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
367 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
368 # TODO: UART2GDB mux, here, from external pin
369 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
370 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
372 # fixup the clocks in microwatt-compat mode (but leave resets alone
373 # so that microwatt soc.vhdl can pull a reset on the core or DMI
374 # can do it, just like in TestIssuer)
375 if self
.microwatt_compat
:
376 intclk
= ClockSignal(self
.core_domain
)
377 dbgclk
= ClockSignal(self
.dbg_domain
)
378 if self
.core_domain
!= 'sync':
379 comb
+= intclk
.eq(ClockSignal())
380 if self
.dbg_domain
!= 'sync':
381 comb
+= dbgclk
.eq(ClockSignal())
383 # drop the first 3 bits of the incoming wishbone addresses
384 # this can go if using later versions of microwatt (not now)
385 if self
.microwatt_compat
:
386 ibus
= self
.imem
.ibus
387 dbus
= self
.core
.l0
.cmpi
.wb_bus()
388 comb
+= self
.ibus_adr
.eq(Cat(Const(0, 3), ibus
.adr
))
389 comb
+= self
.dbus_adr
.eq(Cat(Const(0, 3), dbus
.adr
))
390 # microwatt verilator debug purposes
391 pi
= self
.core
.l0
.cmpi
.pi
.pi
392 comb
+= self
.ldst_req
.eq(pi
.addr_ok_o
)
393 comb
+= self
.ldst_addr
.eq(pi
.addr
)
395 cur_state
= self
.cur_state
397 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
399 for i
, sram
in enumerate(self
.sram4k
):
400 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
401 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
403 # XICS interrupt handler
405 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
406 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
407 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
408 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
410 sync
+= cur_state
.eint
.eq(self
.ext_irq
) # connect externally
412 # GPIO test peripheral
414 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
416 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
417 # XXX causes litex ECP5 test to get wrong idea about input and output
418 # (but works with verilator sim *sigh*)
419 # if self.gpio and self.xics:
420 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
422 # instruction decoder
423 pdecode
= create_pdecode()
424 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
426 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
428 # clock delay power-on reset
429 cd_por
= ClockDomain(reset_less
=True)
430 cd_sync
= ClockDomain()
431 m
.domains
+= cd_por
, cd_sync
432 core_sync
= ClockDomain(self
.core_domain
)
433 if self
.core_domain
!= "sync":
434 m
.domains
+= core_sync
435 if self
.dbg_domain
!= "sync":
436 dbg_sync
= ClockDomain(self
.dbg_domain
)
437 m
.domains
+= dbg_sync
439 # create a delay, but remember it is in the power-on-reset clock domain!
440 ti_rst
= Signal(reset_less
=True)
441 delay
= Signal(range(4), reset
=3)
442 stop_delay
= Signal(range(16), reset
=5)
443 with m
.If(delay
!= 0):
444 m
.d
.por
+= delay
.eq(delay
- 1) # decrement... in POR domain!
445 with m
.If(stop_delay
!= 0):
446 m
.d
.por
+= stop_delay
.eq(stop_delay
- 1) # likewise
447 comb
+= cd_por
.clk
.eq(ClockSignal())
449 # power-on reset delay
450 core_rst
= ResetSignal(self
.core_domain
)
451 if self
.core_domain
!= "sync":
452 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
453 comb
+= core_rst
.eq(ti_rst
)
455 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
456 comb
+= core_rst
.eq(1)
457 with m
.If(stop_delay
!= 0):
458 # run DMI core-stop as well but on an extra couple of cycles
459 comb
+= dbg
.core_stopped_i
.eq(1)
461 # connect external reset signal to DMI Reset
462 if self
.dbg_domain
!= "sync":
463 dbg_rst
= ResetSignal(self
.dbg_domain
)
464 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
466 # busy/halted signals from core
467 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
468 comb
+= self
.busy_o
.eq(core_busy_o
)
469 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
471 # temporary hack: says "go" immediately for both address gen and ST
472 # XXX: st.go_i is set to 1 cycle delay to reduce combinatorial chains
474 ldst
= core
.fus
.fus
['ldst0']
475 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
476 # link addr-go direct to rel
477 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
478 m
.d
.sync
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
480 def do_dmi(self
, m
, dbg
):
481 """deals with DMI debug requests
483 currently only provides read requests for the INT regfile, CR and XER
484 it will later also deal with *writing* to these regfiles.
488 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
490 intrf
= self
.core
.regs
.rf
['int']
491 fastrf
= self
.core
.regs
.rf
['fast']
493 with m
.If(d_reg
.req
): # request for regfile access being made
494 # TODO: error-check this
495 # XXX should this be combinatorial? sync better?
497 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
499 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
500 comb
+= self
.int_r
.ren
.eq(1)
501 d_reg_delay
= Signal()
502 sync
+= d_reg_delay
.eq(d_reg
.req
)
503 with m
.If(d_reg_delay
):
504 # data arrives one clock later
505 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
506 comb
+= d_reg
.ack
.eq(1)
509 with m
.If(d_fast
.req
): # request for regfile access being made
511 comb
+= self
.fast_r
.ren
.eq(1 << d_fast
.addr
)
513 comb
+= self
.fast_r
.addr
.eq(d_fast
.addr
)
514 comb
+= self
.fast_r
.ren
.eq(1)
515 d_fast_delay
= Signal()
516 sync
+= d_fast_delay
.eq(d_fast
.req
)
517 with m
.If(d_fast_delay
):
518 # data arrives one clock later
519 comb
+= d_fast
.data
.eq(self
.fast_r
.o_data
)
520 comb
+= d_fast
.ack
.eq(1)
522 # sigh same thing for CR debug
523 with m
.If(d_cr
.req
): # request for regfile access being made
524 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
525 d_cr_delay
= Signal()
526 sync
+= d_cr_delay
.eq(d_cr
.req
)
527 with m
.If(d_cr_delay
):
528 # data arrives one clock later
529 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
530 comb
+= d_cr
.ack
.eq(1)
533 with m
.If(d_xer
.req
): # request for regfile access being made
534 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
535 d_xer_delay
= Signal()
536 sync
+= d_xer_delay
.eq(d_xer
.req
)
537 with m
.If(d_xer_delay
):
538 # data arrives one clock later
539 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
540 comb
+= d_xer
.ack
.eq(1)
542 def tb_dec_fsm(self
, m
, spr_dec
):
545 this is a FSM for updating either dec or tb. it runs alternately
546 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
547 value to DEC, however the regfile has "passthrough" on it so this
550 see v3.0B p1097-1099 for Timer Resource and p1065 and p1076
553 comb
, sync
= m
.d
.comb
, m
.d
.sync
554 state_rf
= self
.core
.regs
.rf
['state']
555 state_r_dectb
= state_rf
.r_ports
['issue'] # DEC/TB
556 state_w_dectb
= state_rf
.w_ports
['issue'] # DEC/TB
560 # initiates read of current DEC
561 with m
.State("DEC_READ"):
562 comb
+= state_r_dectb
.ren
.eq(1<<StateRegs
.DEC
)
563 with m
.If(~self
.pause_dec_tb
):
566 # waits for DEC read to arrive (1 cycle), updates with new value
567 # respects if dec/tb writing has been paused
568 with m
.State("DEC_WRITE"):
569 with m
.If(self
.pause_dec_tb
):
570 # if paused, return to reading
574 # TODO: MSR.LPCR 32-bit decrement mode
575 comb
+= new_dec
.eq(state_r_dectb
.o_data
- 1)
576 comb
+= state_w_dectb
.wen
.eq(1<<StateRegs
.DEC
)
577 comb
+= state_w_dectb
.i_data
.eq(new_dec
)
578 # copy to cur_state for decoder, for an interrupt
579 sync
+= spr_dec
.eq(new_dec
)
582 # initiates read of current TB
583 with m
.State("TB_READ"):
584 comb
+= state_r_dectb
.ren
.eq(1<<StateRegs
.TB
)
585 with m
.If(~self
.pause_dec_tb
):
588 # waits for read TB to arrive, initiates write of current TB
589 # respects if dec/tb writing has been paused
590 with m
.State("TB_WRITE"):
591 with m
.If(self
.pause_dec_tb
):
592 # if paused, return to reading
596 comb
+= new_tb
.eq(state_r_dectb
.o_data
+ 1)
597 comb
+= state_w_dectb
.wen
.eq(1<<StateRegs
.TB
)
598 comb
+= state_w_dectb
.i_data
.eq(new_tb
)
603 def elaborate(self
, platform
):
606 comb
, sync
= m
.d
.comb
, m
.d
.sync
607 cur_state
= self
.cur_state
608 pdecode2
= self
.pdecode2
611 # set up peripherals and core
612 core_rst
= self
.core_rst
613 self
.setup_peripherals(m
)
615 # reset current state if core reset requested
617 m
.d
.sync
+= self
.cur_state
.eq(0)
619 # check halted condition: requested PC to execute matches DMI stop addr
620 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
623 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
625 comb
+= dbg
.core_stopped_i
.eq(1)
626 comb
+= dbg
.terminate_i
.eq(1)
628 # PC and instruction from I-Memory
629 comb
+= self
.pc_o
.eq(cur_state
.pc
)
630 self
.pc_changed
= Signal() # note write to PC
631 self
.msr_changed
= Signal() # note write to MSR
632 self
.sv_changed
= Signal() # note write to SVSTATE
634 # read state either from incoming override or from regfile
635 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
636 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
638 self
.state_r_msr
, StateRegs
.MSR
)
639 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
641 self
.state_r_pc
, StateRegs
.PC
)
642 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
643 "svstate", # read SVSTATE
644 self
.state_r_sv
, StateRegs
.SVSTATE
)
646 # don't write pc every cycle
647 comb
+= self
.state_w_pc
.wen
.eq(0)
648 comb
+= self
.state_w_pc
.i_data
.eq(0)
650 # connect up debug state. note "combinatorially same" below,
651 # this is a bit naff, passing state over in the dbg class, but
652 # because it is combinatorial it achieves the desired goal
653 comb
+= dbg
.state
.eq(state
)
655 # this bit doesn't have to be in the FSM: connect up to read
656 # regfiles on demand from DMI
659 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
660 # (which uses that in PowerDecoder2 to raise 0x900 exception)
661 self
.tb_dec_fsm(m
, cur_state
.dec
)
663 # while stopped, allow updating the MSR, PC and SVSTATE.
664 # these are mainly for debugging purposes (including DMI/JTAG)
665 with m
.If(dbg
.core_stopped_i
):
666 with m
.If(self
.pc_i
.ok
):
667 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
668 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
669 sync
+= self
.pc_changed
.eq(1)
670 with m
.If(self
.msr_i
.ok
):
671 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
672 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
673 sync
+= self
.msr_changed
.eq(1)
674 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
675 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
676 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
677 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
678 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
679 sync
+= self
.sv_changed
.eq(1)
681 # start renaming some of the ports to match microwatt
682 if self
.microwatt_compat
:
683 self
.core
.o
.core_terminate_o
.name
= "terminated_out"
684 # names of DMI interface
685 self
.dbg
.dmi
.addr_i
.name
= 'dmi_addr'
686 self
.dbg
.dmi
.din
.name
= 'dmi_din'
687 self
.dbg
.dmi
.dout
.name
= 'dmi_dout'
688 self
.dbg
.dmi
.req_i
.name
= 'dmi_req'
689 self
.dbg
.dmi
.we_i
.name
= 'dmi_wr'
690 self
.dbg
.dmi
.ack_o
.name
= 'dmi_ack'
691 # wishbone instruction bus
692 ibus
= self
.imem
.ibus
693 ibus
.adr
.name
= 'wishbone_insn_out.adr'
694 ibus
.dat_w
.name
= 'wishbone_insn_out.dat'
695 ibus
.sel
.name
= 'wishbone_insn_out.sel'
696 ibus
.cyc
.name
= 'wishbone_insn_out.cyc'
697 ibus
.stb
.name
= 'wishbone_insn_out.stb'
698 ibus
.we
.name
= 'wishbone_insn_out.we'
699 ibus
.dat_r
.name
= 'wishbone_insn_in.dat'
700 ibus
.ack
.name
= 'wishbone_insn_in.ack'
701 ibus
.stall
.name
= 'wishbone_insn_in.stall'
703 dbus
= self
.core
.l0
.cmpi
.wb_bus()
704 dbus
.adr
.name
= 'wishbone_data_out.adr'
705 dbus
.dat_w
.name
= 'wishbone_data_out.dat'
706 dbus
.sel
.name
= 'wishbone_data_out.sel'
707 dbus
.cyc
.name
= 'wishbone_data_out.cyc'
708 dbus
.stb
.name
= 'wishbone_data_out.stb'
709 dbus
.we
.name
= 'wishbone_data_out.we'
710 dbus
.dat_r
.name
= 'wishbone_data_in.dat'
711 dbus
.ack
.name
= 'wishbone_data_in.ack'
712 dbus
.stall
.name
= 'wishbone_data_in.stall'
717 yield from self
.pc_i
.ports()
718 yield from self
.msr_i
.ports()
721 yield from self
.core
.ports()
722 yield from self
.imem
.ports()
723 yield self
.core_bigendian_i
729 def external_ports(self
):
730 if self
.microwatt_compat
:
731 ports
= [self
.core
.o
.core_terminate_o
,
733 self
.alt_reset
, # not connected yet
734 self
.nia
, self
.insn
, self
.nia_req
, self
.msr_o
,
735 self
.ldst_req
, self
.ldst_addr
,
739 ports
+= list(self
.dbg
.dmi
.ports())
740 # for dbus/ibus microwatt, exclude err btw and cti
741 for name
, sig
in self
.imem
.ibus
.fields
.items():
742 if name
not in ['err', 'bte', 'cti', 'adr']:
744 for name
, sig
in self
.core
.l0
.cmpi
.wb_bus().fields
.items():
745 if name
not in ['err', 'bte', 'cti', 'adr']:
747 # microwatt non-compliant with wishbone
748 ports
.append(self
.ibus_adr
)
749 ports
.append(self
.dbus_adr
)
752 ports
= self
.pc_i
.ports()
753 ports
= self
.msr_i
.ports()
754 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
758 ports
+= list(self
.jtag
.external_ports())
760 # don't add DMI if JTAG is enabled
761 ports
+= list(self
.dbg
.dmi
.ports())
763 ports
+= list(self
.imem
.ibus
.fields
.values())
764 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
767 for sram
in self
.sram4k
:
768 ports
+= list(sram
.bus
.fields
.values())
771 ports
+= list(self
.xics_icp
.bus
.fields
.values())
772 ports
+= list(self
.xics_ics
.bus
.fields
.values())
773 ports
.append(self
.int_level_i
)
775 ports
.append(self
.ext_irq
)
778 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
779 ports
.append(self
.gpio_o
)
787 class TestIssuerInternal(TestIssuerBase
):
788 """TestIssuer - reads instructions from TestMemory and issues them
790 efficiency and speed is not the main goal here: functional correctness
791 and code clarity is. optimisations (which almost 100% interfere with
792 easy understanding) come later.
795 def fetch_fsm(self
, m
, dbg
, core
, nia
, is_svp64_mode
,
796 fetch_pc_o_ready
, fetch_pc_i_valid
,
797 fetch_insn_o_valid
, fetch_insn_i_ready
):
800 this FSM performs fetch of raw instruction data, partial-decodes
801 it 32-bit at a time to detect SVP64 prefixes, and will optionally
802 read a 2nd 32-bit quantity if that occurs.
806 pdecode2
= self
.pdecode2
807 cur_state
= self
.cur_state
808 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
809 pc
, msr
, svstate
= cur_state
.pc
, cur_state
.msr
, cur_state
.svstate
811 # also note instruction fetch failed
812 if hasattr(core
, "icache"):
813 fetch_failed
= core
.icache
.i_out
.fetch_failed
816 fetch_failed
= Const(0, 1)
819 # set priv / virt mode on I-Cache, sigh
820 if isinstance(self
.imem
, ICache
):
821 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
822 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
824 with m
.FSM(name
='fetch_fsm'):
826 # allow fetch to not run at startup due to I-Cache reset not
827 # having time to settle. power-on-reset holds dbg.core_stopped_i
828 with m
.State("PRE_IDLE"):
829 with m
.If(~dbg
.core_stopped_i
& ~dbg
.core_stop_o
):
833 with m
.State("IDLE"):
834 # fetch allowed if not failed and stopped but not stepping
835 # (see dmi.py for how core_stop_o is generated)
836 with m
.If(~fetch_failed
& ~dbg
.core_stop_o
):
837 comb
+= fetch_pc_o_ready
.eq(1)
838 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
840 # instruction allowed to go: start by reading the PC
841 # capture the PC and also drop it into Insn Memory
842 # we have joined a pair of combinatorial memory
843 # lookups together. this is Generally Bad.
844 comb
+= self
.imem
.a_pc_i
.eq(pc
)
845 comb
+= self
.imem
.a_i_valid
.eq(1)
846 comb
+= self
.imem
.f_i_valid
.eq(1)
847 m
.next
= "INSN_READ" # move to "wait for bus" phase
849 # dummy pause to find out why simulation is not keeping up
850 with m
.State("INSN_READ"):
851 # when using "single-step" mode, checking dbg.stopping_o
852 # prevents progress. allow fetch to proceed once started
854 #if self.allow_overlap:
855 # stopping = dbg.stopping_o
857 # stopping: jump back to idle
860 with m
.If(self
.imem
.f_busy_o
&
861 ~pdecode2
.instr_fault
): # zzz...
862 # busy but not fetch failed: stay in wait-read
863 comb
+= self
.imem
.a_pc_i
.eq(pc
)
864 comb
+= self
.imem
.a_i_valid
.eq(1)
865 comb
+= self
.imem
.f_i_valid
.eq(1)
867 # not busy (or fetch failed!): instruction fetched
868 # when fetch failed, the instruction gets ignored
870 if hasattr(core
, "icache"):
871 # blech, icache returns actual instruction
872 insn
= self
.imem
.f_instr_o
874 # but these return raw memory
875 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
878 # decode the SVP64 prefix, if any
879 comb
+= svp64
.raw_opcode_in
.eq(insn
)
880 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
881 # pass the decoded prefix (if any) to PowerDecoder2
882 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
883 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
884 # remember whether this is a prefixed instruction,
885 # so the FSM can readily loop when VL==0
886 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
887 # calculate the address of the following instruction
888 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
889 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
890 with m
.If(~svp64
.is_svp64_mode
):
891 # with no prefix, store the instruction
892 # and hand it directly to the next FSM
893 sync
+= dec_opcode_i
.eq(insn
)
894 m
.next
= "INSN_READY"
896 # fetch the rest of the instruction from memory
897 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
898 comb
+= self
.imem
.a_i_valid
.eq(1)
899 comb
+= self
.imem
.f_i_valid
.eq(1)
900 m
.next
= "INSN_READ2"
902 # not SVP64 - 32-bit only
903 sync
+= nia
.eq(cur_state
.pc
+ 4)
904 sync
+= dec_opcode_i
.eq(insn
)
905 if self
.microwatt_compat
:
906 # for verilator debug purposes
907 comb
+= self
.insn
.eq(insn
)
908 comb
+= self
.nia
.eq(cur_state
.pc
)
909 comb
+= self
.msr_o
.eq(cur_state
.msr
)
910 comb
+= self
.nia_req
.eq(1)
911 m
.next
= "INSN_READY"
913 with m
.State("INSN_READ2"):
914 with m
.If(self
.imem
.f_busy_o
): # zzz...
915 # busy: stay in wait-read
916 comb
+= self
.imem
.a_i_valid
.eq(1)
917 comb
+= self
.imem
.f_i_valid
.eq(1)
919 # not busy: instruction fetched
920 if hasattr(core
, "icache"):
921 # blech, icache returns actual instruction
922 insn
= self
.imem
.f_instr_o
924 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
925 sync
+= dec_opcode_i
.eq(insn
)
926 m
.next
= "INSN_READY"
927 # TODO: probably can start looking at pdecode2.rm_dec
928 # here or maybe even in INSN_READ state, if svp64_mode
929 # detected, in order to trigger - and wait for - the
932 pmode
= pdecode2
.rm_dec
.predmode
934 if pmode != SVP64PredMode.ALWAYS.value:
935 fire predicate loading FSM and wait before
938 sync += self.srcmask.eq(-1) # set to all 1s
939 sync += self.dstmask.eq(-1) # set to all 1s
940 m.next = "INSN_READY"
943 with m
.State("INSN_READY"):
944 # hand over the instruction, to be decoded
945 comb
+= fetch_insn_o_valid
.eq(1)
946 with m
.If(fetch_insn_i_ready
):
def fetch_predicate_fsm(self, m,
                        pred_insn_i_valid, pred_insn_o_ready,
                        pred_mask_o_valid, pred_mask_i_ready):
    """fetch_predicate_fsm - obtains (constructs in the case of CR)
       src/dest predicate masks

    https://bugs.libre-soc.org/show_bug.cgi?id=617
    the predicates can be read here, by using IntRegs r_ports['pred']
    or CRRegs r_ports['pred'].  in the case of CRs it will have to
    be done through multiple reads, extracting one relevant at a time.
    later, a faster way would be to use the 32-bit-wide CR port but
    this is more complex decoding, here.  equivalent code used in
    ISACaller is "from openpower.decoder.isa.caller import get_predcr"

    note: this ENTIRE FSM is not to be called when svp64 is disabled
    """
    comb = m.d.comb
    sync = m.d.sync
    pdecode2 = self.pdecode2
    rm_dec = pdecode2.rm_dec  # SVP64RMModeDecode
    predmode = rm_dec.predmode
    srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
    cr_pred, int_pred = self.cr_pred, self.int_pred  # read regfiles
    # get src/dst step, so we can skip already used mask bits
    cur_state = self.cur_state
    srcstep = cur_state.svstate.srcstep
    dststep = cur_state.svstate.dststep
    cur_vl = cur_state.svstate.vl

    # decode predicates: register number, invert flag, unary (1<<r3)
    # mode flag, and "all-ones" (no register read needed) flag
    sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
    dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
    sidx, scrinvert = get_predcr(m, srcpred, 's')
    didx, dcrinvert = get_predcr(m, dstpred, 'd')

    # store fetched masks, for either intpred or crpred
    # when src/dst step is not zero, the skipped mask bits need to be
    # shifted-out, before actually storing them in src/dest mask
    new_srcmask = Signal(64, reset_less=True)
    new_dstmask = Signal(64, reset_less=True)

    with m.FSM(name="fetch_predicate"):
        # idle state: wait for the issue FSM to request predicate fetch
        with m.State("FETCH_PRED_IDLE"):
            comb += pred_insn_o_ready.eq(1)
            with m.If(pred_insn_i_valid):
                with m.If(predmode == SVP64PredMode.INT):
                    # skip fetching destination mask register, when zero
                    with m.If(dall1s):
                        sync += new_dstmask.eq(-1)
                        # directly go to fetch source mask register
                        # guaranteed not to be zero (otherwise predmode
                        # would be SVP64PredMode.ALWAYS, not INT)
                        comb += int_pred.addr.eq(sregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_SRC_READ"
                    with m.Else():
                        # fetch destination predicate register
                        comb += int_pred.addr.eq(dregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_DST_READ"
                with m.Elif(predmode == SVP64PredMode.CR):
                    # go fetch masks from the CR register file
                    sync += new_srcmask.eq(0)
                    sync += new_dstmask.eq(0)
                    m.next = "CR_READ"
                with m.Else():
                    # predication disabled (ALWAYS): all bits enabled
                    sync += self.srcmask.eq(-1)
                    sync += self.dstmask.eq(-1)
                    m.next = "FETCH_PRED_DONE"

        with m.State("INT_DST_READ"):
            # store destination mask
            inv = Repl(dinvert, 64)
            with m.If(dunary):
                # set selected mask bit for 1<<r3 mode
                dst_shift = Signal(range(64))
                comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_dstmask.eq(1 << dst_shift)
            with m.Else():
                # invert mask if requested
                sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
            # skip fetching source mask register, when zero
            with m.If(sall1s):
                sync += new_srcmask.eq(-1)
                m.next = "FETCH_PRED_SHIFT_MASK"
            with m.Else():
                # fetch source predicate register
                comb += int_pred.addr.eq(sregread)
                comb += int_pred.ren.eq(1)
                m.next = "INT_SRC_READ"

        with m.State("INT_SRC_READ"):
            # store source mask
            inv = Repl(sinvert, 64)
            with m.If(sunary):
                # set selected mask bit for 1<<r3 mode
                src_shift = Signal(range(64))
                comb += src_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_srcmask.eq(1 << src_shift)
            with m.Else():
                # invert mask if requested
                sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
            m.next = "FETCH_PRED_SHIFT_MASK"

        # fetch masks from the CR register file
        # implements the following loop:
        # idx, inv = get_predcr(mask)
        # mask = 0
        # for cr_idx in range(vl):
        #     cr = crl[cr_idx + SVP64CROffs.CRPred]  # takes one cycle
        #     if cr[idx] ^ inv:
        #         mask |= 1 << cr_idx
        # return mask
        with m.State("CR_READ"):
            # CR index to be read, which will be ready by the next cycle
            cr_idx = Signal.like(cur_vl, reset_less=True)
            # submit the read operation to the regfile
            with m.If(cr_idx != cur_vl):
                # the CR read port is unary ...
                # ren = 1 << cr_idx
                # ... in MSB0 convention ...
                # ren = 1 << (7 - cr_idx)
                # ... and with an offset:
                # ren = 1 << (7 - off - cr_idx)
                idx = SVP64CROffs.CRPred + cr_idx
                comb += cr_pred.ren.eq(1 << (7 - idx))
                # signal data valid in the next cycle
                cr_read = Signal(reset_less=True)
                sync += cr_read.eq(1)
                # load the next index
                sync += cr_idx.eq(cr_idx + 1)
            with m.Else():
                # exit on loop end
                sync += cr_read.eq(0)
                sync += cr_idx.eq(0)
                m.next = "FETCH_PRED_SHIFT_MASK"

            with m.If(cr_read):
                # compensate for the one cycle delay on the regfile
                cur_cr_idx = Signal.like(cur_vl)
                comb += cur_cr_idx.eq(cr_idx - 1)
                # read the CR field, select the appropriate bit
                cr_field = Signal(4)
                scr_bit = Signal(reset_less=True)
                dcr_bit = Signal(reset_less=True)
                comb += cr_field.eq(cr_pred.o_data)
                comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
                                   ^ scrinvert)
                comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
                                   ^ dcrinvert)
                # set the corresponding mask bit
                bit_to_set = Signal.like(self.srcmask)
                comb += bit_to_set.eq(1 << cur_cr_idx)
                with m.If(scr_bit):
                    sync += new_srcmask.eq(new_srcmask | bit_to_set)
                with m.If(dcr_bit):
                    sync += new_dstmask.eq(new_dstmask | bit_to_set)

        with m.State("FETCH_PRED_SHIFT_MASK"):
            # shift-out skipped mask bits
            sync += self.srcmask.eq(new_srcmask >> srcstep)
            sync += self.dstmask.eq(new_dstmask >> dststep)
            m.next = "FETCH_PRED_DONE"

        with m.State("FETCH_PRED_DONE"):
            comb += pred_mask_o_valid.eq(1)
            with m.If(pred_mask_i_ready):
                m.next = "FETCH_PRED_IDLE"
def issue_fsm(self, m, core, nia,
              dbg, core_rst, is_svp64_mode,
              fetch_pc_o_ready, fetch_pc_i_valid,
              fetch_insn_o_valid, fetch_insn_i_ready,
              pred_insn_i_valid, pred_insn_o_ready,
              pred_mask_o_valid, pred_mask_i_ready,
              exec_insn_i_valid, exec_insn_o_ready,
              exec_pc_o_valid, exec_pc_i_ready):
    """issue FSM

    decode / issue FSM.  this interacts with the "fetch" FSM
    through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
    (outgoing).  also interacts with the "execute" FSM
    through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
    (incoming).
    SVP64 RM prefixes have already been set up by the
    "fetch" phase, so execute is fairly straightforward.
    """
    comb = m.d.comb
    sync = m.d.sync
    pdecode2 = self.pdecode2
    cur_state = self.cur_state
    new_svstate = self.new_svstate

    # temporaries
    dec_opcode_i = pdecode2.dec.raw_opcode_in  # raw opcode

    # for updating svstate (things like srcstep etc.)
    comb += new_svstate.eq(cur_state.svstate)

    # precalculate srcstep+1 and dststep+1
    cur_srcstep = cur_state.svstate.srcstep
    cur_dststep = cur_state.svstate.dststep
    next_srcstep = Signal.like(cur_srcstep)
    next_dststep = Signal.like(cur_dststep)
    comb += next_srcstep.eq(cur_state.svstate.srcstep + 1)
    comb += next_dststep.eq(cur_state.svstate.dststep + 1)

    # note if an exception happened.  in a pipelined or OoO design
    # this needs to be accompanied by "shadowing" (or stalling)
    exc_happened = self.core.o.exc_happened
    # also note instruction fetch failed
    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
        flush_needed = True
        # set to fault in decoder
        # update (highest priority) instruction fault
        rising_fetch_failed = rising_edge(m, fetch_failed)
        with m.If(rising_fetch_failed):
            sync += pdecode2.instr_fault.eq(1)
    else:
        fetch_failed = Const(0, 1)
        flush_needed = False

    # default: do not request a fetch unless a state below asks for one
    sync += fetch_pc_i_valid.eq(0)

    with m.FSM(name="issue_fsm"):

        # sync with the "fetch" phase which is reading the instruction
        # at this point, there is no instruction running, that
        # could inadvertently update the PC.
        with m.State("ISSUE_START"):
            # reset instruction fault
            sync += pdecode2.instr_fault.eq(0)
            # wait on "core stop" release, before next fetch
            # need to do this here, in case we are in a VL==0 loop
            with m.If(~dbg.core_stop_o & ~core_rst):
                sync += fetch_pc_i_valid.eq(1)  # tell fetch to start
                sync += cur_state.pc.eq(dbg.state.pc)
                sync += cur_state.svstate.eq(dbg.state.svstate)
                sync += cur_state.msr.eq(dbg.state.msr)
                with m.If(fetch_pc_o_ready):  # fetch acknowledged us
                    m.next = "INSN_WAIT"
            with m.Else():
                # tell core it's stopped, and acknowledge debug handshake
                comb += dbg.core_stopped_i.eq(1)
                # while stopped, allow updating SVSTATE
                with m.If(self.svstate_i.ok):
                    comb += new_svstate.eq(self.svstate_i.data)
                    comb += self.update_svstate.eq(1)
                    sync += self.sv_changed.eq(1)

        # wait for an instruction to arrive from Fetch
        with m.State("INSN_WAIT"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress.  allow issue to proceed once started
            stopping = Signal()
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += fetch_insn_i_ready.eq(1)
                with m.If(fetch_insn_o_valid):
                    # loop into ISSUE_START if it's a SVP64 instruction
                    # and VL == 0.  this because VL==0 is a for-loop
                    # from 0 to 0 i.e. always, always a NOP.
                    cur_vl = cur_state.svstate.vl
                    with m.If(is_svp64_mode & (cur_vl == 0)):
                        # update the PC before fetching the next instruction
                        # since we are in a VL==0 loop, no instruction was
                        # executed that we could be overwriting
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        comb += self.insn_done.eq(1)
                        m.next = "ISSUE_START"
                    with m.Else():
                        if self.svp64_en:
                            m.next = "PRED_START"  # fetching predicate
                        else:
                            m.next = "DECODE_SV"  # skip predication

        with m.State("PRED_START"):
            comb += pred_insn_i_valid.eq(1)  # tell fetch_pred to start
            with m.If(pred_insn_o_ready):  # fetch_pred acknowledged us
                m.next = "MASK_WAIT"

        with m.State("MASK_WAIT"):
            comb += pred_mask_i_ready.eq(1)  # ready to receive the masks
            with m.If(pred_mask_o_valid):  # predication masks are ready
                m.next = "PRED_SKIP"

        # skip zeros in predicate
        with m.State("PRED_SKIP"):
            with m.If(~is_svp64_mode):
                m.next = "DECODE_SV"  # nothing to do
            with m.Else():
                pred_src_zero = pdecode2.rm_dec.pred_sz
                pred_dst_zero = pdecode2.rm_dec.pred_dz

                # new srcstep, after skipping zeros
                skip_srcstep = Signal.like(cur_srcstep)
                # value to be added to the current srcstep
                src_delta = Signal.like(cur_srcstep)
                # add leading zeros to srcstep, if not in zero mode
                with m.If(~pred_src_zero):
                    # priority encoder (count leading zeros)
                    # append guard bit, in case the mask is all zeros
                    pri_enc_src = PriorityEncoder(65)
                    m.submodules.pri_enc_src = pri_enc_src
                    comb += pri_enc_src.i.eq(Cat(self.srcmask,
                                                 Const(1, 1)))
                    comb += src_delta.eq(pri_enc_src.o)
                # apply delta to srcstep
                comb += skip_srcstep.eq(cur_srcstep + src_delta)
                # shift-out all leading zeros from the mask
                # plus the leading "one" bit
                # TODO count leading zeros and shift-out the zero
                #      bits, in the same step, in hardware
                sync += self.srcmask.eq(self.srcmask >> (src_delta + 1))

                # same as above, but for dststep
                skip_dststep = Signal.like(cur_dststep)
                dst_delta = Signal.like(cur_dststep)
                with m.If(~pred_dst_zero):
                    pri_enc_dst = PriorityEncoder(65)
                    m.submodules.pri_enc_dst = pri_enc_dst
                    comb += pri_enc_dst.i.eq(Cat(self.dstmask,
                                                 Const(1, 1)))
                    comb += dst_delta.eq(pri_enc_dst.o)
                comb += skip_dststep.eq(cur_dststep + dst_delta)
                sync += self.dstmask.eq(self.dstmask >> (dst_delta + 1))

                # TODO: initialize mask[VL]=1 to avoid passing past VL
                with m.If((skip_srcstep >= cur_vl) |
                          (skip_dststep >= cur_vl)):
                    # end of VL loop. Update PC and reset src/dst step
                    comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                    comb += self.state_w_pc.i_data.eq(nia)
                    comb += new_svstate.srcstep.eq(0)
                    comb += new_svstate.dststep.eq(0)
                    comb += self.update_svstate.eq(1)
                    # synchronize with the simulator
                    comb += self.insn_done.eq(1)
                    # go back and fetch the next instruction
                    m.next = "ISSUE_START"
                with m.Else():
                    # update new src/dst step
                    comb += new_svstate.srcstep.eq(skip_srcstep)
                    comb += new_svstate.dststep.eq(skip_dststep)
                    comb += self.update_svstate.eq(1)
                    # proceed to Decode
                    m.next = "DECODE_SV"

                # pass predicate mask bits through to satellite decoders
                # TODO: for SIMD this will be *multiple* bits
                sync += core.i.sv_pred_sm.eq(self.srcmask[0])
                sync += core.i.sv_pred_dm.eq(self.dstmask[0])

        # after src/dst step have been updated, we are ready
        # to decode the instruction
        with m.State("DECODE_SV"):
            # decode the instruction
            with m.If(~fetch_failed):
                sync += pdecode2.instr_fault.eq(0)
            sync += core.i.e.eq(pdecode2.e)
            sync += core.i.state.eq(cur_state)
            sync += core.i.raw_insn_i.eq(dec_opcode_i)
            sync += core.i.bigendian_i.eq(self.core_bigendian_i)
            if self.svp64_en:
                sync += core.i.sv_rm.eq(pdecode2.sv_rm)
                # set RA_OR_ZERO detection in satellite decoders
                sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
                # and svp64 detection
                sync += core.i.is_svp64_mode.eq(is_svp64_mode)
                # and svp64 bit-rev'd ldst mode
                ldst_dec = pdecode2.use_svp64_ldst_dec
                sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
            # after decoding, reset any previous exception condition,
            # allowing it to be set again during the next execution
            sync += pdecode2.ldst_exc.eq(0)

            m.next = "INSN_EXECUTE"  # move to "execute"

        # handshake with execution FSM, move to "wait" once acknowledged
        with m.State("INSN_EXECUTE"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress.  allow execute to proceed once started
            stopping = Signal()
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += exec_insn_i_valid.eq(1)  # trigger execute
                with m.If(exec_insn_o_ready):  # execute acknowledged us
                    m.next = "EXECUTE_WAIT"

        with m.State("EXECUTE_WAIT"):
            comb += exec_pc_i_ready.eq(1)
            # see https://bugs.libre-soc.org/show_bug.cgi?id=636
            # the exception info needs to be blatted into
            # pdecode.ldst_exc, and the instruction "re-run".
            # when ldst_exc.happened is set, the PowerDecoder2
            # reacts very differently: it re-writes the instruction
            # with a "trap" (calls PowerDecoder2.trap()) which
            # will *overwrite* whatever was requested and jump the
            # PC to the exception address, as well as alter MSR.
            # nothing else needs to be done other than to note
            # the change of PC and MSR (and, later, SVSTATE)
            with m.If(exc_happened):
                mmu = core.fus.get_exc("mmu0")
                ldst = core.fus.get_exc("ldst0")
                if mmu is not None:
                    with m.If(fetch_failed):
                        # instruction fetch: exception is from MMU
                        # reset instr_fault (highest priority)
                        sync += pdecode2.ldst_exc.eq(mmu)
                        sync += pdecode2.instr_fault.eq(0)
                        if flush_needed:
                            # request icache to stop asserting "failed"
                            comb += core.icache.flush_in.eq(1)
                with m.If(~fetch_failed):
                    # otherwise assume it was a LDST exception
                    sync += pdecode2.ldst_exc.eq(ldst)

            with m.If(exec_pc_o_valid):

                # was this the last loop iteration?
                is_last = Signal()
                cur_vl = cur_state.svstate.vl
                comb += is_last.eq(next_srcstep == cur_vl)

                with m.If(pdecode2.instr_fault):
                    # reset instruction fault, try again
                    sync += pdecode2.instr_fault.eq(0)
                    m.next = "ISSUE_START"

                # return directly to Decode if Execute generated an
                # exception.
                with m.Elif(pdecode2.ldst_exc.happened):
                    m.next = "DECODE_SV"

                # if MSR, PC or SVSTATE were changed by the previous
                # instruction, go directly back to Fetch, without
                # updating either MSR PC or SVSTATE
                with m.Elif(self.msr_changed | self.pc_changed |
                            self.sv_changed):
                    m.next = "ISSUE_START"

                # also return to Fetch, when no output was a vector
                # (regardless of SRCSTEP and VL), or when the last
                # instruction was really the last one of the VL loop
                with m.Elif((~pdecode2.loop_continue) | is_last):
                    # before going back to fetch, update the PC state
                    # register with the NIA.
                    # ok here we are not reading the branch unit.
                    # TODO: this just blithely overwrites whatever
                    #       pipeline updated the PC
                    comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                    comb += self.state_w_pc.i_data.eq(nia)
                    # reset SRCSTEP before returning to Fetch
                    if self.svp64_en:
                        with m.If(pdecode2.loop_continue):
                            comb += new_svstate.srcstep.eq(0)
                            comb += new_svstate.dststep.eq(0)
                            comb += self.update_svstate.eq(1)
                    else:
                        comb += new_svstate.srcstep.eq(0)
                        comb += new_svstate.dststep.eq(0)
                        comb += self.update_svstate.eq(1)
                    m.next = "ISSUE_START"

                # returning to Execute? then, first update SRCSTEP
                with m.Else():
                    comb += new_svstate.srcstep.eq(next_srcstep)
                    comb += new_svstate.dststep.eq(next_dststep)
                    comb += self.update_svstate.eq(1)
                    # return to mask skip loop
                    m.next = "PRED_SKIP"

    # check if svstate needs updating: if so, write it to State Regfile
    with m.If(self.update_svstate):
        sync += cur_state.svstate.eq(self.new_svstate)  # for next clock
def execute_fsm(self, m, core,
                exec_insn_i_valid, exec_insn_o_ready,
                exec_pc_o_valid, exec_pc_i_ready):
    """execute FSM

    execute FSM.  this interacts with the "issue" FSM
    through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
    (outgoing).  SVP64 RM prefixes have already been set up by the
    "issue" phase, so execute is fairly straightforward.
    """
    comb = m.d.comb
    sync = m.d.sync
    dbg = self.dbg
    pdecode2 = self.pdecode2

    # core handshake signals
    core_busy_o = core.n.o_data.busy_o  # core is busy
    core_ivalid_i = core.p.i_valid  # instruction is valid

    # instruction fetch failure comes from the icache, if present
    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
    else:
        fetch_failed = Const(0, 1)

    with m.FSM(name="exec_fsm"):

        # waiting for instruction bus (stays there until not busy)
        with m.State("INSN_START"):
            comb += exec_insn_o_ready.eq(1)
            with m.If(exec_insn_i_valid):
                comb += core_ivalid_i.eq(1)  # instruction is valid/issued
                sync += self.sv_changed.eq(0)
                sync += self.pc_changed.eq(0)
                sync += self.msr_changed.eq(0)
                with m.If(core.p.o_ready):  # only move if accepted
                    m.next = "INSN_ACTIVE"  # move to "wait completion"

        # instruction started: must wait till it finishes
        with m.State("INSN_ACTIVE"):
            # note changes to MSR, PC and SVSTATE, and DEC/TB
            # these last two are done together, and passed to the
            # DEC/TB-pause mechanism via pause_dec_tb
            with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
                sync += self.sv_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
                sync += self.msr_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
                sync += self.pc_changed.eq(1)
            with m.If((self.state_spr.wen &
                       ((1 << StateRegs.DEC) |
                        (1 << StateRegs.TB))).bool()):
                comb += self.pause_dec_tb.eq(1)
            with m.If(~core_busy_o):  # instruction done!
                comb += exec_pc_o_valid.eq(1)
                with m.If(exec_pc_i_ready):
                    # when finished, indicate "done".
                    # however, if there was an exception, the instruction
                    # is *not* yet done.  this is an implementation
                    # detail: we choose to implement exceptions by
                    # taking the exception information from the LDST
                    # unit, putting that *back* into the PowerDecoder2,
                    # and *re-running the entire instruction*.
                    # if we erroneously indicate "done" here, it is as if
                    # there were *TWO* instructions:
                    # 1) the failed LDST 2) a TRAP.
                    with m.If(~pdecode2.ldst_exc.happened &
                              ~pdecode2.instr_fault):
                        comb += self.insn_done.eq(1)
                    m.next = "INSN_START"  # back to fetch
            # terminate returns directly to INSN_START
            with m.If(dbg.terminate_i):
                # comb += self.insn_done.eq(1) - no because it's not
                m.next = "INSN_START"  # back to fetch
def elaborate(self, platform):
    """elaborate - wires up the four FSMs (fetch, predicate, issue,
    execute) around the core created by the superclass.
    """
    m = super().elaborate(platform)

    # convenience
    comb, sync = m.d.comb, m.d.sync
    cur_state = self.cur_state
    pdecode2 = self.pdecode2
    dbg = self.dbg
    core = self.core

    # set up peripherals and core
    core_rst = self.core_rst

    # indicate to outside world if any FU is still executing
    comb += self.any_busy.eq(core.n.o_data.any_busy_o)  # any FU executing

    # address of the next instruction, in the absence of a branch
    # depends on the instruction size
    nia = Signal(64)

    # connect up debug signals
    with m.If(core.o.core_terminate_o):
        comb += dbg.terminate_i.eq(1)

    # pass the prefix mode from Fetch to Issue, so the latter can loop
    # on VL==0
    is_svp64_mode = Signal()

    # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
    # issue, decode/execute, now joined by "Predicate fetch/calculate".
    # these are the handshake signals between each

    # fetch FSM can run as soon as the PC is valid
    fetch_pc_i_valid = Signal()  # Execute tells Fetch "start next read"
    fetch_pc_o_ready = Signal()  # Fetch Tells SVSTATE "proceed"

    # fetch FSM hands over the instruction to be decoded / issued
    fetch_insn_o_valid = Signal()
    fetch_insn_i_ready = Signal()

    # predicate fetch FSM decodes and fetches the predicate
    pred_insn_i_valid = Signal()
    pred_insn_o_ready = Signal()

    # predicate fetch FSM delivers the masks
    pred_mask_o_valid = Signal()
    pred_mask_i_ready = Signal()

    # issue FSM delivers the instruction to the be executed
    exec_insn_i_valid = Signal()
    exec_insn_o_ready = Signal()

    # execute FSM, hands over the PC/SVSTATE back to the issue FSM
    exec_pc_o_valid = Signal()
    exec_pc_i_ready = Signal()

    # the FSMs here are perhaps unusual in that they detect conditions
    # then "hold" information, combinatorially, for the core
    # (as opposed to using sync - which would be on a clock's delay)
    # this includes the actual opcode, valid flags and so on.

    # Fetch, then predicate fetch, then Issue, then Execute.
    # Issue is where the VL for-loop lives.  the ready/valid
    # signalling is used to communicate between the four.

    # set up the Fetch FSM
    self.fetch_fsm(m, dbg, core, nia, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready)

    # issue FSM
    self.issue_fsm(m, core, nia,
                   dbg, core_rst, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready,
                   pred_insn_i_valid, pred_insn_o_ready,
                   pred_mask_o_valid, pred_mask_i_ready,
                   exec_insn_i_valid, exec_insn_o_ready,
                   exec_pc_o_valid, exec_pc_i_ready)

    # predicate fetch FSM - only when SVP64 is enabled (the FSM reads
    # the predicate regfile ports which only exist in that case)
    if self.svp64_en:
        self.fetch_predicate_fsm(m,
                                 pred_insn_i_valid, pred_insn_o_ready,
                                 pred_mask_o_valid, pred_mask_i_ready)

    # execute FSM
    self.execute_fsm(m, core,
                     exec_insn_i_valid, exec_insn_o_ready,
                     exec_pc_o_valid, exec_pc_i_ready)

    # whatever was done above, over-ride it if core reset is held
    # NOTE(review): the body of this reset-override was dropped by the
    # paste this file was recovered from; holding nia at zero matches
    # upstream - TODO confirm against the original repository
    with m.If(core_rst):
        sync += nia.eq(0)

    return m
class TestIssuer(Elaboratable):
    """TestIssuer - wraps TestIssuerInternal with an (optional) PLL and
    clock-selection logic, exposing the extra PLL-related ports when
    the pspec requests a PLL.
    """

    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # debug reset, wired through to the internal issuer
        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        if self.pll_en:
            self.pll_test_o = Signal(reset_less=True)
            self.pll_vco_o = Signal(reset_less=True)
            self.clk_sel_i = Signal(2, reset_less=True)
            self.ref_clk = ClockSignal()  # can't rename it but that's ok
            self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        if self.pll_en:
            # ClockSelect runs at PLL output internal clock rate
            m.submodules.wrappll = pll = self.pll

            # add clock domains from PLL
            cd_pll = ClockDomain("pllclk")
            m.domains += cd_pll

            # PLL clock established.  has the side-effect of running clklsel
            # at the PLL's speed (see DomainRenamer("pllclk") above)
            pllclk = self.pllclk_clk
            comb += pllclk.eq(pll.clk_pll_o)

            # wire up external 24mhz to PLL
            #comb += pll.clk_24_i.eq(self.ref_clk)
            # output 18 mhz PLL test signal, and analog oscillator out
            comb += self.pll_test_o.eq(pll.pll_test_o)
            comb += self.pll_vco_o.eq(pll.pll_vco_o)

            # input to pll clock selection
            comb += pll.clk_sel_i.eq(self.clk_sel_i)

            # now wire up ResetSignals.  don't mind them being in this domain
            pll_rst = ResetSignal("pllclk")
            comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        if self.pll_en:
            comb += intclk.eq(self.ref_clk)
            assert self.ti.core_domain != 'sync', \
                "cannot set core_domain to sync and use pll at the same time"
        else:
            if self.ti.core_domain != 'sync':
                comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)

        return m

    def ports(self):
        # expose the internal issuer's ports plus the PLL's, and the
        # top-level clock/reset
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        if self.pll_en:
            ports.append(self.clk_sel_i)
            ports.append(self.pll.clk_24_i)
            ports.append(self.pll_test_o)
            ports.append(self.pll_vco_o)
            ports.append(self.pllclk_clk)
            ports.append(self.ref_clk)
        return ports
1708 if __name__
== '__main__':
1709 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1715 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1716 imem_ifacetype
='bare_wb',
1721 dut
= TestIssuer(pspec
)
1722 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1724 if len(sys
.argv
) == 1:
1725 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1726 with
open("test_issuer.il", "w") as f
: