06bfcdf3de19d13c45a7c3be60d010ed16b04e1a
3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Return the 32-bit instruction word from a fetched memory row.

    f_instr_o -- data returned by instruction memory; its ``.width`` is the
                 fetch-row width in bits (32 or 64)
    pc        -- program counter value; only bit 2 is examined here
    returns   -- a 32-bit value containing the instruction

    NOTE(review): the mangled source showed no return for the 32-bit case,
    which would fall through and yield None; a 32-bit row *is* exactly one
    instruction, so it is returned directly.
    """
    if f_instr_o.width == 32:
        # row is already exactly one instruction wide
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
63 # gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """state_get - gets state input or reads from state regfile.

    m        -- nmigen Module the statements are added to
    res      -- Signal that receives the state value
    core_rst -- core reset signal; regfile reads are suppressed while set
    state_i  -- incoming override (Data record with .ok and .data)
    name     -- base name used for the internal delay signal
    regfile  -- StateRegs read port (.ren one-hot enable, .o_data result)
    regnum   -- register number within the state regfile

    NOTE(review): the original chunk was missing the lines binding
    comb/sync and the reset/else structure; reconstructed here from the
    visible statements — confirm against upstream before relying on it.
    """
    comb = m.d.comb
    sync = m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert

    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            # _N variant: docstring says the register value is bit-inverted
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status

    this is identical to _get_predcr in ISACaller

    * idx    selects which CR field bit (LT/GT/EQ/SO) is tested
    * invert is set for the negated predicates (GE/LE/NE/NS), which test
      the same bit as their positive counterpart but inverted

    NOTE(review): the mangled source never set ``invert`` and had no
    visible return; reconstructed symmetric with get_predint — confirm
    against upstream.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)    # GE == NOT LT
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)    # LE == NOT GT
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)    # NE == NOT EQ
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)    # NS == NOT SO
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test if microwatt compatibility is to be enabled
169 self
.microwatt_compat
= (hasattr(pspec
, "microwatt_compat") and
170 (pspec
.microwatt_compat
== True))
171 self
.alt_reset
= Signal(reset_less
=True) # not connected yet (microwatt)
173 # test is SVP64 is to be enabled
174 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
176 # and if regfiles are reduced
177 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
178 (pspec
.regreduce
== True))
180 # and if overlap requested
181 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
182 (pspec
.allow_overlap
== True))
184 # and get the core domain
185 self
.core_domain
= "coresync"
186 if (hasattr(pspec
, "core_domain") and
187 isinstance(pspec
.core_domain
, str)):
188 self
.core_domain
= pspec
.core_domain
190 # JTAG interface. add this right at the start because if it's
191 # added it *modifies* the pspec, by adding enable/disable signals
192 # for parts of the rest of the core
193 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
194 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
195 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
197 # XXX MUST keep this up-to-date with litex, and
198 # soc-cocotb-sim, and err.. all needs sorting out, argh
201 'eint', 'gpio', 'mspi0',
202 # 'mspi1', - disabled for now
203 # 'pwm', 'sd0', - disabled for now
205 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
206 domain
=self
.dbg_domain
)
207 # add signals to pspec to enable/disable icache and dcache
208 # (or data and intstruction wishbone if icache/dcache not included)
209 # https://bugs.libre-soc.org/show_bug.cgi?id=520
210 # TODO: do we actually care if these are not domain-synchronised?
211 # honestly probably not.
212 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
213 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
214 self
.wb_sram_en
= self
.jtag
.wb_sram_en
216 self
.wb_sram_en
= Const(1)
218 # add 4k sram blocks?
219 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
220 pspec
.sram4x4kblock
== True)
224 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
228 # add interrupt controller?
229 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
231 self
.xics_icp
= XICS_ICP()
232 self
.xics_ics
= XICS_ICS()
233 self
.int_level_i
= self
.xics_ics
.int_level_i
235 self
.ext_irq
= Signal()
237 # add GPIO peripheral?
238 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
240 self
.simple_gpio
= SimpleGPIO()
241 self
.gpio_o
= self
.simple_gpio
.gpio_o
243 # main instruction core. suitable for prototyping / demo only
244 self
.core
= core
= NonProductionCore(pspec
)
245 self
.core_rst
= ResetSignal(self
.core_domain
)
247 # instruction decoder. goes into Trap Record
248 #pdecode = create_pdecode()
249 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
250 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
251 opkls
=IssuerDecode2ToOperand
,
252 svp64_en
=self
.svp64_en
,
253 regreduce_en
=self
.regreduce_en
)
254 pdecode
= self
.pdecode2
.dec
257 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
259 self
.update_svstate
= Signal() # set this if updating svstate
260 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
262 # Test Instruction memory
263 if hasattr(core
, "icache"):
264 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
265 # truly dreadful. needs a huge reorg.
266 pspec
.icache
= core
.icache
267 self
.imem
= ConfigFetchUnit(pspec
).fu
270 self
.dbg
= CoreDebug()
271 self
.dbg_rst_i
= Signal(reset_less
=True)
273 # instruction go/monitor
274 self
.pc_o
= Signal(64, reset_less
=True)
275 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
276 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
277 self
.svstate_i
= Data(64, "svstate_i") # ditto
278 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
279 self
.busy_o
= Signal(reset_less
=True)
280 self
.memerr_o
= Signal(reset_less
=True)
282 # STATE regfile read /write ports for PC, MSR, SVSTATE
283 staterf
= self
.core
.regs
.rf
['state']
284 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
285 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
286 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
288 self
.state_w_msr
= staterf
.w_ports
['d_wr2'] # MSR wr
289 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
290 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
292 # DMI interface access
293 intrf
= self
.core
.regs
.rf
['int']
294 fastrf
= self
.core
.regs
.rf
['fast']
295 crrf
= self
.core
.regs
.rf
['cr']
296 xerrf
= self
.core
.regs
.rf
['xer']
297 self
.int_r
= intrf
.r_ports
['dmi'] # INT DMI read
298 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR DMI read
299 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER DMI read
300 self
.fast_r
= fastrf
.r_ports
['dmi'] # FAST DMI read
304 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
305 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
307 # hack method of keeping an eye on whether branch/trap set the PC
308 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
309 self
.state_nia
.wen
.name
= 'state_nia_wen'
310 # and whether SPR pipeline sets DEC or TB
311 self
.state_spr
= self
.core
.regs
.rf
['state'].w_ports
['state1']
313 # pulse to synchronize the simulator at instruction end
314 self
.insn_done
= Signal()
316 # indicate any instruction still outstanding, in execution
317 self
.any_busy
= Signal()
320 # store copies of predicate masks
321 self
.srcmask
= Signal(64)
322 self
.dstmask
= Signal(64)
324 # sigh, the wishbone addresses are not wishbone-compliant in microwatt
325 if self
.microwatt_compat
:
326 self
.ibus_adr
= Signal(32, name
='wishbone_insn_out.adr')
327 self
.dbus_adr
= Signal(32, name
='wishbone_data_out.adr')
329 # add an output of the PC and instruction, and whether it was requested
330 # this is for verilator debug purposes
331 if self
.microwatt_compat
:
332 self
.nia
= Signal(64)
333 self
.msr_o
= Signal(64)
334 self
.nia_req
= Signal(1)
335 self
.insn
= Signal(32)
336 self
.ldst_req
= Signal(1)
337 self
.ldst_addr
= Signal(1)
339 # for pausing dec/tb during an SPR pipeline event, this
340 # ensures that an SPR write (mtspr) to TB or DEC does not
341 # get overwritten by the DEC/TB FSM
342 self
.pause_dec_tb
= Signal()
344 def setup_peripherals(self
, m
):
345 comb
, sync
= m
.d
.comb
, m
.d
.sync
347 # okaaaay so the debug module must be in coresync clock domain
348 # but NOT its reset signal. to cope with this, set every single
349 # submodule explicitly in coresync domain, debug and JTAG
350 # in their own one but using *external* reset.
351 csd
= DomainRenamer(self
.core_domain
)
352 dbd
= DomainRenamer(self
.dbg_domain
)
354 if self
.microwatt_compat
:
355 m
.submodules
.core
= core
= self
.core
357 m
.submodules
.core
= core
= csd(self
.core
)
359 # this _so_ needs sorting out. ICache is added down inside
360 # LoadStore1 and is already a submodule of LoadStore1
361 if not isinstance(self
.imem
, ICache
):
362 m
.submodules
.imem
= imem
= csd(self
.imem
)
364 # set up JTAG Debug Module (in correct domain)
365 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
367 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
368 # TODO: UART2GDB mux, here, from external pin
369 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
370 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
372 # fixup the clocks in microwatt-compat mode (but leave resets alone
373 # so that microwatt soc.vhdl can pull a reset on the core or DMI
374 # can do it, just like in TestIssuer)
375 if self
.microwatt_compat
:
376 intclk
= ClockSignal(self
.core_domain
)
377 dbgclk
= ClockSignal(self
.dbg_domain
)
378 if self
.core_domain
!= 'sync':
379 comb
+= intclk
.eq(ClockSignal())
380 if self
.dbg_domain
!= 'sync':
381 comb
+= dbgclk
.eq(ClockSignal())
383 # drop the first 3 bits of the incoming wishbone addresses
384 # this can go if using later versions of microwatt (not now)
385 if self
.microwatt_compat
:
386 ibus
= self
.imem
.ibus
387 dbus
= self
.core
.l0
.cmpi
.wb_bus()
388 comb
+= self
.ibus_adr
.eq(Cat(Const(0, 3), ibus
.adr
))
389 comb
+= self
.dbus_adr
.eq(Cat(Const(0, 3), dbus
.adr
))
390 # microwatt verilator debug purposes
391 pi
= self
.core
.l0
.cmpi
.pi
.pi
392 comb
+= self
.ldst_req
.eq(pi
.addr_ok_o
)
393 comb
+= self
.ldst_addr
.eq(pi
.addr
)
395 cur_state
= self
.cur_state
397 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
399 for i
, sram
in enumerate(self
.sram4k
):
400 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
401 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
403 # XICS interrupt handler
405 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
406 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
407 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
408 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
410 sync
+= cur_state
.eint
.eq(self
.ext_irq
) # connect externally
412 # GPIO test peripheral
414 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
416 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
417 # XXX causes litex ECP5 test to get wrong idea about input and output
418 # (but works with verilator sim *sigh*)
419 # if self.gpio and self.xics:
420 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
422 # instruction decoder
423 pdecode
= create_pdecode()
424 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
426 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
428 # clock delay power-on reset
429 cd_por
= ClockDomain(reset_less
=True)
430 cd_sync
= ClockDomain()
431 m
.domains
+= cd_por
, cd_sync
432 core_sync
= ClockDomain(self
.core_domain
)
433 if self
.core_domain
!= "sync":
434 m
.domains
+= core_sync
435 if self
.dbg_domain
!= "sync":
436 dbg_sync
= ClockDomain(self
.dbg_domain
)
437 m
.domains
+= dbg_sync
439 ti_rst
= Signal(reset_less
=True)
440 delay
= Signal(range(4), reset
=3)
441 with m
.If(delay
!= 0):
442 m
.d
.por
+= delay
.eq(delay
- 1)
443 comb
+= cd_por
.clk
.eq(ClockSignal())
445 # power-on reset delay
446 core_rst
= ResetSignal(self
.core_domain
)
447 if self
.core_domain
!= "sync":
448 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
449 comb
+= core_rst
.eq(ti_rst
)
451 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
452 comb
+= core_rst
.eq(1)
454 # connect external reset signal to DMI Reset
455 if self
.dbg_domain
!= "sync":
456 dbg_rst
= ResetSignal(self
.dbg_domain
)
457 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
459 # busy/halted signals from core
460 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
461 comb
+= self
.busy_o
.eq(core_busy_o
)
462 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
464 # temporary hack: says "go" immediately for both address gen and ST
466 ldst
= core
.fus
.fus
['ldst0']
467 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
468 # link addr-go direct to rel
469 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
470 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
472 def do_dmi(self
, m
, dbg
):
473 """deals with DMI debug requests
475 currently only provides read requests for the INT regfile, CR and XER
476 it will later also deal with *writing* to these regfiles.
480 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
482 intrf
= self
.core
.regs
.rf
['int']
483 fastrf
= self
.core
.regs
.rf
['fast']
485 with m
.If(d_reg
.req
): # request for regfile access being made
486 # TODO: error-check this
487 # XXX should this be combinatorial? sync better?
489 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
491 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
492 comb
+= self
.int_r
.ren
.eq(1)
493 d_reg_delay
= Signal()
494 sync
+= d_reg_delay
.eq(d_reg
.req
)
495 with m
.If(d_reg_delay
):
496 # data arrives one clock later
497 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
498 comb
+= d_reg
.ack
.eq(1)
501 with m
.If(d_fast
.req
): # request for regfile access being made
503 comb
+= self
.fast_r
.ren
.eq(1 << d_fast
.addr
)
505 comb
+= self
.fast_r
.addr
.eq(d_fast
.addr
)
506 comb
+= self
.fast_r
.ren
.eq(1)
507 d_fast_delay
= Signal()
508 sync
+= d_fast_delay
.eq(d_fast
.req
)
509 with m
.If(d_fast_delay
):
510 # data arrives one clock later
511 comb
+= d_fast
.data
.eq(self
.fast_r
.o_data
)
512 comb
+= d_fast
.ack
.eq(1)
514 # sigh same thing for CR debug
515 with m
.If(d_cr
.req
): # request for regfile access being made
516 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
517 d_cr_delay
= Signal()
518 sync
+= d_cr_delay
.eq(d_cr
.req
)
519 with m
.If(d_cr_delay
):
520 # data arrives one clock later
521 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
522 comb
+= d_cr
.ack
.eq(1)
525 with m
.If(d_xer
.req
): # request for regfile access being made
526 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
527 d_xer_delay
= Signal()
528 sync
+= d_xer_delay
.eq(d_xer
.req
)
529 with m
.If(d_xer_delay
):
530 # data arrives one clock later
531 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
532 comb
+= d_xer
.ack
.eq(1)
534 def tb_dec_fsm(self
, m
, spr_dec
):
537 this is a FSM for updating either dec or tb. it runs alternately
538 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
539 value to DEC, however the regfile has "passthrough" on it so this
542 see v3.0B p1097-1099 for Timer Resource and p1065 and p1076
545 comb
, sync
= m
.d
.comb
, m
.d
.sync
546 state_rf
= self
.core
.regs
.rf
['state']
547 state_r_dectb
= state_rf
.r_ports
['issue'] # DEC/TB
548 state_w_dectb
= state_rf
.w_ports
['issue'] # DEC/TB
552 # initiates read of current DEC
553 with m
.State("DEC_READ"):
554 comb
+= state_r_dectb
.ren
.eq(1<<StateRegs
.DEC
)
555 with m
.If(~self
.pause_dec_tb
):
558 # waits for DEC read to arrive (1 cycle), updates with new value
559 # respects if dec/tb writing has been paused
560 with m
.State("DEC_WRITE"):
561 with m
.If(self
.pause_dec_tb
):
562 # if paused, return to reading
566 # TODO: MSR.LPCR 32-bit decrement mode
567 comb
+= new_dec
.eq(state_r_dectb
.o_data
- 1)
568 comb
+= state_w_dectb
.wen
.eq(1<<StateRegs
.DEC
)
569 comb
+= state_w_dectb
.i_data
.eq(new_dec
)
570 # copy to cur_state for decoder, for an interrupt
571 sync
+= spr_dec
.eq(new_dec
)
574 # initiates read of current TB
575 with m
.State("TB_READ"):
576 comb
+= state_r_dectb
.ren
.eq(1<<StateRegs
.TB
)
577 with m
.If(~self
.pause_dec_tb
):
580 # waits for read TB to arrive, initiates write of current TB
581 # respects if dec/tb writing has been paused
582 with m
.State("TB_WRITE"):
583 with m
.If(self
.pause_dec_tb
):
584 # if paused, return to reading
588 comb
+= new_tb
.eq(state_r_dectb
.o_data
+ 1)
589 comb
+= state_w_dectb
.wen
.eq(1<<StateRegs
.TB
)
590 comb
+= state_w_dectb
.i_data
.eq(new_tb
)
595 def elaborate(self
, platform
):
598 comb
, sync
= m
.d
.comb
, m
.d
.sync
599 cur_state
= self
.cur_state
600 pdecode2
= self
.pdecode2
603 # set up peripherals and core
604 core_rst
= self
.core_rst
605 self
.setup_peripherals(m
)
607 # reset current state if core reset requested
609 m
.d
.sync
+= self
.cur_state
.eq(0)
611 # check halted condition: requested PC to execute matches DMI stop addr
612 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
615 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
617 comb
+= dbg
.core_stopped_i
.eq(1)
618 comb
+= dbg
.terminate_i
.eq(1)
620 # PC and instruction from I-Memory
621 comb
+= self
.pc_o
.eq(cur_state
.pc
)
622 self
.pc_changed
= Signal() # note write to PC
623 self
.msr_changed
= Signal() # note write to MSR
624 self
.sv_changed
= Signal() # note write to SVSTATE
626 # read state either from incoming override or from regfile
627 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
628 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
630 self
.state_r_msr
, StateRegs
.MSR
)
631 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
633 self
.state_r_pc
, StateRegs
.PC
)
634 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
635 "svstate", # read SVSTATE
636 self
.state_r_sv
, StateRegs
.SVSTATE
)
638 # don't write pc every cycle
639 comb
+= self
.state_w_pc
.wen
.eq(0)
640 comb
+= self
.state_w_pc
.i_data
.eq(0)
642 # connect up debug state. note "combinatorially same" below,
643 # this is a bit naff, passing state over in the dbg class, but
644 # because it is combinatorial it achieves the desired goal
645 comb
+= dbg
.state
.eq(state
)
647 # this bit doesn't have to be in the FSM: connect up to read
648 # regfiles on demand from DMI
651 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
652 # (which uses that in PowerDecoder2 to raise 0x900 exception)
653 self
.tb_dec_fsm(m
, cur_state
.dec
)
655 # while stopped, allow updating the MSR, PC and SVSTATE.
656 # these are mainly for debugging purposes (including DMI/JTAG)
657 with m
.If(dbg
.core_stopped_i
):
658 with m
.If(self
.pc_i
.ok
):
659 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
660 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
661 sync
+= self
.pc_changed
.eq(1)
662 with m
.If(self
.msr_i
.ok
):
663 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
664 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
665 sync
+= self
.msr_changed
.eq(1)
666 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
667 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
668 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
669 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
670 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
671 sync
+= self
.sv_changed
.eq(1)
673 # start renaming some of the ports to match microwatt
674 if self
.microwatt_compat
:
675 self
.core
.o
.core_terminate_o
.name
= "terminated_out"
676 # names of DMI interface
677 self
.dbg
.dmi
.addr_i
.name
= 'dmi_addr'
678 self
.dbg
.dmi
.din
.name
= 'dmi_din'
679 self
.dbg
.dmi
.dout
.name
= 'dmi_dout'
680 self
.dbg
.dmi
.req_i
.name
= 'dmi_req'
681 self
.dbg
.dmi
.we_i
.name
= 'dmi_wr'
682 self
.dbg
.dmi
.ack_o
.name
= 'dmi_ack'
683 # wishbone instruction bus
684 ibus
= self
.imem
.ibus
685 ibus
.adr
.name
= 'wishbone_insn_out.adr'
686 ibus
.dat_w
.name
= 'wishbone_insn_out.dat'
687 ibus
.sel
.name
= 'wishbone_insn_out.sel'
688 ibus
.cyc
.name
= 'wishbone_insn_out.cyc'
689 ibus
.stb
.name
= 'wishbone_insn_out.stb'
690 ibus
.we
.name
= 'wishbone_insn_out.we'
691 ibus
.dat_r
.name
= 'wishbone_insn_in.dat'
692 ibus
.ack
.name
= 'wishbone_insn_in.ack'
693 ibus
.stall
.name
= 'wishbone_insn_in.stall'
695 dbus
= self
.core
.l0
.cmpi
.wb_bus()
696 dbus
.adr
.name
= 'wishbone_data_out.adr'
697 dbus
.dat_w
.name
= 'wishbone_data_out.dat'
698 dbus
.sel
.name
= 'wishbone_data_out.sel'
699 dbus
.cyc
.name
= 'wishbone_data_out.cyc'
700 dbus
.stb
.name
= 'wishbone_data_out.stb'
701 dbus
.we
.name
= 'wishbone_data_out.we'
702 dbus
.dat_r
.name
= 'wishbone_data_in.dat'
703 dbus
.ack
.name
= 'wishbone_data_in.ack'
704 dbus
.stall
.name
= 'wishbone_data_in.stall'
709 yield from self
.pc_i
.ports()
710 yield from self
.msr_i
.ports()
713 yield from self
.core
.ports()
714 yield from self
.imem
.ports()
715 yield self
.core_bigendian_i
721 def external_ports(self
):
722 if self
.microwatt_compat
:
723 ports
= [self
.core
.o
.core_terminate_o
,
725 self
.alt_reset
, # not connected yet
726 self
.nia
, self
.insn
, self
.nia_req
, self
.msr_o
,
727 self
.ldst_req
, self
.ldst_addr
,
731 ports
+= list(self
.dbg
.dmi
.ports())
732 # for dbus/ibus microwatt, exclude err btw and cti
733 for name
, sig
in self
.imem
.ibus
.fields
.items():
734 if name
not in ['err', 'bte', 'cti', 'adr']:
736 for name
, sig
in self
.core
.l0
.cmpi
.wb_bus().fields
.items():
737 if name
not in ['err', 'bte', 'cti', 'adr']:
739 # microwatt non-compliant with wishbone
740 ports
.append(self
.ibus_adr
)
741 ports
.append(self
.dbus_adr
)
744 ports
= self
.pc_i
.ports()
745 ports
= self
.msr_i
.ports()
746 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
750 ports
+= list(self
.jtag
.external_ports())
752 # don't add DMI if JTAG is enabled
753 ports
+= list(self
.dbg
.dmi
.ports())
755 ports
+= list(self
.imem
.ibus
.fields
.values())
756 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
759 for sram
in self
.sram4k
:
760 ports
+= list(sram
.bus
.fields
.values())
763 ports
+= list(self
.xics_icp
.bus
.fields
.values())
764 ports
+= list(self
.xics_ics
.bus
.fields
.values())
765 ports
.append(self
.int_level_i
)
767 ports
.append(self
.ext_irq
)
770 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
771 ports
.append(self
.gpio_o
)
779 class TestIssuerInternal(TestIssuerBase
):
780 """TestIssuer - reads instructions from TestMemory and issues them
782 efficiency and speed is not the main goal here: functional correctness
783 and code clarity is. optimisations (which almost 100% interfere with
784 easy understanding) come later.
787 def fetch_fsm(self
, m
, dbg
, core
, pc
, msr
, svstate
, nia
, is_svp64_mode
,
788 fetch_pc_o_ready
, fetch_pc_i_valid
,
789 fetch_insn_o_valid
, fetch_insn_i_ready
):
792 this FSM performs fetch of raw instruction data, partial-decodes
793 it 32-bit at a time to detect SVP64 prefixes, and will optionally
794 read a 2nd 32-bit quantity if that occurs.
798 pdecode2
= self
.pdecode2
799 cur_state
= self
.cur_state
800 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
802 # also note instruction fetch failed
803 if hasattr(core
, "icache"):
804 fetch_failed
= core
.icache
.i_out
.fetch_failed
807 fetch_failed
= Const(0, 1)
810 # set priv / virt mode on I-Cache, sigh
811 if isinstance(self
.imem
, ICache
):
812 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
813 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
815 with m
.FSM(name
='fetch_fsm'):
818 with m
.State("IDLE"):
819 # fetch allowed if not failed and stopped but not stepping
820 # (see dmi.py for how core_stop_o is generated)
821 with m
.If(~fetch_failed
& ~dbg
.core_stop_o
):
822 comb
+= fetch_pc_o_ready
.eq(1)
823 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
825 # instruction allowed to go: start by reading the PC
826 # capture the PC and also drop it into Insn Memory
827 # we have joined a pair of combinatorial memory
828 # lookups together. this is Generally Bad.
829 comb
+= self
.imem
.a_pc_i
.eq(pc
)
830 comb
+= self
.imem
.a_i_valid
.eq(1)
831 comb
+= self
.imem
.f_i_valid
.eq(1)
832 # transfer state to output
833 sync
+= cur_state
.pc
.eq(pc
)
834 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
835 sync
+= cur_state
.msr
.eq(msr
) # and msr
837 m
.next
= "INSN_READ" # move to "wait for bus" phase
839 # dummy pause to find out why simulation is not keeping up
840 with m
.State("INSN_READ"):
841 # when using "single-step" mode, checking dbg.stopping_o
842 # prevents progress. allow fetch to proceed once started
844 #if self.allow_overlap:
845 # stopping = dbg.stopping_o
847 # stopping: jump back to idle
850 with m
.If(self
.imem
.f_busy_o
&
851 ~pdecode2
.instr_fault
): # zzz...
852 # busy but not fetch failed: stay in wait-read
853 comb
+= self
.imem
.a_pc_i
.eq(pc
)
854 comb
+= self
.imem
.a_i_valid
.eq(1)
855 comb
+= self
.imem
.f_i_valid
.eq(1)
857 # not busy (or fetch failed!): instruction fetched
858 # when fetch failed, the instruction gets ignored
860 if hasattr(core
, "icache"):
861 # blech, icache returns actual instruction
862 insn
= self
.imem
.f_instr_o
864 # but these return raw memory
865 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
868 # decode the SVP64 prefix, if any
869 comb
+= svp64
.raw_opcode_in
.eq(insn
)
870 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
871 # pass the decoded prefix (if any) to PowerDecoder2
872 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
873 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
874 # remember whether this is a prefixed instruction,
875 # so the FSM can readily loop when VL==0
876 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
877 # calculate the address of the following instruction
878 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
879 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
880 with m
.If(~svp64
.is_svp64_mode
):
881 # with no prefix, store the instruction
882 # and hand it directly to the next FSM
883 sync
+= dec_opcode_i
.eq(insn
)
884 m
.next
= "INSN_READY"
886 # fetch the rest of the instruction from memory
887 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
888 comb
+= self
.imem
.a_i_valid
.eq(1)
889 comb
+= self
.imem
.f_i_valid
.eq(1)
890 m
.next
= "INSN_READ2"
892 # not SVP64 - 32-bit only
893 sync
+= nia
.eq(cur_state
.pc
+ 4)
894 sync
+= dec_opcode_i
.eq(insn
)
895 if self
.microwatt_compat
:
896 # for verilator debug purposes
897 comb
+= self
.insn
.eq(insn
)
898 comb
+= self
.nia
.eq(cur_state
.pc
)
899 comb
+= self
.msr_o
.eq(cur_state
.msr
)
900 comb
+= self
.nia_req
.eq(1)
901 m
.next
= "INSN_READY"
903 with m
.State("INSN_READ2"):
904 with m
.If(self
.imem
.f_busy_o
): # zzz...
905 # busy: stay in wait-read
906 comb
+= self
.imem
.a_i_valid
.eq(1)
907 comb
+= self
.imem
.f_i_valid
.eq(1)
909 # not busy: instruction fetched
910 if hasattr(core
, "icache"):
911 # blech, icache returns actual instruction
912 insn
= self
.imem
.f_instr_o
914 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
915 sync
+= dec_opcode_i
.eq(insn
)
916 m
.next
= "INSN_READY"
917 # TODO: probably can start looking at pdecode2.rm_dec
918 # here or maybe even in INSN_READ state, if svp64_mode
919 # detected, in order to trigger - and wait for - the
922 pmode
= pdecode2
.rm_dec
.predmode
924 if pmode != SVP64PredMode.ALWAYS.value:
925 fire predicate loading FSM and wait before
928 sync += self.srcmask.eq(-1) # set to all 1s
929 sync += self.dstmask.eq(-1) # set to all 1s
930 m.next = "INSN_READY"
933 with m
.State("INSN_READY"):
934 # hand over the instruction, to be decoded
935 comb
+= fetch_insn_o_valid
.eq(1)
936 with m
.If(fetch_insn_i_ready
):
def fetch_predicate_fsm(self, m,
                        pred_insn_i_valid, pred_insn_o_ready,
                        pred_mask_o_valid, pred_mask_i_ready):
    """fetch_predicate_fsm - obtains (constructs in the case of CR)
       src/dest predicate masks

    https://bugs.libre-soc.org/show_bug.cgi?id=617
    the predicates can be read here, by using IntRegs r_ports['pred']
    or CRRegs r_ports['pred'].  in the case of CRs it will have to
    be done through multiple reads, extracting one relevant at a time.
    later, a faster way would be to use the 32-bit-wide CR port but
    this is more complex decoding, here.  equivalent code used in
    ISACaller is "from openpower.decoder.isa.caller import get_predcr"

    note: this ENTIRE FSM is not to be called when svp64 is disabled
    """
    comb = m.d.comb
    sync = m.d.sync
    pdecode2 = self.pdecode2
    rm_dec = pdecode2.rm_dec  # SVP64RMModeDecode
    predmode = rm_dec.predmode
    srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
    cr_pred, int_pred = self.cr_pred, self.int_pred  # read regfiles

    # get src/dst step, so we can skip already used mask bits
    cur_state = self.cur_state
    srcstep = cur_state.svstate.srcstep
    dststep = cur_state.svstate.dststep
    cur_vl = cur_state.svstate.vl

    # decode predicates: register number / inversion / unary (1<<r3) /
    # all-1s shortcut for INT, CR-field bit index / inversion for CR
    sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
    dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
    sidx, scrinvert = get_predcr(m, srcpred, 's')
    didx, dcrinvert = get_predcr(m, dstpred, 'd')

    # store fetched masks, for either intpred or crpred
    # when src/dst step is not zero, the skipped mask bits need to be
    # shifted-out, before actually storing them in src/dest mask
    new_srcmask = Signal(64, reset_less=True)
    new_dstmask = Signal(64, reset_less=True)

    with m.FSM(name="fetch_predicate"):

        with m.State("FETCH_PRED_IDLE"):
            # advertise readiness; wait for the issue FSM to kick us off
            comb += pred_insn_o_ready.eq(1)
            with m.If(pred_insn_i_valid):
                with m.If(predmode == SVP64PredMode.INT):
                    # skip fetching destination mask register, when zero
                    with m.If(dall1s):
                        sync += new_dstmask.eq(-1)
                        # directly go to fetch source mask register
                        # guaranteed not to be zero (otherwise predmode
                        # would be SVP64PredMode.ALWAYS, not INT)
                        comb += int_pred.addr.eq(sregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_SRC_READ"
                    with m.Else():
                        # fetch destination predicate register
                        comb += int_pred.addr.eq(dregread)
                        comb += int_pred.ren.eq(1)
                        m.next = "INT_DST_READ"
                with m.Elif(predmode == SVP64PredMode.CR):
                    # go fetch masks from the CR register file
                    sync += new_srcmask.eq(0)
                    sync += new_dstmask.eq(0)
                    m.next = "CR_READ"
                with m.Else():
                    # neither INT nor CR predication: all bits enabled
                    sync += self.srcmask.eq(-1)
                    sync += self.dstmask.eq(-1)
                    m.next = "FETCH_PRED_DONE"

        with m.State("INT_DST_READ"):
            # store destination mask
            inv = Repl(dinvert, 64)
            with m.If(dunary):
                # set selected mask bit for 1<<r3 mode
                dst_shift = Signal(range(64))
                comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_dstmask.eq(1 << dst_shift)
            with m.Else():
                # invert mask if requested
                sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
            # skip fetching source mask register, when zero
            with m.If(sall1s):
                sync += new_srcmask.eq(-1)
                m.next = "FETCH_PRED_SHIFT_MASK"
            with m.Else():
                # fetch source predicate register
                comb += int_pred.addr.eq(sregread)
                comb += int_pred.ren.eq(1)
                m.next = "INT_SRC_READ"

        with m.State("INT_SRC_READ"):
            # store source mask
            inv = Repl(sinvert, 64)
            with m.If(sunary):
                # set selected mask bit for 1<<r3 mode
                src_shift = Signal(range(64))
                comb += src_shift.eq(self.int_pred.o_data & 0b111111)
                sync += new_srcmask.eq(1 << src_shift)
            with m.Else():
                # invert mask if requested
                sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
            m.next = "FETCH_PRED_SHIFT_MASK"

        # fetch masks from the CR register file
        # implements the following loop:
        # idx, inv = get_predcr(mask)
        # mask = 0
        # for cr_idx in range(vl):
        #     cr = crl[cr_idx + SVP64CROffs.CRPred]  # takes one cycle
        #     if cr[idx] ^ inv:
        #         mask |= 1 << cr_idx
        # return mask
        with m.State("CR_READ"):
            # CR index to be read, which will be ready by the next cycle
            cr_idx = Signal.like(cur_vl, reset_less=True)
            # submit the read operation to the regfile
            with m.If(cr_idx != cur_vl):
                # the CR read port is unary ...
                # ren = 1 << cr_idx
                # ... in MSB0 convention ...
                # ren = 1 << (7 - cr_idx)
                # ... and with an offset:
                # ren = 1 << (7 - off - cr_idx)
                idx = SVP64CROffs.CRPred + cr_idx
                comb += cr_pred.ren.eq(1 << (7 - idx))
                # signal data valid in the next cycle
                cr_read = Signal(reset_less=True)
                sync += cr_read.eq(1)
                # load the next index
                sync += cr_idx.eq(cr_idx + 1)
            with m.Else():
                # exit on loop end
                sync += cr_read.eq(0)
                sync += cr_idx.eq(0)
                m.next = "FETCH_PRED_SHIFT_MASK"

            with m.If(cr_read):
                # compensate for the one cycle delay on the regfile
                cur_cr_idx = Signal.like(cur_vl)
                comb += cur_cr_idx.eq(cr_idx - 1)
                # read the CR field, select the appropriate bit
                cr_field = Signal(4)
                scr_bit = Signal(reset_less=True)
                dcr_bit = Signal(reset_less=True)
                comb += cr_field.eq(cr_pred.o_data)
                comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
                                   ^ scrinvert)
                comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
                                   ^ dcrinvert)
                # set the corresponding mask bit
                bit_to_set = Signal.like(self.srcmask)
                comb += bit_to_set.eq(1 << cur_cr_idx)
                with m.If(scr_bit):
                    sync += new_srcmask.eq(new_srcmask | bit_to_set)
                with m.If(dcr_bit):
                    sync += new_dstmask.eq(new_dstmask | bit_to_set)

        with m.State("FETCH_PRED_SHIFT_MASK"):
            # shift-out skipped mask bits
            sync += self.srcmask.eq(new_srcmask >> srcstep)
            sync += self.dstmask.eq(new_dstmask >> dststep)
            m.next = "FETCH_PRED_DONE"

        with m.State("FETCH_PRED_DONE"):
            # hand the masks over to the issue FSM
            comb += pred_mask_o_valid.eq(1)
            with m.If(pred_mask_i_ready):
                m.next = "FETCH_PRED_IDLE"
def issue_fsm(self, m, core, nia,
              dbg, core_rst, is_svp64_mode,
              fetch_pc_o_ready, fetch_pc_i_valid,
              fetch_insn_o_valid, fetch_insn_i_ready,
              pred_insn_i_valid, pred_insn_o_ready,
              pred_mask_o_valid, pred_mask_i_ready,
              exec_insn_i_valid, exec_insn_o_ready,
              exec_pc_o_valid, exec_pc_i_ready):
    """issue FSM

    decode / issue FSM.  this interacts with the "fetch" FSM
    through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
    (outgoing). also interacts with the "execute" FSM
    through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
    (incoming).
    SVP64 RM prefixes have already been set up by the
    "fetch" phase, so execute is fairly straightforward.
    """

    comb, sync = m.d.comb, m.d.sync
    pdecode2 = self.pdecode2
    cur_state = self.cur_state
    new_svstate = self.new_svstate

    # temporaries
    dec_opcode_i = pdecode2.dec.raw_opcode_in  # raw opcode

    # for updating svstate (things like srcstep etc.)
    comb += new_svstate.eq(cur_state.svstate)

    # precalculate srcstep+1 and dststep+1
    cur_srcstep = cur_state.svstate.srcstep
    cur_dststep = cur_state.svstate.dststep
    next_srcstep = Signal.like(cur_srcstep)
    next_dststep = Signal.like(cur_dststep)
    comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
    comb += next_dststep.eq(cur_state.svstate.dststep+1)

    # note if an exception happened.  in a pipelined or OoO design
    # this needs to be accompanied by "shadowing" (or stalling)
    exc_happened = self.core.o.exc_happened
    # also note instruction fetch failed
    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
        flush_needed = True
        # set to fault in decoder
        # update (highest priority) instruction fault
        rising_fetch_failed = rising_edge(m, fetch_failed)
        with m.If(rising_fetch_failed):
            sync += pdecode2.instr_fault.eq(1)
    else:
        fetch_failed = Const(0, 1)
        flush_needed = False

    with m.FSM(name="issue_fsm"):

        # sync with the "fetch" phase which is reading the instruction
        # at this point, there is no instruction running, that
        # could inadvertently update the PC.
        with m.State("ISSUE_START"):
            # reset instruction fault
            sync += pdecode2.instr_fault.eq(0)
            # wait on "core stop" release, before next fetch
            # need to do this here, in case we are in a VL==0 loop
            with m.If(~dbg.core_stop_o & ~core_rst):
                comb += fetch_pc_i_valid.eq(1)  # tell fetch to start
                with m.If(fetch_pc_o_ready):  # fetch acknowledged us
                    m.next = "INSN_WAIT"
            with m.Else():
                # tell core it's stopped, and acknowledge debug handshake
                comb += dbg.core_stopped_i.eq(1)
                # while stopped, allow updating SVSTATE
                with m.If(self.svstate_i.ok):
                    comb += new_svstate.eq(self.svstate_i.data)
                    comb += self.update_svstate.eq(1)
                    sync += self.sv_changed.eq(1)

        # wait for an instruction to arrive from Fetch
        with m.State("INSN_WAIT"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress.  allow issue to proceed once started
            stopping = Const(0)
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += fetch_insn_i_ready.eq(1)
                with m.If(fetch_insn_o_valid):
                    # loop into ISSUE_START if it's a SVP64 instruction
                    # and VL == 0.  this because VL==0 is a for-loop
                    # from 0 to 0 i.e. always, always a NOP.
                    cur_vl = cur_state.svstate.vl
                    with m.If(is_svp64_mode & (cur_vl == 0)):
                        # update the PC before fetching the next instruction
                        # since we are in a VL==0 loop, no instruction was
                        # executed that we could be overwriting
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        comb += self.insn_done.eq(1)
                        m.next = "ISSUE_START"
                    with m.Else():
                        if self.svp64_en:
                            m.next = "PRED_START"  # fetching predicate
                        else:
                            m.next = "DECODE_SV"  # skip predication

        with m.State("PRED_START"):
            comb += pred_insn_i_valid.eq(1)  # tell fetch_pred to start
            with m.If(pred_insn_o_ready):  # fetch_pred acknowledged us
                m.next = "MASK_WAIT"

        with m.State("MASK_WAIT"):
            comb += pred_mask_i_ready.eq(1)  # ready to receive the masks
            with m.If(pred_mask_o_valid):  # predication masks are ready
                m.next = "PRED_SKIP"

        # skip zeros in predicate
        with m.State("PRED_SKIP"):
            with m.If(~is_svp64_mode):
                m.next = "DECODE_SV"  # nothing to do
            with m.Else():
                if self.svp64_en:
                    pred_src_zero = pdecode2.rm_dec.pred_sz
                    pred_dst_zero = pdecode2.rm_dec.pred_dz

                    # new srcstep, after skipping zeros
                    skip_srcstep = Signal.like(cur_srcstep)
                    # value to be added to the current srcstep
                    src_delta = Signal.like(cur_srcstep)
                    # add leading zeros to srcstep, if not in zero mode
                    with m.If(~pred_src_zero):
                        # priority encoder (count leading zeros)
                        # append guard bit, in case the mask is all zeros
                        pri_enc_src = PriorityEncoder(65)
                        m.submodules.pri_enc_src = pri_enc_src
                        comb += pri_enc_src.i.eq(Cat(self.srcmask,
                                                     Const(1, 1)))
                        comb += src_delta.eq(pri_enc_src.o)
                    # apply delta to srcstep
                    comb += skip_srcstep.eq(cur_srcstep + src_delta)
                    # shift-out all leading zeros from the mask
                    # plus the leading "one" bit
                    # TODO count leading zeros and shift-out the zero
                    #      bits, in the same step, in hardware
                    sync += self.srcmask.eq(self.srcmask >> (src_delta+1))

                    # same as above, but for dststep
                    skip_dststep = Signal.like(cur_dststep)
                    dst_delta = Signal.like(cur_dststep)
                    with m.If(~pred_dst_zero):
                        pri_enc_dst = PriorityEncoder(65)
                        m.submodules.pri_enc_dst = pri_enc_dst
                        comb += pri_enc_dst.i.eq(Cat(self.dstmask,
                                                     Const(1, 1)))
                        comb += dst_delta.eq(pri_enc_dst.o)
                    comb += skip_dststep.eq(cur_dststep + dst_delta)
                    sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))

                    # TODO: initialize mask[VL]=1 to avoid passing past VL
                    with m.If((skip_srcstep >= cur_vl) |
                              (skip_dststep >= cur_vl)):
                        # end of VL loop.  Update PC and reset src/dst step
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        comb += new_svstate.srcstep.eq(0)
                        comb += new_svstate.dststep.eq(0)
                        comb += self.update_svstate.eq(1)
                        # synchronize with the simulator
                        comb += self.insn_done.eq(1)
                        # go back to Issue
                        m.next = "ISSUE_START"
                    with m.Else():
                        # update new src/dst step
                        comb += new_svstate.srcstep.eq(skip_srcstep)
                        comb += new_svstate.dststep.eq(skip_dststep)
                        comb += self.update_svstate.eq(1)
                        # proceed to Decode
                        m.next = "DECODE_SV"

                    # pass predicate mask bits through to satellite decoders
                    # TODO: for SIMD this will be *multiple* bits
                    sync += core.i.sv_pred_sm.eq(self.srcmask[0])
                    sync += core.i.sv_pred_dm.eq(self.dstmask[0])

        # after src/dst step have been updated, we are ready
        # to decode the instruction
        with m.State("DECODE_SV"):
            # decode the instruction
            with m.If(~fetch_failed):
                sync += pdecode2.instr_fault.eq(0)
            sync += core.i.e.eq(pdecode2.e)
            sync += core.i.state.eq(cur_state)
            sync += core.i.raw_insn_i.eq(dec_opcode_i)
            sync += core.i.bigendian_i.eq(self.core_bigendian_i)
            if self.svp64_en:
                sync += core.i.sv_rm.eq(pdecode2.sv_rm)
                # set RA_OR_ZERO detection in satellite decoders
                sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
                # and svp64 detection
                sync += core.i.is_svp64_mode.eq(is_svp64_mode)
                # and svp64 bit-rev'd ldst mode
                ldst_dec = pdecode2.use_svp64_ldst_dec
                sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
            # after decoding, reset any previous exception condition,
            # allowing it to be set again during the next execution
            sync += pdecode2.ldst_exc.eq(0)

            m.next = "INSN_EXECUTE"  # move to "execute"

        # handshake with execution FSM, move to "wait" once acknowledged
        with m.State("INSN_EXECUTE"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress.  allow execute to proceed once started
            stopping = Const(0)
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += exec_insn_i_valid.eq(1)  # trigger execute
                with m.If(exec_insn_o_ready):  # execute acknowledged us
                    m.next = "EXECUTE_WAIT"

        with m.State("EXECUTE_WAIT"):
            comb += exec_pc_i_ready.eq(1)
            # see https://bugs.libre-soc.org/show_bug.cgi?id=636
            # the exception info needs to be blatted into
            # pdecode.ldst_exc, and the instruction "re-run".
            # when ldst_exc.happened is set, the PowerDecoder2
            # reacts very differently: it re-writes the instruction
            # with a "trap" (calls PowerDecoder2.trap()) which
            # will *overwrite* whatever was requested and jump the
            # PC to the exception address, as well as alter MSR.
            # nothing else needs to be done other than to note
            # the change of PC and MSR (and, later, SVSTATE)
            with m.If(exc_happened):
                mmu = core.fus.get_exc("mmu0")
                ldst = core.fus.get_exc("ldst0")
                if mmu is not None:
                    with m.If(fetch_failed):
                        # instruction fetch: exception is from MMU
                        # reset instr_fault (highest priority)
                        sync += pdecode2.ldst_exc.eq(mmu)
                        sync += pdecode2.instr_fault.eq(0)
                        if flush_needed:
                            # request icache to stop asserting "failed"
                            comb += core.icache.flush_in.eq(1)
                with m.If(~fetch_failed):
                    # otherwise assume it was a LDST exception
                    sync += pdecode2.ldst_exc.eq(ldst)

            with m.If(exec_pc_o_valid):

                # was this the last loop iteration?
                is_last = Signal()
                cur_vl = cur_state.svstate.vl
                comb += is_last.eq(next_srcstep == cur_vl)

                with m.If(pdecode2.instr_fault):
                    # reset instruction fault, try again
                    sync += pdecode2.instr_fault.eq(0)
                    m.next = "ISSUE_START"

                # return directly to Decode if Execute generated an
                # exception.
                with m.Elif(pdecode2.ldst_exc.happened):
                    m.next = "DECODE_SV"

                # if MSR, PC or SVSTATE were changed by the previous
                # instruction, go directly back to Fetch, without
                # updating either MSR PC or SVSTATE
                with m.Elif(self.msr_changed | self.pc_changed |
                            self.sv_changed):
                    m.next = "ISSUE_START"

                # also return to Fetch, when no output was a vector
                # (regardless of SRCSTEP and VL), or when the last
                # instruction was really the last one of the VL loop
                with m.Elif((~pdecode2.loop_continue) | is_last):
                    # before going back to fetch, update the PC state
                    # register with the NIA.
                    # ok here we are not reading the branch unit.
                    # TODO: this just blithely overwrites whatever
                    #       pipeline updated the PC
                    comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                    comb += self.state_w_pc.i_data.eq(nia)
                    # reset SRCSTEP before returning to Fetch
                    if self.svp64_en:
                        with m.If(pdecode2.loop_continue):
                            comb += new_svstate.srcstep.eq(0)
                            comb += new_svstate.dststep.eq(0)
                            comb += self.update_svstate.eq(1)
                    else:
                        comb += new_svstate.srcstep.eq(0)
                        comb += new_svstate.dststep.eq(0)
                        comb += self.update_svstate.eq(1)
                    m.next = "ISSUE_START"

                # returning to Execute? then, first update SRCSTEP
                with m.Else():
                    comb += new_svstate.srcstep.eq(next_srcstep)
                    comb += new_svstate.dststep.eq(next_dststep)
                    comb += self.update_svstate.eq(1)
                    # return to mask skip loop
                    m.next = "PRED_SKIP"

    # check if svstate needs updating: if so, write it to State Regfile
    with m.If(self.update_svstate):
        sync += cur_state.svstate.eq(self.new_svstate)  # for next clock
def execute_fsm(self, m, core,
                exec_insn_i_valid, exec_insn_o_ready,
                exec_pc_o_valid, exec_pc_i_ready):
    """execute FSM

    execute FSM.  this interacts with the "issue" FSM
    through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
    (outgoing).  SVP64 RM prefixes have already been set up by the
    "issue" phase, so execute is fairly straightforward.
    """

    comb = m.d.comb
    sync = m.d.sync
    dbg = self.dbg
    pdecode2 = self.pdecode2

    # temporaries
    core_busy_o = core.n.o_data.busy_o  # core is busy
    core_ivalid_i = core.p.i_valid  # instruction is valid

    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
    else:
        fetch_failed = Const(0, 1)

    with m.FSM(name="exec_fsm"):

        # waiting for instruction bus (stays there until not busy)
        with m.State("INSN_START"):
            comb += exec_insn_o_ready.eq(1)
            with m.If(exec_insn_i_valid):
                comb += core_ivalid_i.eq(1)  # instruction is valid/issued
                # clear the change-trackers for this new instruction
                sync += self.sv_changed.eq(0)
                sync += self.pc_changed.eq(0)
                sync += self.msr_changed.eq(0)
                with m.If(core.p.o_ready):  # only move if accepted
                    m.next = "INSN_ACTIVE"  # move to "wait completion"

        # instruction started: must wait till it finishes
        with m.State("INSN_ACTIVE"):
            # note changes to MSR, PC and SVSTATE, and DEC/TB
            # these last two are done together, and passed to the
            # DEC/TB-pause logic
            with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
                sync += self.sv_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
                sync += self.msr_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
                sync += self.pc_changed.eq(1)
            with m.If((self.state_spr.wen &
                       ((1 << StateRegs.DEC) |
                        (1 << StateRegs.TB))).bool()):
                comb += self.pause_dec_tb.eq(1)
            with m.If(~core_busy_o):  # instruction done!
                comb += exec_pc_o_valid.eq(1)
                with m.If(exec_pc_i_ready):
                    # when finished, indicate "done".
                    # however, if there was an exception, the instruction
                    # is *not* yet done.  this is an implementation
                    # detail: we choose to implement exceptions by
                    # taking the exception information from the LDST
                    # unit, putting that *back* into the PowerDecoder2,
                    # and *re-running the entire instruction*.
                    # if we erroneously indicate "done" here, it is as if
                    # there were *TWO* instructions:
                    # 1) the failed LDST 2) a TRAP.
                    with m.If(~pdecode2.ldst_exc.happened &
                              ~pdecode2.instr_fault):
                        comb += self.insn_done.eq(1)
                    m.next = "INSN_START"  # back to fetch
            # terminate returns directly to INSN_START
            with m.If(dbg.terminate_i):
                # comb += self.insn_done.eq(1) - no because it's not
                m.next = "INSN_START"  # back to fetch
def elaborate(self, platform):
    """elaborate - wires the fetch / predicate-fetch / issue / execute
    FSMs together via their ready/valid handshake signals, on top of
    whatever the superclass elaborated.
    """
    m = super().elaborate(platform)

    # convenience
    comb, sync = m.d.comb, m.d.sync
    cur_state = self.cur_state
    pdecode2 = self.pdecode2
    dbg = self.dbg
    core = self.core

    # set up peripherals and core
    core_rst = self.core_rst

    # indicate to outside world if any FU is still executing
    comb += self.any_busy.eq(core.n.o_data.any_busy_o)  # any FU executing

    # address of the next instruction, in the absence of a branch
    # depends on the instruction size
    nia = Signal(64)

    # connect up debug signals
    with m.If(core.o.core_terminate_o):
        comb += dbg.terminate_i.eq(1)

    # pass the prefix mode from Fetch to Issue, so the latter can loop
    # on VL==0
    is_svp64_mode = Signal()

    # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
    # issue, decode/execute, now joined by "Predicate fetch/calculate".
    # these are the handshake signals between each

    # fetch FSM can run as soon as the PC is valid
    fetch_pc_i_valid = Signal()  # Execute tells Fetch "start next read"
    fetch_pc_o_ready = Signal()  # Fetch Tells SVSTATE "proceed"

    # fetch FSM hands over the instruction to be decoded / issued
    fetch_insn_o_valid = Signal()
    fetch_insn_i_ready = Signal()

    # predicate fetch FSM decodes and fetches the predicate
    pred_insn_i_valid = Signal()
    pred_insn_o_ready = Signal()

    # predicate fetch FSM delivers the masks
    pred_mask_o_valid = Signal()
    pred_mask_i_ready = Signal()

    # issue FSM delivers the instruction to the be executed
    exec_insn_i_valid = Signal()
    exec_insn_o_ready = Signal()

    # execute FSM, hands over the PC/SVSTATE back to the issue FSM
    exec_pc_o_valid = Signal()
    exec_pc_i_ready = Signal()

    # the FSMs here are perhaps unusual in that they detect conditions
    # then "hold" information, combinatorially, for the core
    # (as opposed to using sync - which would be on a clock's delay)
    # this includes the actual opcode, valid flags and so on.

    # Fetch, then predicate fetch, then Issue, then Execute.
    # Issue is where the VL for-loop # lives.  the ready/valid
    # signalling is used to communicate between the four.

    self.fetch_fsm(m, dbg, core, dbg.state.pc, dbg.state.msr,
                   dbg.state.svstate, nia, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready)

    self.issue_fsm(m, core, nia,
                   dbg, core_rst, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready,
                   pred_insn_i_valid, pred_insn_o_ready,
                   pred_mask_o_valid, pred_mask_i_ready,
                   exec_insn_i_valid, exec_insn_o_ready,
                   exec_pc_o_valid, exec_pc_i_ready)

    # NOTE(review): guard reconstructed from damaged source - the
    # predicate-fetch FSM is only needed when SVP64 is enabled; confirm
    # against upstream
    if self.svp64_en:
        self.fetch_predicate_fsm(m,
                                 pred_insn_i_valid, pred_insn_o_ready,
                                 pred_mask_o_valid, pred_mask_i_ready)

    self.execute_fsm(m, core,
                     exec_insn_i_valid, exec_insn_o_ready,
                     exec_pc_o_valid, exec_pc_i_ready)

    # whatever was done above, over-ride it if core reset is held
    # NOTE(review): body of this reset override was lost in extraction;
    # resetting NIA is the presumed intent - confirm against upstream
    with m.If(core_rst):
        sync += nia.eq(0)

    return m
class TestIssuer(Elaboratable):
    """TestIssuer - wraps TestIssuerInternal with an (optional) PLL for
    clock generation / selection, exposing the combined port list.
    """
    def __init__(self, pspec):
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # external debug-domain reset request, passed through to ti
        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        if self.pll_en:
            self.pll_test_o = Signal(reset_less=True)
            self.pll_vco_o = Signal(reset_less=True)
            self.clk_sel_i = Signal(2, reset_less=True)
            self.ref_clk = ClockSignal()  # can't rename it but that's ok
            self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        if self.pll_en:
            # ClockSelect runs at PLL output internal clock rate
            m.submodules.wrappll = pll = self.pll

            # add clock domains from PLL
            cd_pll = ClockDomain("pllclk")
            m.domains += cd_pll

            # PLL clock established.  has the side-effect of running clklsel
            # at the PLL's speed (see DomainRenamer("pllclk") above)
            pllclk = self.pllclk_clk
            comb += pllclk.eq(pll.clk_pll_o)

            # wire up external 24mhz to PLL
            #comb += pll.clk_24_i.eq(self.ref_clk)
            # output 18 mhz PLL test signal, and analog oscillator out
            comb += self.pll_test_o.eq(pll.pll_test_o)
            comb += self.pll_vco_o.eq(pll.pll_vco_o)

            # input to pll clock selection
            comb += pll.clk_sel_i.eq(self.clk_sel_i)

            # now wire up ResetSignals.  don't mind them being in this domain
            pll_rst = ResetSignal("pllclk")
            comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        if self.pll_en:
            comb += intclk.eq(self.ref_clk)
            assert self.ti.core_domain != 'sync', \
                "cannot set core_domain to sync and use pll at the same time"
        else:
            if self.ti.core_domain != 'sync':
                comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)

        return m

    def ports(self):
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        if self.pll_en:
            ports.append(self.clk_sel_i)
            ports.append(self.pll.clk_24_i)
            ports.append(self.pll_test_o)
            ports.append(self.pll_vco_o)
            ports.append(self.pllclk_clk)
            ports.append(self.ref_clk)
        return ports
1694 if __name__
== '__main__':
1695 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1701 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1702 imem_ifacetype
='bare_wb',
1707 dut
= TestIssuer(pspec
)
1708 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1710 if len(sys
.argv
) == 1:
1711 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1712 with
open("test_issuer.il", "w") as f
: