3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmutil
.singlepipe
import ControlBase
25 from soc
.simple
.core_data
import FetchOutput
, FetchInput
27 from nmigen
.lib
.coding
import PriorityEncoder
29 from openpower
.decoder
.power_decoder
import create_pdecode
30 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
31 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
32 from openpower
.decoder
.decode2execute1
import Data
33 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
35 from openpower
.state
import CoreState
36 from openpower
.consts
import (CR
, SVP64CROffs
, MSR
)
37 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
38 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
39 from soc
.simple
.core
import NonProductionCore
40 from soc
.config
.test
.test_loadstore
import TestMemPspec
41 from soc
.config
.ifetch
import ConfigFetchUnit
42 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
43 from soc
.debug
.jtag
import JTAG
44 from soc
.config
.pinouts
import get_pinspecs
45 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
46 from soc
.bus
.simple_gpio
import SimpleGPIO
47 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
48 from soc
.clock
.select
import ClockSelect
49 from soc
.clock
.dummypll
import DummyPLL
50 from openpower
.sv
.svstate
import SVSTATERec
51 from soc
.experiment
.icache
import ICache
53 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Return the 32-bit instruction word addressed by *pc*.

    f_instr_o is the fetch-unit data output.  When the fetch port is
    already 32 bits wide the value is the instruction itself; when it
    is a 64-bit memory word, bit 2 of the PC selects which 32-bit half
    contains the instruction.

    NOTE(review): the 32-bit branch body was missing from the mangled
    source (orig lines 58-59); restored as a plain pass-through.
    """
    if f_instr_o.width == 32:
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, res, core_rst, state_i, name, regfile, regnum):
    """Drive *res* with a state variable (PC/MSR/SVSTATE).

    When the incoming override ``state_i.ok`` is set, *res* is driven
    combinatorially from ``state_i.data``.  Otherwise a read of the
    StateRegs regfile port *regfile* at index *regnum* is issued, and
    *res* picks up ``regfile.o_data`` on the following clock (tracked
    by a one-cycle delay of the inverted "ok" flag).

    NOTE(review): the ``comb``/``sync`` bindings, the ``~core_rst``
    gate and the ``m.Else()`` arm were absent from the mangled source
    (orig lines 67-68, 71, 76); restored here — confirm against the
    upstream file.
    """
    comb = m.d.comb
    sync = m.d.sync
    # read the {insert state variable here}
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for {insert state here}...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.o_data)
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert

    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3

    NOTE(review): the ``m.Switch(mask)`` context and the ``invert``
    assignments in the *_N cases were absent from the mangled source
    (orig lines 100, 110, 115, 120); restored here.
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)      # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)      # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            comb += regread.eq(3)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status

    this is identical to _get_predcr in ISACaller

    NOTE(review): the ``comb`` binding, the ``m.Switch(mask)`` context,
    the per-case second lines (orig 134, 137, 140, 143, 146, 149, 152,
    155) and the final ``return`` (orig 156-158) were absent from the
    mangled source.  The visible ``idx`` targets show the condition
    pairs (LT/GE share CR.LT, GT/LE share CR.GT, EQ/NE share CR.EQ,
    SO/NS share CR.SO); the ``invert`` values follow the ISACaller
    _get_predcr convention — verify the polarity against that function
    before relying on this reconstruction.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GE.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.LE.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NE.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.NS.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
    return idx, invert
159 class TestIssuerBase(Elaboratable
):
160 """TestIssuerBase - common base class for Issuers
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
166 def __init__(self
, pspec
):
168 # test if microwatt compatibility is to be enabled
169 self
.microwatt_compat
= (hasattr(pspec
, "microwatt_compat") and
170 (pspec
.microwatt_compat
== True))
171 self
.alt_reset
= Signal(reset_less
=True) # not connected yet (microwatt)
173 # test is SVP64 is to be enabled
174 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
176 # and if regfiles are reduced
177 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
178 (pspec
.regreduce
== True))
180 # and if overlap requested
181 self
.allow_overlap
= (hasattr(pspec
, "allow_overlap") and
182 (pspec
.allow_overlap
== True))
184 # and get the core domain
185 self
.core_domain
= "coresync"
186 if (hasattr(pspec
, "core_domain") and
187 isinstance(pspec
.core_domain
, str)):
188 self
.core_domain
= pspec
.core_domain
190 # JTAG interface. add this right at the start because if it's
191 # added it *modifies* the pspec, by adding enable/disable signals
192 # for parts of the rest of the core
193 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
194 #self.dbg_domain = "sync" # sigh "dbgsunc" too problematic
195 self
.dbg_domain
= "dbgsync" # domain for DMI/JTAG clock
197 # XXX MUST keep this up-to-date with litex, and
198 # soc-cocotb-sim, and err.. all needs sorting out, argh
201 'eint', 'gpio', 'mspi0',
202 # 'mspi1', - disabled for now
203 # 'pwm', 'sd0', - disabled for now
205 self
.jtag
= JTAG(get_pinspecs(subset
=subset
),
206 domain
=self
.dbg_domain
)
207 # add signals to pspec to enable/disable icache and dcache
208 # (or data and intstruction wishbone if icache/dcache not included)
209 # https://bugs.libre-soc.org/show_bug.cgi?id=520
210 # TODO: do we actually care if these are not domain-synchronised?
211 # honestly probably not.
212 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
213 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
214 self
.wb_sram_en
= self
.jtag
.wb_sram_en
216 self
.wb_sram_en
= Const(1)
218 # add 4k sram blocks?
219 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
220 pspec
.sram4x4kblock
== True)
224 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
228 # add interrupt controller?
229 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
231 self
.xics_icp
= XICS_ICP()
232 self
.xics_ics
= XICS_ICS()
233 self
.int_level_i
= self
.xics_ics
.int_level_i
235 self
.ext_irq
= Signal()
237 # add GPIO peripheral?
238 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
240 self
.simple_gpio
= SimpleGPIO()
241 self
.gpio_o
= self
.simple_gpio
.gpio_o
243 # main instruction core. suitable for prototyping / demo only
244 self
.core
= core
= NonProductionCore(pspec
)
245 self
.core_rst
= ResetSignal(self
.core_domain
)
247 # instruction decoder. goes into Trap Record
248 #pdecode = create_pdecode()
249 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
250 self
.pdecode2
= PowerDecode2(None, state
=self
.cur_state
,
251 opkls
=IssuerDecode2ToOperand
,
252 svp64_en
=self
.svp64_en
,
253 regreduce_en
=self
.regreduce_en
)
254 pdecode
= self
.pdecode2
.dec
257 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
259 self
.update_svstate
= Signal() # set this if updating svstate
260 self
.new_svstate
= new_svstate
= SVSTATERec("new_svstate")
262 # Test Instruction memory
263 if hasattr(core
, "icache"):
264 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
265 # truly dreadful. needs a huge reorg.
266 pspec
.icache
= core
.icache
267 self
.imem
= ConfigFetchUnit(pspec
).fu
270 self
.dbg
= CoreDebug()
271 self
.dbg_rst_i
= Signal(reset_less
=True)
273 # instruction go/monitor
274 self
.pc_o
= Signal(64, reset_less
=True)
275 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
276 self
.msr_i
= Data(64, "msr_i") # set "ok" to indicate "please change me"
277 self
.svstate_i
= Data(64, "svstate_i") # ditto
278 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
279 self
.busy_o
= Signal(reset_less
=True)
280 self
.memerr_o
= Signal(reset_less
=True)
282 # STATE regfile read /write ports for PC, MSR, SVSTATE
283 staterf
= self
.core
.regs
.rf
['state']
284 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
285 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
286 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
288 self
.state_w_msr
= staterf
.w_ports
['d_wr2'] # MSR wr
289 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
290 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
292 # DMI interface access
293 intrf
= self
.core
.regs
.rf
['int']
294 crrf
= self
.core
.regs
.rf
['cr']
295 xerrf
= self
.core
.regs
.rf
['xer']
296 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
297 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
298 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
302 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
303 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
305 # hack method of keeping an eye on whether branch/trap set the PC
306 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
307 self
.state_nia
.wen
.name
= 'state_nia_wen'
309 # pulse to synchronize the simulator at instruction end
310 self
.insn_done
= Signal()
312 # indicate any instruction still outstanding, in execution
313 self
.any_busy
= Signal()
316 # store copies of predicate masks
317 self
.srcmask
= Signal(64)
318 self
.dstmask
= Signal(64)
320 # sigh, the wishbone addresses are not wishbone-compliant in microwatt
321 if self
.microwatt_compat
:
322 self
.ibus_adr
= Signal(32, name
='wishbone_insn_out.adr')
323 self
.dbus_adr
= Signal(32, name
='wishbone_data_out.adr')
325 def setup_peripherals(self
, m
):
326 comb
, sync
= m
.d
.comb
, m
.d
.sync
328 # okaaaay so the debug module must be in coresync clock domain
329 # but NOT its reset signal. to cope with this, set every single
330 # submodule explicitly in coresync domain, debug and JTAG
331 # in their own one but using *external* reset.
332 csd
= DomainRenamer(self
.core_domain
)
333 dbd
= DomainRenamer(self
.dbg_domain
)
335 if self
.microwatt_compat
:
336 m
.submodules
.core
= core
= self
.core
338 m
.submodules
.core
= core
= csd(self
.core
)
339 # this _so_ needs sorting out. ICache is added down inside
340 # LoadStore1 and is already a submodule of LoadStore1
341 if not isinstance(self
.imem
, ICache
):
342 m
.submodules
.imem
= imem
= csd(self
.imem
)
343 m
.submodules
.dbg
= dbg
= dbd(self
.dbg
)
345 m
.submodules
.jtag
= jtag
= dbd(self
.jtag
)
346 # TODO: UART2GDB mux, here, from external pin
347 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
348 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
350 # fixup the clocks in microwatt-compat mode (but leave resets alone
351 # so that microwatt soc.vhdl can pull a reset on the core or DMI
352 # can do it, just like in TestIssuer)
353 if self
.microwatt_compat
:
354 intclk
= ClockSignal(self
.core_domain
)
355 dbgclk
= ClockSignal(self
.dbg_domain
)
356 if self
.core_domain
!= 'sync':
357 comb
+= intclk
.eq(ClockSignal())
358 if self
.dbg_domain
!= 'sync':
359 comb
+= dbgclk
.eq(ClockSignal())
361 # drop the first 3 bits of the incoming wishbone addresses
362 # this can go if using later versions of microwatt (not now)
363 if self
.microwatt_compat
:
364 ibus
= self
.imem
.ibus
365 dbus
= self
.core
.l0
.cmpi
.wb_bus()
366 comb
+= self
.ibus_adr
.eq(Cat(Const(0, 3), ibus
.adr
))
367 comb
+= self
.dbus_adr
.eq(Cat(Const(0, 3), dbus
.adr
))
369 cur_state
= self
.cur_state
371 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
373 for i
, sram
in enumerate(self
.sram4k
):
374 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
375 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
377 # XICS interrupt handler
379 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
380 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
381 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
382 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
384 sync
+= cur_state
.eint
.eq(self
.ext_irq
) # connect externally
386 # GPIO test peripheral
388 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
390 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
391 # XXX causes litex ECP5 test to get wrong idea about input and output
392 # (but works with verilator sim *sigh*)
393 # if self.gpio and self.xics:
394 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
396 # instruction decoder
397 pdecode
= create_pdecode()
398 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
400 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
403 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
404 intrf
= self
.core
.regs
.rf
['int']
406 # clock delay power-on reset
407 cd_por
= ClockDomain(reset_less
=True)
408 cd_sync
= ClockDomain()
409 m
.domains
+= cd_por
, cd_sync
410 core_sync
= ClockDomain(self
.core_domain
)
411 if self
.core_domain
!= "sync":
412 m
.domains
+= core_sync
413 if self
.dbg_domain
!= "sync":
414 dbg_sync
= ClockDomain(self
.dbg_domain
)
415 m
.domains
+= dbg_sync
417 ti_rst
= Signal(reset_less
=True)
418 delay
= Signal(range(4), reset
=3)
419 with m
.If(delay
!= 0):
420 m
.d
.por
+= delay
.eq(delay
- 1)
421 comb
+= cd_por
.clk
.eq(ClockSignal())
423 # power-on reset delay
424 core_rst
= ResetSignal(self
.core_domain
)
425 if self
.core_domain
!= "sync":
426 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
427 comb
+= core_rst
.eq(ti_rst
)
429 with m
.If(delay
!= 0 | dbg
.core_rst_o
):
430 comb
+= core_rst
.eq(1)
432 # connect external reset signal to DMI Reset
433 if self
.dbg_domain
!= "sync":
434 dbg_rst
= ResetSignal(self
.dbg_domain
)
435 comb
+= dbg_rst
.eq(self
.dbg_rst_i
)
437 # busy/halted signals from core
438 core_busy_o
= ~core
.p
.o_ready | core
.n
.o_data
.busy_o
# core is busy
439 comb
+= self
.busy_o
.eq(core_busy_o
)
440 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
442 # temporary hack: says "go" immediately for both address gen and ST
444 ldst
= core
.fus
.fus
['ldst0']
445 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
446 # link addr-go direct to rel
447 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
)
448 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
450 def do_dmi(self
, m
, dbg
):
451 """deals with DMI debug requests
453 currently only provides read requests for the INT regfile, CR and XER
454 it will later also deal with *writing* to these regfiles.
458 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
459 intrf
= self
.core
.regs
.rf
['int']
461 with m
.If(d_reg
.req
): # request for regfile access being made
462 # TODO: error-check this
463 # XXX should this be combinatorial? sync better?
465 comb
+= self
.int_r
.ren
.eq(1 << d_reg
.addr
)
467 comb
+= self
.int_r
.addr
.eq(d_reg
.addr
)
468 comb
+= self
.int_r
.ren
.eq(1)
469 d_reg_delay
= Signal()
470 sync
+= d_reg_delay
.eq(d_reg
.req
)
471 with m
.If(d_reg_delay
):
472 # data arrives one clock later
473 comb
+= d_reg
.data
.eq(self
.int_r
.o_data
)
474 comb
+= d_reg
.ack
.eq(1)
476 # sigh same thing for CR debug
477 with m
.If(d_cr
.req
): # request for regfile access being made
478 comb
+= self
.cr_r
.ren
.eq(0b11111111) # enable all
479 d_cr_delay
= Signal()
480 sync
+= d_cr_delay
.eq(d_cr
.req
)
481 with m
.If(d_cr_delay
):
482 # data arrives one clock later
483 comb
+= d_cr
.data
.eq(self
.cr_r
.o_data
)
484 comb
+= d_cr
.ack
.eq(1)
487 with m
.If(d_xer
.req
): # request for regfile access being made
488 comb
+= self
.xer_r
.ren
.eq(0b111111) # enable all
489 d_xer_delay
= Signal()
490 sync
+= d_xer_delay
.eq(d_xer
.req
)
491 with m
.If(d_xer_delay
):
492 # data arrives one clock later
493 comb
+= d_xer
.data
.eq(self
.xer_r
.o_data
)
494 comb
+= d_xer
.ack
.eq(1)
496 def tb_dec_fsm(self
, m
, spr_dec
):
499 this is a FSM for updating either dec or tb. it runs alternately
500 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
501 value to DEC, however the regfile has "passthrough" on it so this
504 see v3.0B p1097-1099 for Timeer Resource and p1065 and p1076
507 comb
, sync
= m
.d
.comb
, m
.d
.sync
508 fast_rf
= self
.core
.regs
.rf
['fast']
509 fast_r_dectb
= fast_rf
.r_ports
['issue'] # DEC/TB
510 fast_w_dectb
= fast_rf
.w_ports
['issue'] # DEC/TB
514 # initiates read of current DEC
515 with m
.State("DEC_READ"):
516 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.DEC
)
517 comb
+= fast_r_dectb
.ren
.eq(1)
520 # waits for DEC read to arrive (1 cycle), updates with new value
521 with m
.State("DEC_WRITE"):
523 # TODO: MSR.LPCR 32-bit decrement mode
524 comb
+= new_dec
.eq(fast_r_dectb
.o_data
- 1)
525 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.DEC
)
526 comb
+= fast_w_dectb
.wen
.eq(1)
527 comb
+= fast_w_dectb
.i_data
.eq(new_dec
)
528 sync
+= spr_dec
.eq(new_dec
) # copy into cur_state for decoder
531 # initiates read of current TB
532 with m
.State("TB_READ"):
533 comb
+= fast_r_dectb
.addr
.eq(FastRegs
.TB
)
534 comb
+= fast_r_dectb
.ren
.eq(1)
537 # waits for read TB to arrive, initiates write of current TB
538 with m
.State("TB_WRITE"):
540 comb
+= new_tb
.eq(fast_r_dectb
.o_data
+ 1)
541 comb
+= fast_w_dectb
.addr
.eq(FastRegs
.TB
)
542 comb
+= fast_w_dectb
.wen
.eq(1)
543 comb
+= fast_w_dectb
.i_data
.eq(new_tb
)
548 def elaborate(self
, platform
):
551 comb
, sync
= m
.d
.comb
, m
.d
.sync
552 cur_state
= self
.cur_state
553 pdecode2
= self
.pdecode2
556 # set up peripherals and core
557 core_rst
= self
.core_rst
558 self
.setup_peripherals(m
)
560 # reset current state if core reset requested
562 m
.d
.sync
+= self
.cur_state
.eq(0)
564 # check halted condition: requested PC to execute matches DMI stop addr
565 # and immediately stop. address of 0xffff_ffff_ffff_ffff can never
568 comb
+= halted
.eq(dbg
.stop_addr_o
== dbg
.state
.pc
)
570 comb
+= dbg
.core_stopped_i
.eq(1)
571 comb
+= dbg
.terminate_i
.eq(1)
573 # PC and instruction from I-Memory
574 comb
+= self
.pc_o
.eq(cur_state
.pc
)
575 self
.pc_changed
= Signal() # note write to PC
576 self
.msr_changed
= Signal() # note write to MSR
577 self
.sv_changed
= Signal() # note write to SVSTATE
579 # read state either from incoming override or from regfile
580 state
= CoreState("get") # current state (MSR/PC/SVSTATE)
581 state_get(m
, state
.msr
, core_rst
, self
.msr_i
,
583 self
.state_r_msr
, StateRegs
.MSR
)
584 state_get(m
, state
.pc
, core_rst
, self
.pc_i
,
586 self
.state_r_pc
, StateRegs
.PC
)
587 state_get(m
, state
.svstate
, core_rst
, self
.svstate_i
,
588 "svstate", # read SVSTATE
589 self
.state_r_sv
, StateRegs
.SVSTATE
)
591 # don't write pc every cycle
592 comb
+= self
.state_w_pc
.wen
.eq(0)
593 comb
+= self
.state_w_pc
.i_data
.eq(0)
595 # connect up debug state. note "combinatorially same" below,
596 # this is a bit naff, passing state over in the dbg class, but
597 # because it is combinatorial it achieves the desired goal
598 comb
+= dbg
.state
.eq(state
)
600 # this bit doesn't have to be in the FSM: connect up to read
601 # regfiles on demand from DMI
604 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
605 # (which uses that in PowerDecoder2 to raise 0x900 exception)
606 self
.tb_dec_fsm(m
, cur_state
.dec
)
608 # while stopped, allow updating the MSR, PC and SVSTATE.
609 # these are mainly for debugging purposes (including DMI/JTAG)
610 with m
.If(dbg
.core_stopped_i
):
611 with m
.If(self
.pc_i
.ok
):
612 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
613 comb
+= self
.state_w_pc
.i_data
.eq(self
.pc_i
.data
)
614 sync
+= self
.pc_changed
.eq(1)
615 with m
.If(self
.msr_i
.ok
):
616 comb
+= self
.state_w_msr
.wen
.eq(1 << StateRegs
.MSR
)
617 comb
+= self
.state_w_msr
.i_data
.eq(self
.msr_i
.data
)
618 sync
+= self
.msr_changed
.eq(1)
619 with m
.If(self
.svstate_i
.ok | self
.update_svstate
):
620 with m
.If(self
.svstate_i
.ok
): # over-ride from external source
621 comb
+= self
.new_svstate
.eq(self
.svstate_i
.data
)
622 comb
+= self
.state_w_sv
.wen
.eq(1 << StateRegs
.SVSTATE
)
623 comb
+= self
.state_w_sv
.i_data
.eq(self
.new_svstate
)
624 sync
+= self
.sv_changed
.eq(1)
626 # start renaming some of the ports to match microwatt
627 if self
.microwatt_compat
:
628 self
.core
.o
.core_terminate_o
.name
= "terminated_out"
629 # names of DMI interface
630 self
.dbg
.dmi
.addr_i
.name
= 'dmi_addr'
631 self
.dbg
.dmi
.din
.name
= 'dmi_din'
632 self
.dbg
.dmi
.dout
.name
= 'dmi_dout'
633 self
.dbg
.dmi
.req_i
.name
= 'dmi_req'
634 self
.dbg
.dmi
.we_i
.name
= 'dmi_wr'
635 self
.dbg
.dmi
.ack_o
.name
= 'dmi_ack'
636 # wishbone instruction bus
637 ibus
= self
.imem
.ibus
638 ibus
.adr
.name
= 'wishbone_insn_out.adr'
639 ibus
.dat_w
.name
= 'wishbone_insn_out.dat'
640 ibus
.sel
.name
= 'wishbone_insn_out.sel'
641 ibus
.cyc
.name
= 'wishbone_insn_out.cyc'
642 ibus
.stb
.name
= 'wishbone_insn_out.stb'
643 ibus
.we
.name
= 'wishbone_insn_out.we'
644 ibus
.dat_r
.name
= 'wishbone_insn_in.dat'
645 ibus
.ack
.name
= 'wishbone_insn_in.ack'
646 ibus
.stall
.name
= 'wishbone_insn_in.stall'
648 dbus
= self
.core
.l0
.cmpi
.wb_bus()
649 dbus
.adr
.name
= 'wishbone_data_out.adr'
650 dbus
.dat_w
.name
= 'wishbone_data_out.dat'
651 dbus
.sel
.name
= 'wishbone_data_out.sel'
652 dbus
.cyc
.name
= 'wishbone_data_out.cyc'
653 dbus
.stb
.name
= 'wishbone_data_out.stb'
654 dbus
.we
.name
= 'wishbone_data_out.we'
655 dbus
.dat_r
.name
= 'wishbone_data_in.dat'
656 dbus
.ack
.name
= 'wishbone_data_in.ack'
657 dbus
.stall
.name
= 'wishbone_data_in.stall'
662 yield from self
.pc_i
.ports()
663 yield from self
.msr_i
.ports()
666 yield from self
.core
.ports()
667 yield from self
.imem
.ports()
668 yield self
.core_bigendian_i
674 def external_ports(self
):
675 if self
.microwatt_compat
:
676 ports
= [self
.core
.o
.core_terminate_o
,
678 self
.alt_reset
, # not connected yet
682 ports
+= list(self
.dbg
.dmi
.ports())
683 # for dbus/ibus microwatt, exclude err btw and cti
684 for name
, sig
in self
.imem
.ibus
.fields
.items():
685 if name
not in ['err', 'bte', 'cti', 'adr']:
687 for name
, sig
in self
.core
.l0
.cmpi
.wb_bus().fields
.items():
688 if name
not in ['err', 'bte', 'cti', 'adr']:
690 # microwatt non-compliant with wishbone
691 ports
.append(self
.ibus_adr
)
692 ports
.append(self
.dbus_adr
)
695 ports
= self
.pc_i
.ports()
696 ports
= self
.msr_i
.ports()
697 ports
+= [self
.pc_o
, self
.memerr_o
, self
.core_bigendian_i
, self
.busy_o
,
701 ports
+= list(self
.jtag
.external_ports())
703 # don't add DMI if JTAG is enabled
704 ports
+= list(self
.dbg
.dmi
.ports())
706 ports
+= list(self
.imem
.ibus
.fields
.values())
707 ports
+= list(self
.core
.l0
.cmpi
.wb_bus().fields
.values())
710 for sram
in self
.sram4k
:
711 ports
+= list(sram
.bus
.fields
.values())
714 ports
+= list(self
.xics_icp
.bus
.fields
.values())
715 ports
+= list(self
.xics_ics
.bus
.fields
.values())
716 ports
.append(self
.int_level_i
)
718 ports
.append(self
.ext_irq
)
721 ports
+= list(self
.simple_gpio
.bus
.fields
.values())
722 ports
.append(self
.gpio_o
)
730 class TestIssuerInternal(TestIssuerBase
):
731 """TestIssuer - reads instructions from TestMemory and issues them
733 efficiency and speed is not the main goal here: functional correctness
734 and code clarity is. optimisations (which almost 100% interfere with
735 easy understanding) come later.
738 def fetch_fsm(self
, m
, dbg
, core
, pc
, msr
, svstate
, nia
, is_svp64_mode
,
739 fetch_pc_o_ready
, fetch_pc_i_valid
,
740 fetch_insn_o_valid
, fetch_insn_i_ready
):
743 this FSM performs fetch of raw instruction data, partial-decodes
744 it 32-bit at a time to detect SVP64 prefixes, and will optionally
745 read a 2nd 32-bit quantity if that occurs.
749 pdecode2
= self
.pdecode2
750 cur_state
= self
.cur_state
751 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
753 # also note instruction fetch failed
754 if hasattr(core
, "icache"):
755 fetch_failed
= core
.icache
.i_out
.fetch_failed
758 fetch_failed
= Const(0, 1)
761 # set priv / virt mode on I-Cache, sigh
762 if isinstance(self
.imem
, ICache
):
763 comb
+= self
.imem
.i_in
.priv_mode
.eq(~msr
[MSR
.PR
])
764 comb
+= self
.imem
.i_in
.virt_mode
.eq(msr
[MSR
.IR
]) # Instr. Redir (VM)
766 with m
.FSM(name
='fetch_fsm'):
769 with m
.State("IDLE"):
770 # fetch allowed if not failed and stopped but not stepping
771 # (see dmi.py for how core_stop_o is generated)
772 with m
.If(~fetch_failed
& ~dbg
.core_stop_o
):
773 comb
+= fetch_pc_o_ready
.eq(1)
774 with m
.If(fetch_pc_i_valid
& ~pdecode2
.instr_fault
776 # instruction allowed to go: start by reading the PC
777 # capture the PC and also drop it into Insn Memory
778 # we have joined a pair of combinatorial memory
779 # lookups together. this is Generally Bad.
780 comb
+= self
.imem
.a_pc_i
.eq(pc
)
781 comb
+= self
.imem
.a_i_valid
.eq(1)
782 comb
+= self
.imem
.f_i_valid
.eq(1)
783 # transfer state to output
784 sync
+= cur_state
.pc
.eq(pc
)
785 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
786 sync
+= cur_state
.msr
.eq(msr
) # and msr
788 m
.next
= "INSN_READ" # move to "wait for bus" phase
790 # dummy pause to find out why simulation is not keeping up
791 with m
.State("INSN_READ"):
792 # when using "single-step" mode, checking dbg.stopping_o
793 # prevents progress. allow fetch to proceed once started
795 #if self.allow_overlap:
796 # stopping = dbg.stopping_o
798 # stopping: jump back to idle
801 with m
.If(self
.imem
.f_busy_o
&
802 ~pdecode2
.instr_fault
): # zzz...
803 # busy but not fetch failed: stay in wait-read
804 comb
+= self
.imem
.a_pc_i
.eq(pc
)
805 comb
+= self
.imem
.a_i_valid
.eq(1)
806 comb
+= self
.imem
.f_i_valid
.eq(1)
808 # not busy (or fetch failed!): instruction fetched
809 # when fetch failed, the instruction gets ignored
811 if hasattr(core
, "icache"):
812 # blech, icache returns actual instruction
813 insn
= self
.imem
.f_instr_o
815 # but these return raw memory
816 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
819 # decode the SVP64 prefix, if any
820 comb
+= svp64
.raw_opcode_in
.eq(insn
)
821 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
822 # pass the decoded prefix (if any) to PowerDecoder2
823 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
824 sync
+= pdecode2
.is_svp64_mode
.eq(is_svp64_mode
)
825 # remember whether this is a prefixed instruction,
826 # so the FSM can readily loop when VL==0
827 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
828 # calculate the address of the following instruction
829 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
830 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
831 with m
.If(~svp64
.is_svp64_mode
):
832 # with no prefix, store the instruction
833 # and hand it directly to the next FSM
834 sync
+= dec_opcode_i
.eq(insn
)
835 m
.next
= "INSN_READY"
837 # fetch the rest of the instruction from memory
838 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
839 comb
+= self
.imem
.a_i_valid
.eq(1)
840 comb
+= self
.imem
.f_i_valid
.eq(1)
841 m
.next
= "INSN_READ2"
843 # not SVP64 - 32-bit only
844 sync
+= nia
.eq(cur_state
.pc
+ 4)
845 sync
+= dec_opcode_i
.eq(insn
)
846 m
.next
= "INSN_READY"
848 with m
.State("INSN_READ2"):
849 with m
.If(self
.imem
.f_busy_o
): # zzz...
850 # busy: stay in wait-read
851 comb
+= self
.imem
.a_i_valid
.eq(1)
852 comb
+= self
.imem
.f_i_valid
.eq(1)
854 # not busy: instruction fetched
855 if hasattr(core
, "icache"):
856 # blech, icache returns actual instruction
857 insn
= self
.imem
.f_instr_o
859 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
860 sync
+= dec_opcode_i
.eq(insn
)
861 m
.next
= "INSN_READY"
862 # TODO: probably can start looking at pdecode2.rm_dec
863 # here or maybe even in INSN_READ state, if svp64_mode
864 # detected, in order to trigger - and wait for - the
867 pmode
= pdecode2
.rm_dec
.predmode
869 if pmode != SVP64PredMode.ALWAYS.value:
870 fire predicate loading FSM and wait before
873 sync += self.srcmask.eq(-1) # set to all 1s
874 sync += self.dstmask.eq(-1) # set to all 1s
875 m.next = "INSN_READY"
878 with m
.State("INSN_READY"):
879 # hand over the instruction, to be decoded
880 comb
+= fetch_insn_o_valid
.eq(1)
881 with m
.If(fetch_insn_i_ready
):
885 def fetch_predicate_fsm(self
, m
,
886 pred_insn_i_valid
, pred_insn_o_ready
,
887 pred_mask_o_valid
, pred_mask_i_ready
):
888 """fetch_predicate_fsm - obtains (constructs in the case of CR)
889 src/dest predicate masks
891 https://bugs.libre-soc.org/show_bug.cgi?id=617
892 the predicates can be read here, by using IntRegs r_ports['pred']
893 or CRRegs r_ports['pred']. in the case of CRs it will have to
894 be done through multiple reads, extracting one relevant at a time.
895 later, a faster way would be to use the 32-bit-wide CR port but
896 this is more complex decoding, here. equivalent code used in
897 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
899 note: this ENTIRE FSM is not to be called when svp64 is disabled
903 pdecode2
= self
.pdecode2
904 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
905 predmode
= rm_dec
.predmode
906 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
907 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
908 # get src/dst step, so we can skip already used mask bits
909 cur_state
= self
.cur_state
910 srcstep
= cur_state
.svstate
.srcstep
911 dststep
= cur_state
.svstate
.dststep
912 cur_vl
= cur_state
.svstate
.vl
915 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
916 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
917 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
918 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
920 # store fetched masks, for either intpred or crpred
921 # when src/dst step is not zero, the skipped mask bits need to be
922 # shifted-out, before actually storing them in src/dest mask
923 new_srcmask
= Signal(64, reset_less
=True)
924 new_dstmask
= Signal(64, reset_less
=True)
926 with m
.FSM(name
="fetch_predicate"):
928 with m
.State("FETCH_PRED_IDLE"):
929 comb
+= pred_insn_o_ready
.eq(1)
930 with m
.If(pred_insn_i_valid
):
931 with m
.If(predmode
== SVP64PredMode
.INT
):
932 # skip fetching destination mask register, when zero
934 sync
+= new_dstmask
.eq(-1)
935 # directly go to fetch source mask register
936 # guaranteed not to be zero (otherwise predmode
937 # would be SVP64PredMode.ALWAYS, not INT)
938 comb
+= int_pred
.addr
.eq(sregread
)
939 comb
+= int_pred
.ren
.eq(1)
940 m
.next
= "INT_SRC_READ"
941 # fetch destination predicate register
943 comb
+= int_pred
.addr
.eq(dregread
)
944 comb
+= int_pred
.ren
.eq(1)
945 m
.next
= "INT_DST_READ"
946 with m
.Elif(predmode
== SVP64PredMode
.CR
):
947 # go fetch masks from the CR register file
948 sync
+= new_srcmask
.eq(0)
949 sync
+= new_dstmask
.eq(0)
952 sync
+= self
.srcmask
.eq(-1)
953 sync
+= self
.dstmask
.eq(-1)
954 m
.next
= "FETCH_PRED_DONE"
956 with m
.State("INT_DST_READ"):
957 # store destination mask
958 inv
= Repl(dinvert
, 64)
960 # set selected mask bit for 1<<r3 mode
961 dst_shift
= Signal(range(64))
962 comb
+= dst_shift
.eq(self
.int_pred
.o_data
& 0b111111)
963 sync
+= new_dstmask
.eq(1 << dst_shift
)
965 # invert mask if requested
966 sync
+= new_dstmask
.eq(self
.int_pred
.o_data ^ inv
)
967 # skip fetching source mask register, when zero
969 sync
+= new_srcmask
.eq(-1)
970 m
.next
= "FETCH_PRED_SHIFT_MASK"
971 # fetch source predicate register
973 comb
+= int_pred
.addr
.eq(sregread
)
974 comb
+= int_pred
.ren
.eq(1)
975 m
.next
= "INT_SRC_READ"
977 with m
.State("INT_SRC_READ"):
979 inv
= Repl(sinvert
, 64)
981 # set selected mask bit for 1<<r3 mode
982 src_shift
= Signal(range(64))
983 comb
+= src_shift
.eq(self
.int_pred
.o_data
& 0b111111)
984 sync
+= new_srcmask
.eq(1 << src_shift
)
986 # invert mask if requested
987 sync
+= new_srcmask
.eq(self
.int_pred
.o_data ^ inv
)
988 m
.next
= "FETCH_PRED_SHIFT_MASK"
990 # fetch masks from the CR register file
991 # implements the following loop:
992 # idx, inv = get_predcr(mask)
994 # for cr_idx in range(vl):
995 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
997 # mask |= 1 << cr_idx
999 with m
.State("CR_READ"):
1000 # CR index to be read, which will be ready by the next cycle
1001 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
1002 # submit the read operation to the regfile
1003 with m
.If(cr_idx
!= cur_vl
):
1004 # the CR read port is unary ...
1006 # ... in MSB0 convention ...
1007 # ren = 1 << (7 - cr_idx)
1008 # ... and with an offset:
1009 # ren = 1 << (7 - off - cr_idx)
1010 idx
= SVP64CROffs
.CRPred
+ cr_idx
1011 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
1012 # signal data valid in the next cycle
1013 cr_read
= Signal(reset_less
=True)
1014 sync
+= cr_read
.eq(1)
1015 # load the next index
1016 sync
+= cr_idx
.eq(cr_idx
+ 1)
1019 sync
+= cr_read
.eq(0)
1020 sync
+= cr_idx
.eq(0)
1021 m
.next
= "FETCH_PRED_SHIFT_MASK"
1023 # compensate for the one cycle delay on the regfile
1024 cur_cr_idx
= Signal
.like(cur_vl
)
1025 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
1026 # read the CR field, select the appropriate bit
1027 cr_field
= Signal(4)
1030 comb
+= cr_field
.eq(cr_pred
.o_data
)
1031 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1)
1033 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1)
1035 # set the corresponding mask bit
1036 bit_to_set
= Signal
.like(self
.srcmask
)
1037 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
1039 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
1041 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
1043 with m
.State("FETCH_PRED_SHIFT_MASK"):
1044 # shift-out skipped mask bits
1045 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
1046 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
1047 m
.next
= "FETCH_PRED_DONE"
1049 with m
.State("FETCH_PRED_DONE"):
1050 comb
+= pred_mask_o_valid
.eq(1)
1051 with m
.If(pred_mask_i_ready
):
1052 m
.next
= "FETCH_PRED_IDLE"
def issue_fsm(self, m, core, nia,
              dbg, core_rst, is_svp64_mode,
              fetch_pc_o_ready, fetch_pc_i_valid,
              fetch_insn_o_valid, fetch_insn_i_ready,
              pred_insn_i_valid, pred_insn_o_ready,
              pred_mask_o_valid, pred_mask_i_ready,
              exec_insn_i_valid, exec_insn_o_ready,
              exec_pc_o_valid, exec_pc_i_ready):
    """issue FSM

    decode / issue FSM. this interacts with the "fetch" FSM
    through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
    (outgoing). also interacts with the "execute" FSM
    through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
    (incoming).
    SVP64 RM prefixes have already been set up by the
    "fetch" phase, so execute is fairly straightforward.

    Parameters:
    * m             : nmigen Module being elaborated into
    * core          : the (NonProduction) Core instance
    * nia           : Signal carrying Next Instruction Address
    * dbg           : CoreDebug instance (stop/stopped handshake)
    * core_rst      : core reset Signal
    * is_svp64_mode : Signal, high when the fetched insn was SVP64-prefixed
    * remaining args: ready/valid handshake Signals to the fetch,
                      predicate-fetch and execute FSMs
    """
    comb, sync = m.d.comb, m.d.sync
    pdecode2 = self.pdecode2
    cur_state = self.cur_state
    new_svstate = self.new_svstate

    # temporaries
    dec_opcode_i = pdecode2.dec.raw_opcode_in  # raw opcode

    # for updating svstate (things like srcstep etc.)
    comb += new_svstate.eq(cur_state.svstate)

    # precalculate srcstep+1 and dststep+1
    cur_srcstep = cur_state.svstate.srcstep
    cur_dststep = cur_state.svstate.dststep
    next_srcstep = Signal.like(cur_srcstep)
    next_dststep = Signal.like(cur_dststep)
    comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
    comb += next_dststep.eq(cur_state.svstate.dststep+1)

    # note if an exception happened. in a pipelined or OoO design
    # this needs to be accompanied by "shadowing" (or stalling)
    exc_happened = self.core.o.exc_happened
    # also note instruction fetch failed
    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
        flush_needed = True
        # set to fault in decoder
        # update (highest priority) instruction fault
        rising_fetch_failed = rising_edge(m, fetch_failed)
        with m.If(rising_fetch_failed):
            sync += pdecode2.instr_fault.eq(1)
    else:
        fetch_failed = Const(0, 1)
        flush_needed = False

    with m.FSM(name="issue_fsm"):

        # sync with the "fetch" phase which is reading the instruction
        # at this point, there is no instruction running, that
        # could inadvertently update the PC.
        with m.State("ISSUE_START"):
            # reset instruction fault
            sync += pdecode2.instr_fault.eq(0)
            # wait on "core stop" release, before next fetch
            # need to do this here, in case we are in a VL==0 loop
            with m.If(~dbg.core_stop_o & ~core_rst):
                comb += fetch_pc_i_valid.eq(1)  # tell fetch to start
                with m.If(fetch_pc_o_ready):  # fetch acknowledged us
                    m.next = "INSN_WAIT"
            with m.Else():
                # tell core it's stopped, and acknowledge debug handshake
                comb += dbg.core_stopped_i.eq(1)
                # while stopped, allow updating SVSTATE
                with m.If(self.svstate_i.ok):
                    comb += new_svstate.eq(self.svstate_i.data)
                    comb += self.update_svstate.eq(1)
                    sync += self.sv_changed.eq(1)

        # wait for an instruction to arrive from Fetch
        with m.State("INSN_WAIT"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress. allow issue to proceed once started
            stopping = Const(0)
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += fetch_insn_i_ready.eq(1)
                with m.If(fetch_insn_o_valid):
                    # loop into ISSUE_START if it's a SVP64 instruction
                    # and VL == 0. this because VL==0 is a for-loop
                    # from 0 to 0 i.e. always, always a NOP.
                    cur_vl = cur_state.svstate.vl
                    with m.If(is_svp64_mode & (cur_vl == 0)):
                        # update the PC before fetching the next instruction
                        # since we are in a VL==0 loop, no instruction was
                        # executed that we could be overwriting
                        comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                        comb += self.state_w_pc.i_data.eq(nia)
                        comb += self.insn_done.eq(1)
                        m.next = "ISSUE_START"
                    with m.Else():
                        if self.svp64_en:
                            m.next = "PRED_START"  # fetching predicate
                        else:
                            m.next = "DECODE_SV"  # skip predication

        with m.State("PRED_START"):
            comb += pred_insn_i_valid.eq(1)  # tell fetch_pred to start
            with m.If(pred_insn_o_ready):  # fetch_pred acknowledged us
                m.next = "MASK_WAIT"

        with m.State("MASK_WAIT"):
            comb += pred_mask_i_ready.eq(1)  # ready to receive the masks
            with m.If(pred_mask_o_valid):  # predication masks are ready
                m.next = "PRED_SKIP"

        # skip zeros in predicate
        with m.State("PRED_SKIP"):
            with m.If(~is_svp64_mode):
                m.next = "DECODE_SV"  # nothing to do
            with m.Else():
                pred_src_zero = pdecode2.rm_dec.pred_sz
                pred_dst_zero = pdecode2.rm_dec.pred_dz

                # new srcstep, after skipping zeros
                skip_srcstep = Signal.like(cur_srcstep)
                # value to be added to the current srcstep
                src_delta = Signal.like(cur_srcstep)
                # add leading zeros to srcstep, if not in zero mode
                with m.If(~pred_src_zero):
                    # priority encoder (count leading zeros)
                    # append guard bit, in case the mask is all zeros
                    pri_enc_src = PriorityEncoder(65)
                    m.submodules.pri_enc_src = pri_enc_src
                    comb += pri_enc_src.i.eq(Cat(self.srcmask,
                                                 Const(1, 1)))
                    comb += src_delta.eq(pri_enc_src.o)
                # apply delta to srcstep
                comb += skip_srcstep.eq(cur_srcstep + src_delta)
                # shift-out all leading zeros from the mask
                # plus the leading "one" bit
                # TODO count leading zeros and shift-out the zero
                #      bits, in the same step, in hardware
                sync += self.srcmask.eq(self.srcmask >> (src_delta+1))

                # same as above, but for dststep
                skip_dststep = Signal.like(cur_dststep)
                dst_delta = Signal.like(cur_dststep)
                with m.If(~pred_dst_zero):
                    pri_enc_dst = PriorityEncoder(65)
                    m.submodules.pri_enc_dst = pri_enc_dst
                    comb += pri_enc_dst.i.eq(Cat(self.dstmask,
                                                 Const(1, 1)))
                    comb += dst_delta.eq(pri_enc_dst.o)
                comb += skip_dststep.eq(cur_dststep + dst_delta)
                sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))

                # TODO: initialize mask[VL]=1 to avoid passing past VL
                with m.If((skip_srcstep >= cur_vl) |
                          (skip_dststep >= cur_vl)):
                    # end of VL loop. Update PC and reset src/dst step
                    comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                    comb += self.state_w_pc.i_data.eq(nia)
                    comb += new_svstate.srcstep.eq(0)
                    comb += new_svstate.dststep.eq(0)
                    comb += self.update_svstate.eq(1)
                    # synchronize with the simulator
                    comb += self.insn_done.eq(1)
                    # go back and fetch the next instruction
                    m.next = "ISSUE_START"
                with m.Else():
                    # update new src/dst step
                    comb += new_svstate.srcstep.eq(skip_srcstep)
                    comb += new_svstate.dststep.eq(skip_dststep)
                    comb += self.update_svstate.eq(1)
                    # proceed to Decode
                    m.next = "DECODE_SV"

                # pass predicate mask bits through to satellite decoders
                # TODO: for SIMD this will be *multiple* bits
                sync += core.i.sv_pred_sm.eq(self.srcmask[0])
                sync += core.i.sv_pred_dm.eq(self.dstmask[0])

        # after src/dst step have been updated, we are ready
        # to decode the instruction
        with m.State("DECODE_SV"):
            # decode the instruction
            with m.If(~fetch_failed):
                sync += pdecode2.instr_fault.eq(0)
            sync += core.i.e.eq(pdecode2.e)
            sync += core.i.state.eq(cur_state)
            sync += core.i.raw_insn_i.eq(dec_opcode_i)
            sync += core.i.bigendian_i.eq(self.core_bigendian_i)
            # pass through the SVP64 RM prefix (set up by "fetch")
            sync += core.i.sv_rm.eq(pdecode2.sv_rm)
            # set RA_OR_ZERO detection in satellite decoders
            sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
            # and svp64 detection
            sync += core.i.is_svp64_mode.eq(is_svp64_mode)
            # and svp64 bit-rev'd ldst mode
            ldst_dec = pdecode2.use_svp64_ldst_dec
            sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
            # after decoding, reset any previous exception condition,
            # allowing it to be set again during the next execution
            sync += pdecode2.ldst_exc.eq(0)

            m.next = "INSN_EXECUTE"  # move to "execute"

        # handshake with execution FSM, move to "wait" once acknowledged
        with m.State("INSN_EXECUTE"):
            # when using "single-step" mode, checking dbg.stopping_o
            # prevents progress. allow execute to proceed once started
            stopping = Const(0)
            #if self.allow_overlap:
            #    stopping = dbg.stopping_o
            with m.If(stopping):
                # stopping: jump back to idle
                m.next = "ISSUE_START"
                if flush_needed:
                    # request the icache to stop asserting "failed"
                    comb += core.icache.flush_in.eq(1)
                # stop instruction fault
                sync += pdecode2.instr_fault.eq(0)
            with m.Else():
                comb += exec_insn_i_valid.eq(1)  # trigger execute
                with m.If(exec_insn_o_ready):  # execute acknowledged us
                    m.next = "EXECUTE_WAIT"

        with m.State("EXECUTE_WAIT"):
            comb += exec_pc_i_ready.eq(1)
            # see https://bugs.libre-soc.org/show_bug.cgi?id=636
            # the exception info needs to be blatted into
            # pdecode.ldst_exc, and the instruction "re-run".
            # when ldst_exc.happened is set, the PowerDecoder2
            # reacts very differently: it re-writes the instruction
            # with a "trap" (calls PowerDecoder2.trap()) which
            # will *overwrite* whatever was requested and jump the
            # PC to the exception address, as well as alter MSR.
            # nothing else needs to be done other than to note
            # the change of PC and MSR (and, later, SVSTATE)
            with m.If(exc_happened):
                mmu = core.fus.get_exc("mmu0")
                ldst = core.fus.get_exc("ldst0")
                if mmu is not None:
                    with m.If(fetch_failed):
                        # instruction fetch: exception is from MMU
                        # reset instr_fault (highest priority)
                        sync += pdecode2.ldst_exc.eq(mmu)
                        sync += pdecode2.instr_fault.eq(0)
                        if flush_needed:
                            # request icache to stop asserting "failed"
                            comb += core.icache.flush_in.eq(1)
                with m.If(~fetch_failed):
                    # otherwise assume it was a LDST exception
                    sync += pdecode2.ldst_exc.eq(ldst)

            with m.If(exec_pc_o_valid):

                # was this the last loop iteration?
                is_last = Signal()
                cur_vl = cur_state.svstate.vl
                comb += is_last.eq(next_srcstep == cur_vl)

                # return directly to Fetch if the instruction faulted
                with m.If(pdecode2.instr_fault):
                    # reset instruction fault, try again
                    sync += pdecode2.instr_fault.eq(0)
                    m.next = "ISSUE_START"

                # return directly to Decode if Execute generated an
                # exception.
                with m.Elif(pdecode2.ldst_exc.happened):
                    m.next = "DECODE_SV"

                # if MSR, PC or SVSTATE were changed by the previous
                # instruction, go directly back to Fetch, without
                # updating either MSR PC or SVSTATE
                with m.Elif(self.msr_changed | self.pc_changed |
                            self.sv_changed):
                    m.next = "ISSUE_START"

                # also return to Fetch, when no output was a vector
                # (regardless of SRCSTEP and VL), or when the last
                # instruction was really the last one of the VL loop
                with m.Elif((~pdecode2.loop_continue) | is_last):
                    # before going back to fetch, update the PC state
                    # register with the NIA.
                    # ok here we are not reading the branch unit.
                    # TODO: this just blithely overwrites whatever
                    #       pipeline updated the PC
                    comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
                    comb += self.state_w_pc.i_data.eq(nia)
                    # reset SRCSTEP before returning to Fetch
                    if self.svp64_en:
                        with m.If(pdecode2.loop_continue):
                            comb += new_svstate.srcstep.eq(0)
                            comb += new_svstate.dststep.eq(0)
                            comb += self.update_svstate.eq(1)
                    else:
                        comb += new_svstate.srcstep.eq(0)
                        comb += new_svstate.dststep.eq(0)
                        comb += self.update_svstate.eq(1)
                    m.next = "ISSUE_START"

                # returning to Execute? then, first update SRCSTEP
                with m.Else():
                    comb += new_svstate.srcstep.eq(next_srcstep)
                    comb += new_svstate.dststep.eq(next_dststep)
                    comb += self.update_svstate.eq(1)
                    # return to mask skip loop
                    m.next = "PRED_SKIP"

    # check if svstate needs updating: if so, write it to State Regfile
    with m.If(self.update_svstate):
        sync += cur_state.svstate.eq(self.new_svstate)  # for next clock
def execute_fsm(self, m, core,
                exec_insn_i_valid, exec_insn_o_ready,
                exec_pc_o_valid, exec_pc_i_ready):
    """execute FSM

    execute FSM. this interacts with the "issue" FSM
    through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
    (outgoing). SVP64 RM prefixes have already been set up by the
    "issue" phase, so execute is fairly straightforward.

    Parameters:
    * m    : nmigen Module being elaborated into
    * core : the (NonProduction) Core instance
    * exec_insn_i_valid / exec_insn_o_ready : handshake from issue,
      "start this instruction"
    * exec_pc_o_valid / exec_pc_i_ready     : handshake back to issue,
      "instruction completed, PC/SVSTATE may be inspected"
    """
    comb, sync = m.d.comb, m.d.sync
    pdecode2 = self.pdecode2
    # NOTE(review): the body below refers to "dbg" which was neither a
    # parameter nor a local in the original text (NameError at
    # elaboration). bind it from self here — verify that the debug
    # (CoreDebug) instance is indeed stored as self.dbg.
    dbg = self.dbg

    # temporaries
    core_busy_o = core.n.o_data.busy_o  # core is busy
    core_ivalid_i = core.p.i_valid  # instruction is valid

    # instruction fetch failed (only exists when an icache is present);
    # kept for symmetry with issue_fsm — presently unused below
    if hasattr(core, "icache"):
        fetch_failed = core.icache.i_out.fetch_failed
    else:
        fetch_failed = Const(0, 1)

    with m.FSM(name="exec_fsm"):

        # waiting for instruction bus (stays there until not busy)
        with m.State("INSN_START"):
            comb += exec_insn_o_ready.eq(1)
            with m.If(exec_insn_i_valid):
                comb += core_ivalid_i.eq(1)  # instruction is valid/issued
                # clear the "state changed" trackers for the new insn
                sync += self.sv_changed.eq(0)
                sync += self.pc_changed.eq(0)
                sync += self.msr_changed.eq(0)
                with m.If(core.p.o_ready):  # only move if accepted
                    m.next = "INSN_ACTIVE"  # move to "wait completion"

        # instruction started: must wait till it finishes
        with m.State("INSN_ACTIVE"):
            # note changes to MSR, PC and SVSTATE
            # XXX oops, really must monitor *all* State Regfile write
            # ports looking for changes!
            with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
                sync += self.sv_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
                sync += self.msr_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
                sync += self.pc_changed.eq(1)
            with m.If(~core_busy_o):  # instruction done!
                comb += exec_pc_o_valid.eq(1)
                with m.If(exec_pc_i_ready):
                    # when finished, indicate "done".
                    # however, if there was an exception, the instruction
                    # is *not* yet done. this is an implementation
                    # detail: we choose to implement exceptions by
                    # taking the exception information from the LDST
                    # unit, putting that *back* into the PowerDecoder2,
                    # and *re-running the entire instruction*.
                    # if we erroneously indicate "done" here, it is as if
                    # there were *TWO* instructions:
                    # 1) the failed LDST 2) a TRAP.
                    with m.If(~pdecode2.ldst_exc.happened &
                              ~pdecode2.instr_fault):
                        comb += self.insn_done.eq(1)
                    m.next = "INSN_START"  # back to fetch
            # terminate returns directly to INSN_START
            with m.If(dbg.terminate_i):
                # comb += self.insn_done.eq(1) - no because it's not
                m.next = "INSN_START"  # back to fetch
def elaborate(self, platform):
    # NOTE(review): this method is an incomplete extraction — several
    # spans of the original body are missing (the definitions of
    # "core", "dbg" and "nia" used below, the peripheral/DMI setup,
    # the body of the final core_rst override, and "return m").
    # do not treat this fragment as runnable as-is.
    m = super().elaborate(platform)

    comb, sync = m.d.comb, m.d.sync
    cur_state = self.cur_state
    pdecode2 = self.pdecode2

    # set up peripherals and core
    core_rst = self.core_rst

    # indicate to outside world if any FU is still executing
    # NOTE(review): "core" is not defined in the visible text — its
    # assignment (presumably core = self.core) was lost in extraction
    comb += self.any_busy.eq(core.n.o_data.any_busy_o)  # any FU executing

    # address of the next instruction, in the absence of a branch
    # depends on the instruction size
    # NOTE(review): the statements computing "nia" were lost here

    # connect up debug signals
    # NOTE(review): "dbg" is not defined in the visible text — its
    # assignment was lost in extraction
    with m.If(core.o.core_terminate_o):
        comb += dbg.terminate_i.eq(1)

    # pass the prefix mode from Fetch to Issue, so the latter can loop
    # on SVP64 instructions
    is_svp64_mode = Signal()

    # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
    # issue, decode/execute, now joined by "Predicate fetch/calculate".
    # these are the handshake signals between each

    # fetch FSM can run as soon as the PC is valid
    fetch_pc_i_valid = Signal()  # Execute tells Fetch "start next read"
    fetch_pc_o_ready = Signal()  # Fetch Tells SVSTATE "proceed"

    # fetch FSM hands over the instruction to be decoded / issued
    fetch_insn_o_valid = Signal()
    fetch_insn_i_ready = Signal()

    # predicate fetch FSM decodes and fetches the predicate
    pred_insn_i_valid = Signal()
    pred_insn_o_ready = Signal()

    # predicate fetch FSM delivers the masks
    pred_mask_o_valid = Signal()
    pred_mask_i_ready = Signal()

    # issue FSM delivers the instruction to the be executed
    exec_insn_i_valid = Signal()
    exec_insn_o_ready = Signal()

    # execute FSM, hands over the PC/SVSTATE back to the issue FSM
    exec_pc_o_valid = Signal()
    exec_pc_i_ready = Signal()

    # the FSMs here are perhaps unusual in that they detect conditions
    # then "hold" information, combinatorially, for the core
    # (as opposed to using sync - which would be on a clock's delay)
    # this includes the actual opcode, valid flags and so on.

    # Fetch, then predicate fetch, then Issue, then Execute.
    # Issue is where the VL for-loop # lives. the ready/valid
    # signalling is used to communicate between the four.

    self.fetch_fsm(m, dbg, core, dbg.state.pc, dbg.state.msr,
                   dbg.state.svstate, nia, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready)

    self.issue_fsm(m, core, nia,
                   dbg, core_rst, is_svp64_mode,
                   fetch_pc_o_ready, fetch_pc_i_valid,
                   fetch_insn_o_valid, fetch_insn_i_ready,
                   pred_insn_i_valid, pred_insn_o_ready,
                   pred_mask_o_valid, pred_mask_i_ready,
                   exec_insn_i_valid, exec_insn_o_ready,
                   exec_pc_o_valid, exec_pc_i_ready)

    # NOTE(review): presumably guarded by "if self.svp64_en:" in the
    # original (predication only exists in SVP64 mode) — lines lost
    self.fetch_predicate_fsm(m,
                             pred_insn_i_valid, pred_insn_o_ready,
                             pred_mask_o_valid, pred_mask_i_ready)

    self.execute_fsm(m, core,
                     exec_insn_i_valid, exec_insn_o_ready,
                     exec_pc_o_valid, exec_pc_i_ready)

    # whatever was done above, over-ride it if core reset is held
    with m.If(core_rst):
        # NOTE(review): the body of this override (and the method's
        # "return m") was lost in extraction
class TestIssuer(Elaboratable):
    # Top-level wrapper around TestIssuerInternal: adds a (dummy) PLL,
    # clock-domain plumbing and external clock/reset/debug ports.
    # NOTE(review): this class is an incomplete extraction — missing are
    # the creation of "m" at the top of elaborate(), its "return m",
    # the "def ports(self):" header, and external_ports()'s
    # "return ports".
    def __init__(self, pspec):
        # the "real" issuer core, plus a PLL wrapper around it
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # external debug-reset input, forwarded to the internal issuer
        self.dbg_rst_i = Signal(reset_less=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll

        # PLL monitoring outputs, clock-select input, and clock nets
        self.pll_test_o = Signal(reset_less=True)
        self.pll_vco_o = Signal(reset_less=True)
        self.clk_sel_i = Signal(2, reset_less=True)
        self.ref_clk = ClockSignal()  # can't rename it but that's ok
        self.pllclk_clk = ClockSignal("pllclk")

    def elaborate(self, platform):
        # NOTE(review): the creation of "m" (Module) and "comb"
        # (m.d.comb) was lost in extraction — the code below uses them
        # TestIssuer nominally runs at main clock, actually it is
        # all combinatorial internally except for coresync'd components
        m.submodules.ti = ti = self.ti

        # ClockSelect runs at PLL output internal clock rate
        m.submodules.wrappll = pll = self.pll

        # add clock domains from PLL
        cd_pll = ClockDomain("pllclk")
        # NOTE(review): the "m.domains += cd_pll" registration appears
        # to have been lost in extraction

        # PLL clock established. has the side-effect of running clklsel
        # at the PLL's speed (see DomainRenamer("pllclk") above)
        pllclk = self.pllclk_clk
        comb += pllclk.eq(pll.clk_pll_o)

        # wire up external 24mhz to PLL
        #comb += pll.clk_24_i.eq(self.ref_clk)
        # output 18 mhz PLL test signal, and analog oscillator out
        comb += self.pll_test_o.eq(pll.pll_test_o)
        comb += self.pll_vco_o.eq(pll.pll_vco_o)

        # input to pll clock selection
        comb += pll.clk_sel_i.eq(self.clk_sel_i)

        # now wire up ResetSignals. don't mind them being in this domain
        pll_rst = ResetSignal("pllclk")
        comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out. has the side-effect of
        # running TestIssuer at this speed (see DomainRenamer("intclk") above)
        # debug clock runs at coresync internal clock
        if self.ti.dbg_domain != 'sync':
            cd_dbgsync = ClockDomain("dbgsync")
        intclk = ClockSignal(self.ti.core_domain)
        dbgclk = ClockSignal(self.ti.dbg_domain)
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # XXX BYPASS PLL XXX
        # NOTE(review): an "if self.pll_en:" guard appears to have been
        # lost here — the assert below only makes sense under it
        comb += intclk.eq(self.ref_clk)
        assert self.ti.core_domain != 'sync', \
            "cannot set core_domain to sync and use pll at the same time"
        # NOTE(review): a matching "else:" branch appears lost here
        if self.ti.core_domain != 'sync':
            comb += intclk.eq(ClockSignal())
        if self.ti.dbg_domain != 'sync':
            dbgclk = ClockSignal(self.ti.dbg_domain)
            comb += dbgclk.eq(intclk)
        comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)

        # NOTE(review): "return m" and the "def ports(self):" header for
        # the following method were lost in extraction — the return
        # below belongs to a ports() method, not to elaborate()
        return list(self.ti.ports()) + list(self.pll.ports()) + \
            [ClockSignal(), ResetSignal()]

    def external_ports(self):
        # top-level port list: internal issuer ports plus clock/reset
        # and the PLL/clock-select pins
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())

        ports.append(self.clk_sel_i)
        ports.append(self.pll.clk_24_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
        ports.append(self.pllclk_clk)
        ports.append(self.ref_clk)
        # NOTE(review): "return ports" was lost in extraction
1636 if __name__
== '__main__':
1637 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1643 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1644 imem_ifacetype
='bare_wb',
1649 dut
= TestIssuer(pspec
)
1650 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1652 if len(sys
.argv
) == 1:
1653 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1654 with
open("test_issuer.il", "w") as f
: