Re-enable core stopped signal when stopped.
soc.git: src/soc/simple/issuer.py
1 """simple core issuer
2
3 not in any way intended for production use. this runs a FSM that:
4
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
9 * increments the PC
10 * does it all over again
11
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
15 improved.
16 """
17
18 from nmigen import (Elaboratable, Module, Signal, ClockSignal, ResetSignal,
19 ClockDomain, DomainRenamer, Mux, Const, Repl, Cat)
20 from nmigen.cli import rtlil
21 from nmigen.cli import main
22 import sys
23
24 from nmutil.singlepipe import ControlBase
25 from soc.simple.core_data import FetchOutput, FetchInput
26
27 from nmigen.lib.coding import PriorityEncoder
28
29 from openpower.decoder.power_decoder import create_pdecode
30 from openpower.decoder.power_decoder2 import PowerDecode2, SVP64PrefixDecoder
31 from openpower.decoder.decode2execute1 import IssuerDecode2ToOperand
32 from openpower.decoder.decode2execute1 import Data
33 from openpower.decoder.power_enums import (MicrOp, SVP64PredInt, SVP64PredCR,
34 SVP64PredMode)
35 from openpower.state import CoreState
36 from openpower.consts import (CR, SVP64CROffs, MSR)
37 from soc.experiment.testmem import TestMemory # test only for instructions
38 from soc.regfile.regfiles import StateRegs, FastRegs
39 from soc.simple.core import NonProductionCore
40 from soc.config.test.test_loadstore import TestMemPspec
41 from soc.config.ifetch import ConfigFetchUnit
42 from soc.debug.dmi import CoreDebug, DMIInterface
43 from soc.debug.jtag import JTAG
44 from soc.config.pinouts import get_pinspecs
45 from soc.interrupts.xics import XICS_ICP, XICS_ICS
46 from soc.bus.simple_gpio import SimpleGPIO
47 from soc.bus.SPBlock512W64B8W import SPBlock512W64B8W
48 from soc.clock.select import ClockSelect
49 from soc.clock.dummypll import DummyPLL
50 from openpower.sv.svstate import SVSTATERec
51 from soc.experiment.icache import ICache
52
53 from nmutil.util import rising_edge
54
55
56 def get_insn(f_instr_o, pc):
57 if f_instr_o.width == 32:
58 return f_instr_o
59 else:
60 # 64-bit: bit 2 of pc decides which word to select
61 return f_instr_o.word_select(pc[2], 32)
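# worked example (illustrative): with a 64-bit f_instr_o, pc=0x0 has bit 2
# clear so word_select picks bits [31:0]; pc=0x4 has bit 2 set so bits
# [63:32] are returned. a 32-bit f_instr_o already holds exactly one insn.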
62
63 # gets state input or reads from state regfile
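# (note, for orientation: the regfile read-enable is asserted in one cycle
#  but o_data is only valid on the next, hence the res_ok_delay register
#  used below to gate the regfile data path by one clock)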
64
65
66 def state_get(m, res, core_rst, state_i, name, regfile, regnum):
67 comb = m.d.comb
68 sync = m.d.sync
69 # read the {insert state variable here}
70 res_ok_delay = Signal(name="%s_ok_delay" % name)
71 with m.If(~core_rst):
72 sync += res_ok_delay.eq(~state_i.ok)
73 with m.If(state_i.ok):
74 # incoming override (start from pc_i)
75 comb += res.eq(state_i.data)
76 with m.Else():
77 # otherwise read StateRegs regfile for {insert state here}...
78 comb += regfile.ren.eq(1 << regnum)
79 # ... but on a 1-clock delay
80 with m.If(res_ok_delay):
81 comb += res.eq(regfile.o_data)
82
83
84 def get_predint(m, mask, name):
85 """decode SVP64 predicate integer mask field to reg number and invert
86 this is identical to the equivalent function in ISACaller except that
87 it doesn't read the INT directly, it just decodes "what needs to be done"
88 i.e. which INT reg, whether it is shifted and whether it is bit-inverted.
89
90 * all1s is set to indicate that no mask is to be applied.
91 * regread indicates the GPR register number to be read
92 * invert is set to indicate that the register value is to be inverted
93 * unary indicates that the register contents select a single bit (mask = 1<<r3)
94 """
95 comb = m.d.comb
96 regread = Signal(5, name=name+"regread")
97 invert = Signal(name=name+"invert")
98 unary = Signal(name=name+"unary")
99 all1s = Signal(name=name+"all1s")
100 with m.Switch(mask):
101 with m.Case(SVP64PredInt.ALWAYS.value):
102 comb += all1s.eq(1) # use 0b1111 (all ones)
103 with m.Case(SVP64PredInt.R3_UNARY.value):
104 comb += regread.eq(3)
105 comb += unary.eq(1) # 1<<r3 - shift r3 (single bit)
106 with m.Case(SVP64PredInt.R3.value):
107 comb += regread.eq(3)
108 with m.Case(SVP64PredInt.R3_N.value):
109 comb += regread.eq(3)
110 comb += invert.eq(1)
111 with m.Case(SVP64PredInt.R10.value):
112 comb += regread.eq(10)
113 with m.Case(SVP64PredInt.R10_N.value):
114 comb += regread.eq(10)
115 comb += invert.eq(1)
116 with m.Case(SVP64PredInt.R30.value):
117 comb += regread.eq(30)
118 with m.Case(SVP64PredInt.R30_N.value):
119 comb += regread.eq(30)
120 comb += invert.eq(1)
121 return regread, invert, unary, all1s
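    # worked examples (illustrative) of the decode above:
    #   SVP64PredInt.R3_UNARY -> regread=3,  unary=1   (mask = 1 << GPR(3))
    #   SVP64PredInt.R10_N    -> regread=10, invert=1  (mask = ~GPR(10))
    #   SVP64PredInt.ALWAYS   -> all1s=1               (no masking applied)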
122
123
124 def get_predcr(m, mask, name):
125 """decode SVP64 predicate CR to reg number field and invert status
126 this is identical to _get_predcr in ISACaller
127 """
128 comb = m.d.comb
129 idx = Signal(2, name=name+"idx")
130 invert = Signal(name=name+"crinvert")
131 with m.Switch(mask):
132 with m.Case(SVP64PredCR.LT.value):
133 comb += idx.eq(CR.LT)
134 comb += invert.eq(0)
135 with m.Case(SVP64PredCR.GE.value):
136 comb += idx.eq(CR.LT)
137 comb += invert.eq(1)
138 with m.Case(SVP64PredCR.GT.value):
139 comb += idx.eq(CR.GT)
140 comb += invert.eq(0)
141 with m.Case(SVP64PredCR.LE.value):
142 comb += idx.eq(CR.GT)
143 comb += invert.eq(1)
144 with m.Case(SVP64PredCR.EQ.value):
145 comb += idx.eq(CR.EQ)
146 comb += invert.eq(0)
147 with m.Case(SVP64PredCR.NE.value):
148 comb += idx.eq(CR.EQ)
149 comb += invert.eq(1)
150 with m.Case(SVP64PredCR.SO.value):
151 comb += idx.eq(CR.SO)
152 comb += invert.eq(0)
153 with m.Case(SVP64PredCR.NS.value):
154 comb += idx.eq(CR.SO)
155 comb += invert.eq(1)
156 return idx, invert
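    # worked example (illustrative): SVP64PredCR.GE selects the same CR bit
    # as LT (idx=CR.LT) but with invert=1, i.e. "ge" is tested as "not lt".
    # LE and NE follow the same complemented pattern.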
157
158
159 class TestIssuerBase(Elaboratable):
160 """TestIssuerBase - common base class for Issuers
161
162 takes care of power-on reset, peripherals, debug, DEC/TB,
163 and gets PC/MSR/SVSTATE from the State Regfile etc.
164 """
165
166 def __init__(self, pspec):
167
168 # test if SVP64 is to be enabled
169 self.svp64_en = hasattr(pspec, "svp64") and (pspec.svp64 == True)
170
171 # and if regfiles are reduced
172 self.regreduce_en = (hasattr(pspec, "regreduce") and
173 (pspec.regreduce == True))
174
175 # and if overlap requested
176 self.allow_overlap = (hasattr(pspec, "allow_overlap") and
177 (pspec.allow_overlap == True))
178
179 # and get the core domain
180 self.core_domain = "coresync"
181 if (hasattr(pspec, "core_domain") and
182 isinstance(pspec.core_domain, str)):
183 self.core_domain = pspec.core_domain
184
185 # JTAG interface. add this right at the start because if it's
186 # added it *modifies* the pspec, by adding enable/disable signals
187 # for parts of the rest of the core
188 self.jtag_en = hasattr(pspec, "debug") and pspec.debug == 'jtag'
189 #self.dbg_domain = "sync" # sigh "dbgsync" too problematic
190 self.dbg_domain = "dbgsync" # domain for DMI/JTAG clock
191 if self.jtag_en:
192 # XXX MUST keep this up-to-date with litex, and
193 # soc-cocotb-sim, and err.. all needs sorting out, argh
194 subset = ['uart',
195 'mtwi',
196 'eint', 'gpio', 'mspi0',
197 # 'mspi1', - disabled for now
198 # 'pwm', 'sd0', - disabled for now
199 'sdr']
200 self.jtag = JTAG(get_pinspecs(subset=subset),
201 domain=self.dbg_domain)
202 # add signals to pspec to enable/disable icache and dcache
203 # (or data and instruction wishbone if icache/dcache not included)
204 # https://bugs.libre-soc.org/show_bug.cgi?id=520
205 # TODO: do we actually care if these are not domain-synchronised?
206 # honestly probably not.
207 pspec.wb_icache_en = self.jtag.wb_icache_en
208 pspec.wb_dcache_en = self.jtag.wb_dcache_en
209 self.wb_sram_en = self.jtag.wb_sram_en
210 else:
211 self.wb_sram_en = Const(1)
212
213 # add 4k sram blocks?
214 self.sram4x4k = (hasattr(pspec, "sram4x4kblock") and
215 pspec.sram4x4kblock == True)
216 if self.sram4x4k:
217 self.sram4k = []
218 for i in range(4):
219 self.sram4k.append(SPBlock512W64B8W(name="sram4k_%d" % i,
220 # features={'err'}
221 ))
222
223 # add interrupt controller?
224 self.xics = hasattr(pspec, "xics") and pspec.xics == True
225 if self.xics:
226 self.xics_icp = XICS_ICP()
227 self.xics_ics = XICS_ICS()
228 self.int_level_i = self.xics_ics.int_level_i
229
230 # add GPIO peripheral?
231 self.gpio = hasattr(pspec, "gpio") and pspec.gpio == True
232 if self.gpio:
233 self.simple_gpio = SimpleGPIO()
234 self.gpio_o = self.simple_gpio.gpio_o
235
236 # main instruction core. suitable for prototyping / demo only
237 self.core = core = NonProductionCore(pspec)
238 self.core_rst = ResetSignal(self.core_domain)
239
240 # instruction decoder. goes into Trap Record
241 #pdecode = create_pdecode()
242 self.cur_state = CoreState("cur") # current state (MSR/PC/SVSTATE)
243 self.pdecode2 = PowerDecode2(None, state=self.cur_state,
244 opkls=IssuerDecode2ToOperand,
245 svp64_en=self.svp64_en,
246 regreduce_en=self.regreduce_en)
247 pdecode = self.pdecode2.dec
248
249 if self.svp64_en:
250 self.svp64 = SVP64PrefixDecoder() # for decoding SVP64 prefix
251
252 self.update_svstate = Signal() # set this if updating svstate
253 self.new_svstate = new_svstate = SVSTATERec("new_svstate")
254
255 # Test Instruction memory
256 if hasattr(core, "icache"):
257 # XXX BLECH! use pspec to transfer the I-Cache to ConfigFetchUnit
258 # truly dreadful. needs a huge reorg.
259 pspec.icache = core.icache
260 self.imem = ConfigFetchUnit(pspec).fu
261
262 # DMI interface
263 self.dbg = CoreDebug()
264 self.dbg_rst_i = Signal(reset_less=True)
265
266 # instruction go/monitor
267 self.pc_o = Signal(64, reset_less=True)
268 self.pc_i = Data(64, "pc_i") # set "ok" to indicate "please change me"
269 self.msr_i = Data(64, "msr_i") # set "ok" to indicate "please change me"
270 self.svstate_i = Data(64, "svstate_i") # ditto
271 self.core_bigendian_i = Signal() # TODO: set based on MSR.LE
272 self.busy_o = Signal(reset_less=True)
273 self.memerr_o = Signal(reset_less=True)
274
275 # STATE regfile read /write ports for PC, MSR, SVSTATE
276 staterf = self.core.regs.rf['state']
277 self.state_r_msr = staterf.r_ports['msr'] # MSR rd
278 self.state_r_pc = staterf.r_ports['cia'] # PC rd
279 self.state_r_sv = staterf.r_ports['sv'] # SVSTATE rd
280
281 self.state_w_msr = staterf.w_ports['msr'] # MSR wr
282 self.state_w_pc = staterf.w_ports['d_wr1'] # PC wr
283 self.state_w_sv = staterf.w_ports['sv'] # SVSTATE wr
284
285 # DMI interface access
286 intrf = self.core.regs.rf['int']
287 crrf = self.core.regs.rf['cr']
288 xerrf = self.core.regs.rf['xer']
289 self.int_r = intrf.r_ports['dmi'] # INT read
290 self.cr_r = crrf.r_ports['full_cr_dbg'] # CR read
291 self.xer_r = xerrf.r_ports['full_xer'] # XER read
292
293 if self.svp64_en:
294 # for predication
295 self.int_pred = intrf.r_ports['pred'] # INT predicate read
296 self.cr_pred = crrf.r_ports['cr_pred'] # CR predicate read
297
298 # hack method of keeping an eye on whether branch/trap set the PC
299 self.state_nia = self.core.regs.rf['state'].w_ports['nia']
300 self.state_nia.wen.name = 'state_nia_wen'
301
302 # pulse to synchronize the simulator at instruction end
303 self.insn_done = Signal()
304
305 # indicate any instruction still outstanding, in execution
306 self.any_busy = Signal()
307
308 if self.svp64_en:
309 # store copies of predicate masks
310 self.srcmask = Signal(64)
311 self.dstmask = Signal(64)
312
313 def setup_peripherals(self, m):
314 comb, sync = m.d.comb, m.d.sync
315
316 # okaaaay so the debug module must be in coresync clock domain
317 # but NOT its reset signal. to cope with this, set every single
318 # submodule explicitly in coresync domain, debug and JTAG
319 # in their own one but using *external* reset.
320 csd = DomainRenamer(self.core_domain)
321 dbd = DomainRenamer(self.dbg_domain)
322
323 m.submodules.core = core = csd(self.core)
324 # this _so_ needs sorting out. ICache is added down inside
325 # LoadStore1 and is already a submodule of LoadStore1
326 if not isinstance(self.imem, ICache):
327 m.submodules.imem = imem = csd(self.imem)
328 m.submodules.dbg = dbg = dbd(self.dbg)
329 if self.jtag_en:
330 m.submodules.jtag = jtag = dbd(self.jtag)
331 # TODO: UART2GDB mux, here, from external pin
332 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
333 sync += dbg.dmi.connect_to(jtag.dmi)
334
335 cur_state = self.cur_state
336
337 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
338 if self.sram4x4k:
339 for i, sram in enumerate(self.sram4k):
340 m.submodules["sram4k_%d" % i] = csd(sram)
341 comb += sram.enable.eq(self.wb_sram_en)
342
343 # XICS interrupt handler
344 if self.xics:
345 m.submodules.xics_icp = icp = csd(self.xics_icp)
346 m.submodules.xics_ics = ics = csd(self.xics_ics)
347 comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
348 sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
349
350 # GPIO test peripheral
351 if self.gpio:
352 m.submodules.simple_gpio = simple_gpio = csd(self.simple_gpio)
353
354 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
355 # XXX causes litex ECP5 test to get wrong idea about input and output
356 # (but works with verilator sim *sigh*)
357 # if self.gpio and self.xics:
358 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
359
360 # instruction decoder
361 pdecode = create_pdecode()
362 m.submodules.dec2 = pdecode2 = csd(self.pdecode2)
363 if self.svp64_en:
364 m.submodules.svp64 = svp64 = csd(self.svp64)
365
366 # convenience
367 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
368 intrf = self.core.regs.rf['int']
369
370 # clock delay power-on reset
371 cd_por = ClockDomain(reset_less=True)
372 cd_sync = ClockDomain()
373 m.domains += cd_por, cd_sync
374 core_sync = ClockDomain(self.core_domain)
375 if self.core_domain != "sync":
376 m.domains += core_sync
377 if self.dbg_domain != "sync":
378 dbg_sync = ClockDomain(self.dbg_domain)
379 m.domains += dbg_sync
380
381 ti_rst = Signal(reset_less=True)
382 delay = Signal(range(4), reset=3)
383 with m.If(delay != 0):
384 m.d.por += delay.eq(delay - 1)
385 comb += cd_por.clk.eq(ClockSignal())
386
387 # power-on reset delay
388 core_rst = ResetSignal(self.core_domain)
389 if self.core_domain != "sync":
390 comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
391 comb += core_rst.eq(ti_rst)
392 else:
393 with m.If((delay != 0) | dbg.core_rst_o):
394 comb += core_rst.eq(1)
395
396 # connect external reset signal to DMI Reset
397 if self.dbg_domain != "sync":
398 dbg_rst = ResetSignal(self.dbg_domain)
399 comb += dbg_rst.eq(self.dbg_rst_i)
400
401 # busy/halted signals from core
402 core_busy_o = ~core.p.o_ready | core.n.o_data.busy_o # core is busy
403 comb += self.busy_o.eq(core_busy_o)
404 comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
405
406 # temporary hack: says "go" immediately for both address gen and ST
407 l0 = core.l0
408 ldst = core.fus.fus['ldst0']
409 st_go_edge = rising_edge(m, ldst.st.rel_o)
410 # link addr-go direct to rel
411 m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)
412 m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
413
414 def do_dmi(self, m, dbg):
415 """deals with DMI debug requests
416
417 currently only provides read requests for the INT regfile, CR and XER
418 it will later also deal with *writing* to these regfiles.
419 """
420 comb = m.d.comb
421 sync = m.d.sync
422 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
423 intrf = self.core.regs.rf['int']
424
425 with m.If(d_reg.req): # request for regfile access being made
426 # TODO: error-check this
427 # XXX should this be combinatorial? sync better?
428 if intrf.unary:
429 comb += self.int_r.ren.eq(1 << d_reg.addr)
430 else:
431 comb += self.int_r.addr.eq(d_reg.addr)
432 comb += self.int_r.ren.eq(1)
433 d_reg_delay = Signal()
434 sync += d_reg_delay.eq(d_reg.req)
435 with m.If(d_reg_delay):
436 # data arrives one clock later
437 comb += d_reg.data.eq(self.int_r.o_data)
438 comb += d_reg.ack.eq(1)
439
440 # sigh same thing for CR debug
441 with m.If(d_cr.req): # request for regfile access being made
442 comb += self.cr_r.ren.eq(0b11111111) # enable all
443 d_cr_delay = Signal()
444 sync += d_cr_delay.eq(d_cr.req)
445 with m.If(d_cr_delay):
446 # data arrives one clock later
447 comb += d_cr.data.eq(self.cr_r.o_data)
448 comb += d_cr.ack.eq(1)
449
450 # aaand XER...
451 with m.If(d_xer.req): # request for regfile access being made
452 comb += self.xer_r.ren.eq(0b111111) # enable all
453 d_xer_delay = Signal()
454 sync += d_xer_delay.eq(d_xer.req)
455 with m.If(d_xer_delay):
456 # data arrives one clock later
457 comb += d_xer.data.eq(self.xer_r.o_data)
458 comb += d_xer.ack.eq(1)
459
460 def tb_dec_fsm(self, m, spr_dec):
461 """tb_dec_fsm
462
463 this is a FSM for updating either dec or tb. it runs alternately
464 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
465 value to DEC, however the regfile has "passthrough" on it so this
466 *should* be ok.
467
468 see v3.0B p1097-1099 for Timer Resources and p1065 and p1076
469 """
470
471 comb, sync = m.d.comb, m.d.sync
472 fast_rf = self.core.regs.rf['fast']
473 fast_r_dectb = fast_rf.r_ports['issue'] # DEC/TB
474 fast_w_dectb = fast_rf.w_ports['issue'] # DEC/TB
475
476 with m.FSM() as fsm:
477
478 # initiates read of current DEC
479 with m.State("DEC_READ"):
480 comb += fast_r_dectb.addr.eq(FastRegs.DEC)
481 comb += fast_r_dectb.ren.eq(1)
482 m.next = "DEC_WRITE"
483
484 # waits for DEC read to arrive (1 cycle), updates with new value
485 with m.State("DEC_WRITE"):
486 new_dec = Signal(64)
487 # TODO: MSR.LPCR 32-bit decrement mode
488 comb += new_dec.eq(fast_r_dectb.o_data - 1)
489 comb += fast_w_dectb.addr.eq(FastRegs.DEC)
490 comb += fast_w_dectb.wen.eq(1)
491 comb += fast_w_dectb.i_data.eq(new_dec)
492 sync += spr_dec.eq(new_dec) # copy into cur_state for decoder
493 m.next = "TB_READ"
494
495 # initiates read of current TB
496 with m.State("TB_READ"):
497 comb += fast_r_dectb.addr.eq(FastRegs.TB)
498 comb += fast_r_dectb.ren.eq(1)
499 m.next = "TB_WRITE"
500
501 # waits for the TB read to arrive, initiates write of the updated TB
502 with m.State("TB_WRITE"):
503 new_tb = Signal(64)
504 comb += new_tb.eq(fast_r_dectb.o_data + 1)
505 comb += fast_w_dectb.addr.eq(FastRegs.TB)
506 comb += fast_w_dectb.wen.eq(1)
507 comb += fast_w_dectb.i_data.eq(new_tb)
508 m.next = "DEC_READ"
509
510 return m
511
512 def elaborate(self, platform):
513 m = Module()
514 # convenience
515 comb, sync = m.d.comb, m.d.sync
516 cur_state = self.cur_state
517 pdecode2 = self.pdecode2
518 dbg = self.dbg
519
520 # set up peripherals and core
521 core_rst = self.core_rst
522 self.setup_peripherals(m)
523
524 # reset current state if core reset requested
525 with m.If(core_rst):
526 m.d.sync += self.cur_state.eq(0)
527
528 # check the halted condition: the PC requested for execution matches
529 # the DMI stop address, so stop immediately. a stop address of
530 # 0xffff_ffff_ffff_ffff can never match
531 halted = Signal()
532 comb += halted.eq(dbg.stop_addr_o == dbg.state.pc)
533 with m.If(halted):
534 comb += dbg.core_stopped_i.eq(1)
535 comb += dbg.terminate_i.eq(1)
536
537 # PC and instruction from I-Memory
538 comb += self.pc_o.eq(cur_state.pc)
539 self.pc_changed = Signal() # note write to PC
540 self.msr_changed = Signal() # note write to MSR
541 self.sv_changed = Signal() # note write to SVSTATE
542
543 # read state either from incoming override or from regfile
544 state = CoreState("get") # current state (MSR/PC/SVSTATE)
545 state_get(m, state.msr, core_rst, self.msr_i,
546 "msr", # read MSR
547 self.state_r_msr, StateRegs.MSR)
548 state_get(m, state.pc, core_rst, self.pc_i,
549 "pc", # read PC
550 self.state_r_pc, StateRegs.PC)
551 state_get(m, state.svstate, core_rst, self.svstate_i,
552 "svstate", # read SVSTATE
553 self.state_r_sv, StateRegs.SVSTATE)
554
555 # don't write pc every cycle
556 comb += self.state_w_pc.wen.eq(0)
557 comb += self.state_w_pc.i_data.eq(0)
558
559 # connect up debug state. note "combinatorially same" below,
560 # this is a bit naff, passing state over in the dbg class, but
561 # because it is combinatorial it achieves the desired goal
562 comb += dbg.state.eq(state)
563
564 # this bit doesn't have to be in the FSM: connect up to read
565 # regfiles on demand from DMI
566 self.do_dmi(m, dbg)
567
568 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
569 # (which uses that in PowerDecoder2 to raise 0x900 exception)
570 self.tb_dec_fsm(m, cur_state.dec)
571
572 # while stopped, allow updating the MSR, PC and SVSTATE.
573 # these are mainly for debugging purposes (including DMI/JTAG)
574 with m.If(dbg.core_stopped_i):
575 with m.If(self.pc_i.ok):
576 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
577 comb += self.state_w_pc.i_data.eq(self.pc_i.data)
578 sync += self.pc_changed.eq(1)
579 with m.If(self.msr_i.ok):
580 comb += self.state_w_msr.wen.eq(1 << StateRegs.MSR)
581 comb += self.state_w_msr.i_data.eq(self.msr_i.data)
582 sync += self.msr_changed.eq(1)
583 with m.If(self.svstate_i.ok | self.update_svstate):
584 with m.If(self.svstate_i.ok): # over-ride from external source
585 comb += self.new_svstate.eq(self.svstate_i.data)
586 comb += self.state_w_sv.wen.eq(1 << StateRegs.SVSTATE)
587 comb += self.state_w_sv.i_data.eq(self.new_svstate)
588 sync += self.sv_changed.eq(1)
589
590 return m
591
592 def __iter__(self):
593 yield from self.pc_i.ports()
594 yield from self.msr_i.ports()
595 yield self.pc_o
596 yield self.memerr_o
597 yield from self.core.ports()
598 yield from self.imem.ports()
599 yield self.core_bigendian_i
600 yield self.busy_o
601
602 def ports(self):
603 return list(self)
604
605 def external_ports(self):
606 ports = self.pc_i.ports()
607 ports += self.msr_i.ports()
608 ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
609 ]
610
611 if self.jtag_en:
612 ports += list(self.jtag.external_ports())
613 else:
614 # JTAG not enabled: expose the DMI interface directly instead
615 ports += list(self.dbg.dmi.ports())
616
617 ports += list(self.imem.ibus.fields.values())
618 ports += list(self.core.l0.cmpi.wb_bus().fields.values())
619
620 if self.sram4x4k:
621 for sram in self.sram4k:
622 ports += list(sram.bus.fields.values())
623
624 if self.xics:
625 ports += list(self.xics_icp.bus.fields.values())
626 ports += list(self.xics_ics.bus.fields.values())
627 ports.append(self.int_level_i)
628
629 if self.gpio:
630 ports += list(self.simple_gpio.bus.fields.values())
631 ports.append(self.gpio_o)
632
633 return ports
634
635 def ports(self):
636 return list(self)
637
638
639
640 # Fetch Finite State Machine.
641 # WARNING: there are currently DriverConflicts but it's actually working.
642 # TODO, here: everything that is global in nature, information from the
643 # main TestIssuerInternal, needs to move to either ispec() or ospec().
644 # not only that: TestIssuerInternal.imem can entirely move into here
645 # because imem is only ever accessed inside the FetchFSM.
646 class FetchFSM(ControlBase):
647 def __init__(self, allow_overlap, svp64_en, imem, core_rst,
648 pdecode2, cur_state,
649 dbg, core, svstate, nia, is_svp64_mode):
650 self.allow_overlap = allow_overlap
651 self.svp64_en = svp64_en
652 self.imem = imem
653 self.core_rst = core_rst
654 self.pdecode2 = pdecode2
655 self.cur_state = cur_state
656 self.dbg = dbg
657 self.core = core
658 self.svstate = svstate
659 self.nia = nia
660 self.is_svp64_mode = is_svp64_mode
661
662 # set up pipeline ControlBase and allocate i/o specs
663 # (unusual: normally done by the Pipeline API)
664 super().__init__(stage=self)
665 self.p.i_data, self.n.o_data = self.new_specs(None)
666 self.i, self.o = self.p.i_data, self.n.o_data
667
668 # next 3 functions are Stage API Compliance
669 def setup(self, m, i):
670 pass
671
672 def ispec(self):
673 return FetchInput()
674
675 def ospec(self):
676 return FetchOutput()
677
678 def elaborate(self, platform):
679 """fetch FSM
680
681 this FSM performs fetch of raw instruction data, partial-decodes
682 it 32-bit at a time to detect SVP64 prefixes, and will optionally
683 read a 2nd 32-bit quantity if that occurs.
684 """
685 m = super().elaborate(platform)
686
687 dbg = self.dbg
688 core = self.core
689 pc = self.i.pc
690 msr = self.i.msr
691 svstate = self.svstate
692 nia = self.nia
693 is_svp64_mode = self.is_svp64_mode
694 fetch_pc_o_ready = self.p.o_ready
695 fetch_pc_i_valid = self.p.i_valid
696 fetch_insn_o_valid = self.n.o_valid
697 fetch_insn_i_ready = self.n.i_ready
698
699 comb = m.d.comb
700 sync = m.d.sync
701 pdecode2 = self.pdecode2
702 cur_state = self.cur_state
703 dec_opcode_o = pdecode2.dec.raw_opcode_in # raw opcode
704
705 # also note instruction fetch failed
706 if hasattr(core, "icache"):
707 fetch_failed = core.icache.i_out.fetch_failed
708 flush_needed = True
709 else:
710 fetch_failed = Const(0, 1)
711 flush_needed = False
712
713 # set priv / virt mode on I-Cache, sigh
714 if isinstance(self.imem, ICache):
715 comb += self.imem.i_in.priv_mode.eq(~msr[MSR.PR])
716 comb += self.imem.i_in.virt_mode.eq(msr[MSR.IR]) # Instruction Relocation (VM)
717
718 with m.FSM(name='fetch_fsm'):
719
720 # waiting (zzz)
721 with m.State("IDLE"):
722 # fetch is allowed when fetch has not failed and the core is not stopped
723 # (core_stop_o; see dmi.py for how it is generated, incl. single-step)
724 with m.If(~fetch_failed & ~dbg.core_stop_o):
725 comb += fetch_pc_o_ready.eq(1)
726 with m.If(fetch_pc_i_valid & ~pdecode2.instr_fault
727 & ~dbg.core_stop_o):
728 # instruction allowed to go: start by reading the PC
729 # capture the PC and also drop it into Insn Memory
730 # we have joined a pair of combinatorial memory
731 # lookups together. this is Generally Bad.
732 comb += self.imem.a_pc_i.eq(pc)
733 comb += self.imem.a_i_valid.eq(1)
734 comb += self.imem.f_i_valid.eq(1)
735 # transfer state to output
736 sync += cur_state.pc.eq(pc)
737 sync += cur_state.svstate.eq(svstate) # and svstate
738 sync += cur_state.msr.eq(msr) # and msr
739
740 m.next = "INSN_READ" # move to "wait for bus" phase
741
742 # wait for the instruction read (originally a dummy pause added to investigate simulation slow-down)
743 with m.State("INSN_READ"):
744 # when using "single-step" mode, checking dbg.stopping_o
745 # prevents progress. allow fetch to proceed once started
746 stopping = Const(0)
747 #if self.allow_overlap:
748 # stopping = dbg.stopping_o
749 with m.If(stopping):
750 # stopping: jump back to idle
751 m.next = "IDLE"
752 with m.Else():
753 with m.If(self.imem.f_busy_o &
754 ~pdecode2.instr_fault): # zzz...
755 # busy but not fetch failed: stay in wait-read
756 comb += self.imem.a_pc_i.eq(pc)
757 comb += self.imem.a_i_valid.eq(1)
758 comb += self.imem.f_i_valid.eq(1)
759 with m.Else():
760 # not busy (or fetch failed!): instruction fetched
761 # when fetch failed, the instruction gets ignored
762 # by the decoder
763 if hasattr(core, "icache"):
764 # blech, icache returns actual instruction
765 insn = self.imem.f_instr_o
766 else:
767 # but these return raw memory
768 insn = get_insn(self.imem.f_instr_o, cur_state.pc)
769 if self.svp64_en:
770 svp64 = self.svp64
771 # decode the SVP64 prefix, if any
772 comb += svp64.raw_opcode_in.eq(insn)
773 comb += svp64.bigendian.eq(self.core_bigendian_i)
774 # pass the decoded prefix (if any) to PowerDecoder2
775 sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
776 sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
777 # remember whether this is a prefixed instruction,
778 # so the FSM can readily loop when VL==0
779 sync += is_svp64_mode.eq(svp64.is_svp64_mode)
780 # calculate the address of the following instruction
781 insn_size = Mux(svp64.is_svp64_mode, 8, 4)
782 sync += nia.eq(cur_state.pc + insn_size)
783 with m.If(~svp64.is_svp64_mode):
784 # with no prefix, store the instruction
785 # and hand it directly to the next FSM
786 sync += dec_opcode_o.eq(insn)
787 m.next = "INSN_READY"
788 with m.Else():
789 # fetch the rest of the instruction from memory
790 comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
791 comb += self.imem.a_i_valid.eq(1)
792 comb += self.imem.f_i_valid.eq(1)
793 m.next = "INSN_READ2"
794 else:
795 # not SVP64 - 32-bit only
796 sync += nia.eq(cur_state.pc + 4)
797 sync += dec_opcode_o.eq(insn)
798 m.next = "INSN_READY"
799
800 with m.State("INSN_READ2"):
801 with m.If(self.imem.f_busy_o): # zzz...
802 # busy: stay in wait-read
803 comb += self.imem.a_i_valid.eq(1)
804 comb += self.imem.f_i_valid.eq(1)
805 with m.Else():
806 # not busy: instruction fetched
807 if hasattr(core, "icache"):
808 # blech, icache returns actual instruction
809 insn = self.imem.f_instr_o
810 else:
811 insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
812 sync += dec_opcode_o.eq(insn)
813 m.next = "INSN_READY"
814 # TODO: probably can start looking at pdecode2.rm_dec
815 # here or maybe even in INSN_READ state, if svp64_mode
816 # detected, in order to trigger - and wait for - the
817 # predicate reading.
818 if self.svp64_en:
819 pmode = pdecode2.rm_dec.predmode
820 """
821 if pmode != SVP64PredMode.ALWAYS.value:
822 fire predicate loading FSM and wait before
823 moving to INSN_READY
824 else:
825 sync += self.srcmask.eq(-1) # set to all 1s
826 sync += self.dstmask.eq(-1) # set to all 1s
827 m.next = "INSN_READY"
828 """
829
830 with m.State("INSN_READY"):
831 # hand over the instruction, to be decoded
832 comb += fetch_insn_o_valid.eq(1)
833 with m.If(fetch_insn_i_ready):
834 m.next = "IDLE"
835
836 # whatever was done above, over-ride it if core reset is held
837 with m.If(self.core_rst):
838 sync += nia.eq(0)
839
840 return m
841
842
843 class TestIssuerInternal(TestIssuerBase):
844 """TestIssuer - reads instructions from TestMemory and issues them
845
846 efficiency and speed is not the main goal here: functional correctness
847 and code clarity is. optimisations (which almost 100% interfere with
848 easy understanding) come later.
849 """
850
851 def fetch_predicate_fsm(self, m,
852 pred_insn_i_valid, pred_insn_o_ready,
853 pred_mask_o_valid, pred_mask_i_ready):
854 """fetch_predicate_fsm - obtains (constructs in the case of CR)
855 src/dest predicate masks
856
857 https://bugs.libre-soc.org/show_bug.cgi?id=617
858 the predicates can be read here, by using IntRegs r_ports['pred']
859 or CRRegs r_ports['pred']. in the case of CRs it will have to
860 be done through multiple reads, extracting one relevant at a time.
861 later, a faster way would be to use the 32-bit-wide CR port but
862 this is more complex decoding, here. equivalent code used in
863 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
864
865 note: this ENTIRE FSM is not to be called when svp64 is disabled
866 """
867 comb = m.d.comb
868 sync = m.d.sync
869 pdecode2 = self.pdecode2
870 rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
871 predmode = rm_dec.predmode
872 srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
873 cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
874 # get src/dst step, so we can skip already used mask bits
875 cur_state = self.cur_state
876 srcstep = cur_state.svstate.srcstep
877 dststep = cur_state.svstate.dststep
878 cur_vl = cur_state.svstate.vl
879
880 # decode predicates
881 sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
882 dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
883 sidx, scrinvert = get_predcr(m, srcpred, 's')
884 didx, dcrinvert = get_predcr(m, dstpred, 'd')
885
886 # store fetched masks, for either intpred or crpred
887 # when src/dst step is not zero, the skipped mask bits need to be
888 # shifted-out, before actually storing them in src/dest mask
889 new_srcmask = Signal(64, reset_less=True)
890 new_dstmask = Signal(64, reset_less=True)
891
892 with m.FSM(name="fetch_predicate"):
893
894 with m.State("FETCH_PRED_IDLE"):
895 comb += pred_insn_o_ready.eq(1)
896 with m.If(pred_insn_i_valid):
897 with m.If(predmode == SVP64PredMode.INT):
898 # skip fetching destination mask register, when zero
899 with m.If(dall1s):
900 sync += new_dstmask.eq(-1)
901 # directly go to fetch source mask register
902 # guaranteed not to be zero (otherwise predmode
903 # would be SVP64PredMode.ALWAYS, not INT)
904 comb += int_pred.addr.eq(sregread)
905 comb += int_pred.ren.eq(1)
906 m.next = "INT_SRC_READ"
907 # fetch destination predicate register
908 with m.Else():
909 comb += int_pred.addr.eq(dregread)
910 comb += int_pred.ren.eq(1)
911 m.next = "INT_DST_READ"
912 with m.Elif(predmode == SVP64PredMode.CR):
913 # go fetch masks from the CR register file
914 sync += new_srcmask.eq(0)
915 sync += new_dstmask.eq(0)
916 m.next = "CR_READ"
917 with m.Else():
918 sync += self.srcmask.eq(-1)
919 sync += self.dstmask.eq(-1)
920 m.next = "FETCH_PRED_DONE"
921
922 with m.State("INT_DST_READ"):
923 # store destination mask
924 inv = Repl(dinvert, 64)
925 with m.If(dunary):
926 # set selected mask bit for 1<<r3 mode
927 dst_shift = Signal(range(64))
928 comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
929 sync += new_dstmask.eq(1 << dst_shift)
930 with m.Else():
931 # invert mask if requested
932 sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
933 # skip fetching source mask register, when zero
934 with m.If(sall1s):
935 sync += new_srcmask.eq(-1)
936 m.next = "FETCH_PRED_SHIFT_MASK"
937 # fetch source predicate register
938 with m.Else():
939 comb += int_pred.addr.eq(sregread)
940 comb += int_pred.ren.eq(1)
941 m.next = "INT_SRC_READ"
942
943 with m.State("INT_SRC_READ"):
944 # store source mask
945 inv = Repl(sinvert, 64)
946 with m.If(sunary):
947 # set selected mask bit for 1<<r3 mode
948 src_shift = Signal(range(64))
949 comb += src_shift.eq(self.int_pred.o_data & 0b111111)
950 sync += new_srcmask.eq(1 << src_shift)
951 with m.Else():
952 # invert mask if requested
953 sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
954 m.next = "FETCH_PRED_SHIFT_MASK"
955
956 # fetch masks from the CR register file
957 # implements the following loop:
958 # idx, inv = get_predcr(mask)
959 # mask = 0
960 # for cr_idx in range(vl):
961 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
962 # if cr[idx] ^ inv:
963 # mask |= 1 << cr_idx
964 # return mask
965 with m.State("CR_READ"):
966 # CR index to be read, which will be ready by the next cycle
967 cr_idx = Signal.like(cur_vl, reset_less=True)
968 # submit the read operation to the regfile
969 with m.If(cr_idx != cur_vl):
970 # the CR read port is unary ...
971 # ren = 1 << cr_idx
972 # ... in MSB0 convention ...
973 # ren = 1 << (7 - cr_idx)
974 # ... and with an offset:
975 # ren = 1 << (7 - off - cr_idx)
976 idx = SVP64CROffs.CRPred + cr_idx
977 comb += cr_pred.ren.eq(1 << (7 - idx))
978 # signal data valid in the next cycle
979 cr_read = Signal(reset_less=True)
980 sync += cr_read.eq(1)
981 # load the next index
982 sync += cr_idx.eq(cr_idx + 1)
983 with m.Else():
984 # exit on loop end
985 sync += cr_read.eq(0)
986 sync += cr_idx.eq(0)
987 m.next = "FETCH_PRED_SHIFT_MASK"
988 with m.If(cr_read):
989 # compensate for the one cycle delay on the regfile
990 cur_cr_idx = Signal.like(cur_vl)
991 comb += cur_cr_idx.eq(cr_idx - 1)
992 # read the CR field, select the appropriate bit
993 cr_field = Signal(4)
994 scr_bit = Signal()
995 dcr_bit = Signal()
996 comb += cr_field.eq(cr_pred.o_data)
997 comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
998 ^ scrinvert)
999 comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
1000 ^ dcrinvert)
1001 # set the corresponding mask bit
1002 bit_to_set = Signal.like(self.srcmask)
1003 comb += bit_to_set.eq(1 << cur_cr_idx)
1004 with m.If(scr_bit):
1005 sync += new_srcmask.eq(new_srcmask | bit_to_set)
1006 with m.If(dcr_bit):
1007 sync += new_dstmask.eq(new_dstmask | bit_to_set)
1008
1009 with m.State("FETCH_PRED_SHIFT_MASK"):
1010 # shift-out skipped mask bits
1011 sync += self.srcmask.eq(new_srcmask >> srcstep)
1012 sync += self.dstmask.eq(new_dstmask >> dststep)
1013 m.next = "FETCH_PRED_DONE"
1014
1015 with m.State("FETCH_PRED_DONE"):
1016 comb += pred_mask_o_valid.eq(1)
1017 with m.If(pred_mask_i_ready):
1018 m.next = "FETCH_PRED_IDLE"
1019
1020 def issue_fsm(self, m, core, nia,
1021 dbg, core_rst, is_svp64_mode,
1022 fetch_pc_o_ready, fetch_pc_i_valid,
1023 fetch_insn_o_valid, fetch_insn_i_ready,
1024 pred_insn_i_valid, pred_insn_o_ready,
1025 pred_mask_o_valid, pred_mask_i_ready,
1026 exec_insn_i_valid, exec_insn_o_ready,
1027 exec_pc_o_valid, exec_pc_i_ready):
1028 """issue FSM
1029
1030 decode / issue FSM. this interacts with the "fetch" FSM
1031 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
1032 (outgoing). also interacts with the "execute" FSM
1033 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
1034 (incoming).
1035 SVP64 RM prefixes have already been set up by the
1036 "fetch" phase, so execute is fairly straightforward.
1037 """
1038
1039 comb = m.d.comb
1040 sync = m.d.sync
1041 pdecode2 = self.pdecode2
1042 cur_state = self.cur_state
1043 new_svstate = self.new_svstate
1044
1045 # temporaries
1046 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
1047
1048 # for updating svstate (things like srcstep etc.)
1049 comb += new_svstate.eq(cur_state.svstate)
1050
1051 # precalculate srcstep+1 and dststep+1
1052 cur_srcstep = cur_state.svstate.srcstep
1053 cur_dststep = cur_state.svstate.dststep
1054 next_srcstep = Signal.like(cur_srcstep)
1055 next_dststep = Signal.like(cur_dststep)
1056 comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
1057 comb += next_dststep.eq(cur_state.svstate.dststep+1)
1058
1059 # note if an exception happened. in a pipelined or OoO design
1060 # this needs to be accompanied by "shadowing" (or stalling)
1061 exc_happened = self.core.o.exc_happened
1062 # also note instruction fetch failed
1063 if hasattr(core, "icache"):
1064 fetch_failed = core.icache.i_out.fetch_failed
1065 flush_needed = True
1066 # set to fault in decoder
1067 # update (highest priority) instruction fault
1068 rising_fetch_failed = rising_edge(m, fetch_failed)
1069 with m.If(rising_fetch_failed):
1070 sync += pdecode2.instr_fault.eq(1)
1071 else:
1072 fetch_failed = Const(0, 1)
1073 flush_needed = False
1074
1075 with m.FSM(name="issue_fsm"):
1076
1077 # sync with the "fetch" phase which is reading the instruction
1078 # at this point, there is no instruction running that
1079 # could inadvertently update the PC.
1080 with m.State("ISSUE_START"):
1081 # reset instruction fault
1082 sync += pdecode2.instr_fault.eq(0)
1083 # wait on "core stop" release, before next fetch
1084 # need to do this here, in case we are in a VL==0 loop
1085 with m.If(~dbg.core_stop_o & ~core_rst):
1086 comb += fetch_pc_i_valid.eq(1) # tell fetch to start
1087 with m.If(fetch_pc_o_ready): # fetch acknowledged us
1088 m.next = "INSN_WAIT"
1089 with m.Else():
1090 # tell core it's stopped, and acknowledge debug handshake
1091 comb += dbg.core_stopped_i.eq(1)
1092 # while stopped, allow updating SVSTATE
1093 with m.If(self.svstate_i.ok):
1094 comb += new_svstate.eq(self.svstate_i.data)
1095 comb += self.update_svstate.eq(1)
1096 sync += self.sv_changed.eq(1)
1097
1098 # wait for an instruction to arrive from Fetch
1099 with m.State("INSN_WAIT"):
1100 # when using "single-step" mode, checking dbg.stopping_o
1101 # prevents progress. allow issue to proceed once started
1102 stopping = Const(0)
1103 #if self.allow_overlap:
1104 # stopping = dbg.stopping_o
1105 with m.If(stopping):
1106 # stopping: jump back to idle
1107 m.next = "ISSUE_START"
1108 if flush_needed:
1109 # request the icache to stop asserting "failed"
1110 comb += core.icache.flush_in.eq(1)
1111 # stop instruction fault
1112 sync += pdecode2.instr_fault.eq(0)
1113 with m.Else():
1114 comb += fetch_insn_i_ready.eq(1)
1115 with m.If(fetch_insn_o_valid):
1116 # loop into ISSUE_START if it's a SVP64 instruction
1117 # and VL == 0. this is because VL==0 is a for-loop
1118 # from 0 to 0, i.e. the instruction is always a NOP.
1119 cur_vl = cur_state.svstate.vl
1120 with m.If(is_svp64_mode & (cur_vl == 0)):
1121 # update the PC before fetching the next instruction
1122 # since we are in a VL==0 loop, no instruction was
1123 # executed that we could be overwriting
1124 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1125 comb += self.state_w_pc.i_data.eq(nia)
1126 comb += self.insn_done.eq(1)
1127 m.next = "ISSUE_START"
1128 with m.Else():
1129 if self.svp64_en:
1130 m.next = "PRED_START" # fetching predicate
1131 else:
1132 m.next = "DECODE_SV" # skip predication
1133
1134 with m.State("PRED_START"):
1135 comb += pred_insn_i_valid.eq(1) # tell fetch_pred to start
1136 with m.If(pred_insn_o_ready): # fetch_pred acknowledged us
1137 m.next = "MASK_WAIT"
1138
1139 with m.State("MASK_WAIT"):
1140 comb += pred_mask_i_ready.eq(1) # ready to receive the masks
1141 with m.If(pred_mask_o_valid): # predication masks are ready
1142 m.next = "PRED_SKIP"
1143
1144 # skip zeros in predicate
1145 with m.State("PRED_SKIP"):
1146 with m.If(~is_svp64_mode):
1147 m.next = "DECODE_SV" # nothing to do
1148 with m.Else():
1149 if self.svp64_en:
1150 pred_src_zero = pdecode2.rm_dec.pred_sz
1151 pred_dst_zero = pdecode2.rm_dec.pred_dz
1152
1153 # new srcstep, after skipping zeros
1154 skip_srcstep = Signal.like(cur_srcstep)
1155 # value to be added to the current srcstep
1156 src_delta = Signal.like(cur_srcstep)
1157 # add leading zeros to srcstep, if not in zero mode
1158 with m.If(~pred_src_zero):
1159 # priority encoder (count leading zeros)
1160 # append guard bit, in case the mask is all zeros
1161 pri_enc_src = PriorityEncoder(65)
1162 m.submodules.pri_enc_src = pri_enc_src
1163 comb += pri_enc_src.i.eq(Cat(self.srcmask,
1164 Const(1, 1)))
1165 comb += src_delta.eq(pri_enc_src.o)
1166 # apply delta to srcstep
1167 comb += skip_srcstep.eq(cur_srcstep + src_delta)
1168 # shift-out all leading zeros from the mask
1169 # plus the leading "one" bit
1170 # TODO count leading zeros and shift-out the zero
1171 # bits, in the same step, in hardware
1172 sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
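                        # worked example (illustrative): srcmask = 0b11000
                        # gives pri_enc_src.o = 3 (lowest set bit), so
                        # srcstep advances by 3 and the mask is shifted by
                        # 4, dropping the skipped zeros plus the bit used.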
1173
1174 # same as above, but for dststep
1175 skip_dststep = Signal.like(cur_dststep)
1176 dst_delta = Signal.like(cur_dststep)
1177 with m.If(~pred_dst_zero):
1178 pri_enc_dst = PriorityEncoder(65)
1179 m.submodules.pri_enc_dst = pri_enc_dst
1180 comb += pri_enc_dst.i.eq(Cat(self.dstmask,
1181 Const(1, 1)))
1182 comb += dst_delta.eq(pri_enc_dst.o)
1183 comb += skip_dststep.eq(cur_dststep + dst_delta)
1184 sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
1185
1186 # TODO: initialize mask[VL]=1 to avoid passing past VL
1187 with m.If((skip_srcstep >= cur_vl) |
1188 (skip_dststep >= cur_vl)):
1189 # end of VL loop. Update PC and reset src/dst step
1190 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1191 comb += self.state_w_pc.i_data.eq(nia)
1192 comb += new_svstate.srcstep.eq(0)
1193 comb += new_svstate.dststep.eq(0)
1194 comb += self.update_svstate.eq(1)
1195 # synchronize with the simulator
1196 comb += self.insn_done.eq(1)
1197 # go back to Issue
1198 m.next = "ISSUE_START"
1199 with m.Else():
1200 # update new src/dst step
1201 comb += new_svstate.srcstep.eq(skip_srcstep)
1202 comb += new_svstate.dststep.eq(skip_dststep)
1203 comb += self.update_svstate.eq(1)
1204 # proceed to Decode
1205 m.next = "DECODE_SV"
1206
1207 # pass predicate mask bits through to satellite decoders
1208 # TODO: for SIMD this will be *multiple* bits
1209 sync += core.i.sv_pred_sm.eq(self.srcmask[0])
1210 sync += core.i.sv_pred_dm.eq(self.dstmask[0])
1211
1212 # after src/dst step have been updated, we are ready
1213 # to decode the instruction
1214 with m.State("DECODE_SV"):
1215 # decode the instruction
1216 with m.If(~fetch_failed):
1217 sync += pdecode2.instr_fault.eq(0)
1218 sync += core.i.e.eq(pdecode2.e)
1219 sync += core.i.state.eq(cur_state)
1220 sync += core.i.raw_insn_i.eq(dec_opcode_i)
1221 sync += core.i.bigendian_i.eq(self.core_bigendian_i)
1222 if self.svp64_en:
1223 sync += core.i.sv_rm.eq(pdecode2.sv_rm)
1224 # set RA_OR_ZERO detection in satellite decoders
1225 sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
1226 # and svp64 detection
1227 sync += core.i.is_svp64_mode.eq(is_svp64_mode)
1228 # and svp64 bit-rev'd ldst mode
1229 ldst_dec = pdecode2.use_svp64_ldst_dec
1230 sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
1231 # after decoding, reset any previous exception condition,
1232 # allowing it to be set again during the next execution
1233 sync += pdecode2.ldst_exc.eq(0)
1234
1235 m.next = "INSN_EXECUTE" # move to "execute"
1236
1237 # handshake with execution FSM, move to "wait" once acknowledged
1238 with m.State("INSN_EXECUTE"):
1239 # when using "single-step" mode, checking dbg.stopping_o
1240 # prevents progress. allow execute to proceed once started
1241 stopping = Const(0)
1242 #if self.allow_overlap:
1243 # stopping = dbg.stopping_o
1244 with m.If(stopping):
1245 # stopping: jump back to idle
1246 m.next = "ISSUE_START"
1247 if flush_needed:
1248 # request the icache to stop asserting "failed"
1249 comb += core.icache.flush_in.eq(1)
1250 # stop instruction fault
1251 sync += pdecode2.instr_fault.eq(0)
1252 with m.Else():
1253 comb += exec_insn_i_valid.eq(1) # trigger execute
1254 with m.If(exec_insn_o_ready): # execute acknowledged us
1255 m.next = "EXECUTE_WAIT"
1256
1257 with m.State("EXECUTE_WAIT"):
1258 # wait on "core stop" release, at instruction end
1259 # need to do this here, in case we are in a VL>1 loop
1260 with m.If(~dbg.core_stop_o & ~core_rst):
1261 comb += exec_pc_i_ready.eq(1)
1262 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
1263 # the exception info needs to be blatted into
1264 # pdecode.ldst_exc, and the instruction "re-run".
1265 # when ldst_exc.happened is set, the PowerDecoder2
1266 # reacts very differently: it re-writes the instruction
1267 # with a "trap" (calls PowerDecoder2.trap()) which
1268 # will *overwrite* whatever was requested and jump the
1269 # PC to the exception address, as well as alter MSR.
1270 # nothing else needs to be done other than to note
1271 # the change of PC and MSR (and, later, SVSTATE)
1272 with m.If(exc_happened):
1273 mmu = core.fus.get_exc("mmu0")
1274 ldst = core.fus.get_exc("ldst0")
1275 if mmu is not None:
1276 with m.If(fetch_failed):
1277 # instruction fetch: exception is from MMU
1278 # reset instr_fault (highest priority)
1279 sync += pdecode2.ldst_exc.eq(mmu)
1280 sync += pdecode2.instr_fault.eq(0)
1281 if flush_needed:
1282 # request icache to stop asserting "failed"
1283 comb += core.icache.flush_in.eq(1)
1284 with m.If(~fetch_failed):
1285 # otherwise assume it was a LDST exception
1286 sync += pdecode2.ldst_exc.eq(ldst)
1287
1288 with m.If(exec_pc_o_valid):
1289
1290 # was this the last loop iteration?
1291 is_last = Signal()
1292 cur_vl = cur_state.svstate.vl
1293 comb += is_last.eq(next_srcstep == cur_vl)
1294
1295 with m.If(pdecode2.instr_fault):
1296 # reset instruction fault, try again
1297 sync += pdecode2.instr_fault.eq(0)
1298 m.next = "ISSUE_START"
1299
1300 # return directly to Decode if Execute generated an
1301 # exception.
1302 with m.Elif(pdecode2.ldst_exc.happened):
1303 m.next = "DECODE_SV"
1304
1305 # if MSR, PC or SVSTATE were changed by the previous
1306 # instruction, go directly back to Fetch, without
1307 # updating either MSR PC or SVSTATE
1308 with m.Elif(self.msr_changed | self.pc_changed |
1309 self.sv_changed):
1310 m.next = "ISSUE_START"
1311
1312 # also return to Fetch, when no output was a vector
1313 # (regardless of SRCSTEP and VL), or when the last
1314 # instruction was really the last one of the VL loop
1315 with m.Elif((~pdecode2.loop_continue) | is_last):
1316 # before going back to fetch, update the PC state
1317 # register with the NIA.
1318 # ok here we are not reading the branch unit.
1319 # TODO: this just blithely overwrites whatever
1320 # pipeline updated the PC
1321 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
1322 comb += self.state_w_pc.i_data.eq(nia)
1323 # reset SRCSTEP before returning to Fetch
1324 if self.svp64_en:
1325 with m.If(pdecode2.loop_continue):
1326 comb += new_svstate.srcstep.eq(0)
1327 comb += new_svstate.dststep.eq(0)
1328 comb += self.update_svstate.eq(1)
1329 else:
1330 comb += new_svstate.srcstep.eq(0)
1331 comb += new_svstate.dststep.eq(0)
1332 comb += self.update_svstate.eq(1)
1333 m.next = "ISSUE_START"
1334
1335 # returning to Execute? then, first update SRCSTEP
1336 with m.Else():
1337 comb += new_svstate.srcstep.eq(next_srcstep)
1338 comb += new_svstate.dststep.eq(next_dststep)
1339 comb += self.update_svstate.eq(1)
1340 # return to mask skip loop
1341 m.next = "PRED_SKIP"
1342
1343 with m.Else():
1344 comb += dbg.core_stopped_i.eq(1)
1345 if flush_needed:
1346 # request the icache to stop asserting "failed"
1347 comb += core.icache.flush_in.eq(1)
1348 # stop instruction fault
1349 sync += pdecode2.instr_fault.eq(0)
1350 # if terminated return to idle
1351 with m.If(dbg.terminate_i):
1352 m.next = "ISSUE_START"
1353
1354 # check if svstate needs updating: if so, write it to State Regfile
1355 with m.If(self.update_svstate):
1356 sync += cur_state.svstate.eq(self.new_svstate) # for next clock
1357
1358 def execute_fsm(self, m, core,
1359 exec_insn_i_valid, exec_insn_o_ready,
1360 exec_pc_o_valid, exec_pc_i_ready):
1361 """execute FSM
1362
1363 execute FSM. this interacts with the "issue" FSM
1364 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
1365 (outgoing). SVP64 RM prefixes have already been set up by the
1366 "issue" phase, so execute is fairly straightforward.
1367 """
1368
1369 comb = m.d.comb
1370 sync = m.d.sync
1371 dbg = self.dbg
1372 pdecode2 = self.pdecode2
1373
1374 # temporaries
1375 core_busy_o = core.n.o_data.busy_o # core is busy
1376 core_ivalid_i = core.p.i_valid # instruction is valid
1377
1378 if hasattr(core, "icache"):
1379 fetch_failed = core.icache.i_out.fetch_failed
1380 else:
1381 fetch_failed = Const(0, 1)
1382
1383 with m.FSM(name="exec_fsm"):
1384
1385 # waiting for instruction bus (stays there until not busy)
1386 with m.State("INSN_START"):
1387 comb += exec_insn_o_ready.eq(1)
1388 with m.If(exec_insn_i_valid):
1389 comb += core_ivalid_i.eq(1) # instruction is valid/issued
1390 sync += self.sv_changed.eq(0)
1391 sync += self.pc_changed.eq(0)
1392 sync += self.msr_changed.eq(0)
1393 with m.If(core.p.o_ready): # only move if accepted
1394 m.next = "INSN_ACTIVE" # move to "wait completion"
1395
1396 # instruction started: must wait till it finishes
1397 with m.State("INSN_ACTIVE"):
1398 # note changes to MSR, PC and SVSTATE
1399 # XXX oops, really must monitor *all* State Regfile write
1400 # ports looking for changes!
1401 with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
1402 sync += self.sv_changed.eq(1)
1403 with m.If(self.state_nia.wen & (1 << StateRegs.MSR)):
1404 sync += self.msr_changed.eq(1)
1405 with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
1406 sync += self.pc_changed.eq(1)
1407 with m.If(~core_busy_o): # instruction done!
1408 comb += exec_pc_o_valid.eq(1)
1409 with m.If(exec_pc_i_ready):
1410 # when finished, indicate "done".
1411 # however, if there was an exception, the instruction
1412 # is *not* yet done. this is an implementation
1413 # detail: we choose to implement exceptions by
1414 # taking the exception information from the LDST
1415 # unit, putting that *back* into the PowerDecoder2,
1416 # and *re-running the entire instruction*.
1417 # if we erroneously indicate "done" here, it is as if
1418 # there were *TWO* instructions:
1419 # 1) the failed LDST 2) a TRAP.
1420 with m.If(~pdecode2.ldst_exc.happened &
1421 ~pdecode2.instr_fault):
1422 comb += self.insn_done.eq(1)
1423 m.next = "INSN_START" # back to fetch
1424 # terminate returns directly to INSN_START
1425 with m.If(dbg.terminate_i):
1426 # comb += self.insn_done.eq(1) - no because it's not
1427 m.next = "INSN_START" # back to fetch
1428
1429 def elaborate(self, platform):
1430 m = super().elaborate(platform)
1431 # convenience
1432 comb, sync = m.d.comb, m.d.sync
1433 cur_state = self.cur_state
1434 pdecode2 = self.pdecode2
1435 dbg = self.dbg
1436 core = self.core
1437
1438 # set up peripherals and core
1439 core_rst = self.core_rst
1440
1441 # indicate to outside world if any FU is still executing
1442 comb += self.any_busy.eq(core.n.o_data.any_busy_o) # any FU executing
1443
1444 # address of the next instruction, in the absence of a branch
1445 # depends on the instruction size
1446 nia = Signal(64)
1447
1448 # connect up debug signals
1449 with m.If(core.o.core_terminate_o):
1450 comb += dbg.terminate_i.eq(1)
1451
1452 # pass the prefix mode from Fetch to Issue, so the latter can loop
1453 # on VL==0
1454 is_svp64_mode = Signal()
1455
1456 # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
1457 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1458 # these are the handshake signals between each
1459
1460 # fetch FSM can run as soon as the PC is valid
1461 fetch_pc_i_valid = Signal() # Execute tells Fetch "start next read"
1462 fetch_pc_o_ready = Signal() # Fetch Tells SVSTATE "proceed"
1463
1464 # fetch FSM hands over the instruction to be decoded / issued
1465 fetch_insn_o_valid = Signal()
1466 fetch_insn_i_ready = Signal()
1467
1468 # predicate fetch FSM decodes and fetches the predicate
1469 pred_insn_i_valid = Signal()
1470 pred_insn_o_ready = Signal()
1471
1472 # predicate fetch FSM delivers the masks
1473 pred_mask_o_valid = Signal()
1474 pred_mask_i_ready = Signal()
1475
1476 # issue FSM delivers the instruction to be executed
1477 exec_insn_i_valid = Signal()
1478 exec_insn_o_ready = Signal()
1479
1480 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1481 exec_pc_o_valid = Signal()
1482 exec_pc_i_ready = Signal()
1483
1484 # the FSMs here are perhaps unusual in that they detect conditions
1485 # then "hold" information, combinatorially, for the core
1486 # (as opposed to using sync - which would be on a clock's delay)
1487 # this includes the actual opcode, valid flags and so on.
1488
1489 # Fetch, then predicate fetch, then Issue, then Execute.
1490 # Issue is where the VL for-loop lives. the ready/valid
1491 # signalling is used to communicate between the four.
1492
1493 # set up Fetch FSM
1494 fetch = FetchFSM(self.allow_overlap, self.svp64_en,
1495 self.imem, core_rst, pdecode2, cur_state,
1496 dbg, core,
1497 dbg.state.svstate, # combinatorially same
1498 nia, is_svp64_mode)
1499 m.submodules.fetch = fetch
1500 # connect up in/out data to existing Signals
1501 comb += fetch.p.i_data.pc.eq(dbg.state.pc) # combinatorially same
1502 comb += fetch.p.i_data.msr.eq(dbg.state.msr) # combinatorially same
1503 # and the ready/valid signalling
1504 comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
1505 comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
1506 comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
1507 comb += fetch.n.i_ready.eq(fetch_insn_i_ready)
1508
1509 self.issue_fsm(m, core, nia,
1510 dbg, core_rst, is_svp64_mode,
1511 fetch_pc_o_ready, fetch_pc_i_valid,
1512 fetch_insn_o_valid, fetch_insn_i_ready,
1513 pred_insn_i_valid, pred_insn_o_ready,
1514 pred_mask_o_valid, pred_mask_i_ready,
1515 exec_insn_i_valid, exec_insn_o_ready,
1516 exec_pc_o_valid, exec_pc_i_ready)
1517
1518 if self.svp64_en:
1519 self.fetch_predicate_fsm(m,
1520 pred_insn_i_valid, pred_insn_o_ready,
1521 pred_mask_o_valid, pred_mask_i_ready)
1522
1523 self.execute_fsm(m, core,
1524 exec_insn_i_valid, exec_insn_o_ready,
1525 exec_pc_o_valid, exec_pc_i_ready)
1526
1527 return m
1528
1529
1530 class TestIssuer(Elaboratable):
1531 def __init__(self, pspec):
1532 self.ti = TestIssuerInternal(pspec)
1533 # XXX TODO: make this a command-line selectable option from pspec
1534 #from soc.simple.inorder import TestIssuerInternalInOrder
1535 #self.ti = TestIssuerInternalInOrder(pspec)
1536 self.pll = DummyPLL(instance=True)
1537
1538 self.dbg_rst_i = Signal(reset_less=True)
1539
1540 # PLL direct clock or not
1541 self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
1542 if self.pll_en:
1543 self.pll_test_o = Signal(reset_less=True)
1544 self.pll_vco_o = Signal(reset_less=True)
1545 self.clk_sel_i = Signal(2, reset_less=True)
1546 self.ref_clk = ClockSignal() # can't rename it but that's ok
1547 self.pllclk_clk = ClockSignal("pllclk")
1548
1549 def elaborate(self, platform):
1550 m = Module()
1551 comb = m.d.comb
1552
1553 # TestIssuer nominally runs at main clock, actually it is
1554 # all combinatorial internally except for coresync'd components
1555 m.submodules.ti = ti = self.ti
1556
1557 if self.pll_en:
1558 # ClockSelect runs at PLL output internal clock rate
1559 m.submodules.wrappll = pll = self.pll
1560
1561 # add clock domains from PLL
1562 cd_pll = ClockDomain("pllclk")
1563 m.domains += cd_pll
1564
1565 # PLL clock established. has the side-effect of running clksel
1566 # at the PLL's speed (see DomainRenamer("pllclk") above)
1567 pllclk = self.pllclk_clk
1568 comb += pllclk.eq(pll.clk_pll_o)
1569
1570 # wire up external 24mhz to PLL
1571 #comb += pll.clk_24_i.eq(self.ref_clk)
1572 # output 18 mhz PLL test signal, and analog oscillator out
1573 comb += self.pll_test_o.eq(pll.pll_test_o)
1574 comb += self.pll_vco_o.eq(pll.pll_vco_o)
1575
1576 # input to pll clock selection
1577 comb += pll.clk_sel_i.eq(self.clk_sel_i)
1578
1579 # now wire up ResetSignals. don't mind them being in this domain
1580 pll_rst = ResetSignal("pllclk")
1581 comb += pll_rst.eq(ResetSignal())
1582
1583 # internal clock is set to selector clock-out. has the side-effect of
1584 # running TestIssuer at this speed (see DomainRenamer("intclk") above)
1585 # debug clock runs at coresync internal clock
1586 if self.ti.dbg_domain != 'sync':
1587 cd_dbgsync = ClockDomain("dbgsync")
1588 intclk = ClockSignal(self.ti.core_domain)
1589 dbgclk = ClockSignal(self.ti.dbg_domain)
1590 # XXX BYPASS PLL XXX
1591 # XXX BYPASS PLL XXX
1592 # XXX BYPASS PLL XXX
1593 if self.pll_en:
1594 comb += intclk.eq(self.ref_clk)
1595 assert self.ti.core_domain != 'sync', \
1596 "cannot set core_domain to sync and use pll at the same time"
1597 else:
1598 if self.ti.core_domain != 'sync':
1599 comb += intclk.eq(ClockSignal())
1600 if self.ti.dbg_domain != 'sync':
1601 dbgclk = ClockSignal(self.ti.dbg_domain)
1602 comb += dbgclk.eq(intclk)
1603 comb += self.ti.dbg_rst_i.eq(self.dbg_rst_i)
1604
1605 return m
1606
1607 def ports(self):
1608 return list(self.ti.ports()) + list(self.pll.ports()) + \
1609 [ClockSignal(), ResetSignal()]
1610
1611 def external_ports(self):
1612 ports = self.ti.external_ports()
1613 ports.append(ClockSignal())
1614 ports.append(ResetSignal())
1615 if self.pll_en:
1616 ports.append(self.clk_sel_i)
1617 ports.append(self.pll.clk_24_i)
1618 ports.append(self.pll_test_o)
1619 ports.append(self.pll_vco_o)
1620 ports.append(self.pllclk_clk)
1621 ports.append(self.ref_clk)
1622 return ports
1623
1624
1625 if __name__ == '__main__':
1626 units = {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1627 'spr': 1,
1628 'div': 1,
1629 'mul': 1,
1630 'shiftrot': 1
1631 }
1632 pspec = TestMemPspec(ldst_ifacetype='bare_wb',
1633 imem_ifacetype='bare_wb',
1634 addr_wid=48,
1635 mask_wid=8,
1636 reg_wid=64,
1637 units=units)
1638 dut = TestIssuer(pspec)
1639 vl = main(dut, ports=dut.ports(), name="test_issuer")
1640
1641 if len(sys.argv) == 1:
1642 vl = rtlil.convert(dut, ports=dut.external_ports(), name="test_issuer")
1643 with open("test_issuer.il", "w") as f:
1644 f.write(vl)