3 not in any way intended for production use. this runs a FSM that:
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
10 * does it all over again
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to at provide something that can be further incrementally
18 from nmigen
import (Elaboratable
, Module
, Signal
, ClockSignal
, ResetSignal
,
19 ClockDomain
, DomainRenamer
, Mux
, Const
, Repl
, Cat
)
20 from nmigen
.cli
import rtlil
21 from nmigen
.cli
import main
24 from nmigen
.lib
.coding
import PriorityEncoder
26 from openpower
.decoder
.power_decoder
import create_pdecode
27 from openpower
.decoder
.power_decoder2
import PowerDecode2
, SVP64PrefixDecoder
28 from openpower
.decoder
.decode2execute1
import IssuerDecode2ToOperand
29 from openpower
.decoder
.decode2execute1
import Data
30 from openpower
.decoder
.power_enums
import (MicrOp
, SVP64PredInt
, SVP64PredCR
,
32 from openpower
.state
import CoreState
33 from openpower
.consts
import (CR
, SVP64CROffs
)
34 from soc
.experiment
.testmem
import TestMemory
# test only for instructions
35 from soc
.regfile
.regfiles
import StateRegs
, FastRegs
36 from soc
.simple
.core
import NonProductionCore
37 from soc
.config
.test
.test_loadstore
import TestMemPspec
38 from soc
.config
.ifetch
import ConfigFetchUnit
39 from soc
.debug
.dmi
import CoreDebug
, DMIInterface
40 from soc
.debug
.jtag
import JTAG
41 from soc
.config
.pinouts
import get_pinspecs
42 from soc
.interrupts
.xics
import XICS_ICP
, XICS_ICS
43 from soc
.bus
.simple_gpio
import SimpleGPIO
44 from soc
.bus
.SPBlock512W64B8W
import SPBlock512W64B8W
45 from soc
.clock
.select
import ClockSelect
46 from soc
.clock
.dummypll
import DummyPLL
47 from openpower
.sv
.svstate
import SVSTATERec
50 from nmutil
.util
import rising_edge
def get_insn(f_instr_o, pc):
    """Select the 32-bit instruction word from the fetch-unit output.

    Parameters:
        f_instr_o: instruction-data signal from the fetch unit; may be
                   32 or 64 bits wide depending on memory configuration
        pc:        current program counter; only bit 2 is used, and only
                   when the fetch bus is 64 bits wide

    Returns the 32-bit slice containing the instruction at ``pc``.
    """
    if f_instr_o.width == 32:
        # fetch bus is exactly one instruction wide: pass straight through
        return f_instr_o
    else:
        # 64-bit: bit 2 of pc decides which word to select
        return f_instr_o.word_select(pc[2], 32)
# gets state input or reads from state regfile
def state_get(m, core_rst, state_i, name, regfile, regnum):
    """Return a 64-bit signal holding either an incoming override value
    or the current contents of one State Regfile register.

    Parameters:
        m:        nmigen Module being built
        core_rst: core-domain reset; while asserted, no read is initiated
        state_i:  Data record with ``ok`` (override request) and ``data``
        name:     base name for the generated signals
        regfile:  State Regfile read port (``ren`` / ``data_o``)
        regnum:   register number within the State Regfile

    The regfile read has a one-clock latency, so ``res_ok_delay``
    (registered inverse of ``state_i.ok``) gates the captured read data.
    """
    comb = m.d.comb
    sync = m.d.sync
    # the result signal, plus the one-clock-delayed "no override" flag
    res = Signal(64, reset_less=True, name=name)
    res_ok_delay = Signal(name="%s_ok_delay" % name)
    # NOTE(review): the ~core_rst guard is inferred from the otherwise
    # unused core_rst parameter - confirm against the full file
    with m.If(~core_rst):
        sync += res_ok_delay.eq(~state_i.ok)
        with m.If(state_i.ok):
            # incoming override (start from pc_i)
            comb += res.eq(state_i.data)
        with m.Else():
            # otherwise read StateRegs regfile for PC...
            comb += regfile.ren.eq(1 << regnum)
            # ... but on a 1-clock delay
            with m.If(res_ok_delay):
                comb += res.eq(regfile.data_o)
    return res
def get_predint(m, mask, name):
    """decode SVP64 predicate integer mask field to reg number and invert
    this is identical to the equivalent function in ISACaller except that
    it doesn't read the INT directly, it just decodes "what needs to be done"
    i.e. which INT reg, whether it is shifted and whether it is bit-inverted.

    * all1s is set to indicate that no mask is to be applied.
    * regread indicates the GPR register number to be read
    * invert is set to indicate that the register value is to be inverted
    * unary indicates that the contents of the register is to be shifted 1<<r3

    Returns (regread, invert, unary, all1s) as combinatorially-driven
    Signals, decoded from ``mask``.
    """
    comb = m.d.comb
    regread = Signal(5, name=name+"regread")
    invert = Signal(name=name+"invert")
    unary = Signal(name=name+"unary")
    all1s = Signal(name=name+"all1s")
    with m.Switch(mask):
        with m.Case(SVP64PredInt.ALWAYS.value):
            comb += all1s.eq(1)  # use 0b1111 (all ones)
        with m.Case(SVP64PredInt.R3_UNARY.value):
            comb += regread.eq(3)
            comb += unary.eq(1)  # 1<<r3 - shift r3 (single bit)
        with m.Case(SVP64PredInt.R3.value):
            comb += regread.eq(3)
        with m.Case(SVP64PredInt.R3_N.value):
            # same register as R3 but with the mask bit-inverted
            comb += regread.eq(3)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R10.value):
            comb += regread.eq(10)
        with m.Case(SVP64PredInt.R10_N.value):
            comb += regread.eq(10)
            comb += invert.eq(1)
        with m.Case(SVP64PredInt.R30.value):
            comb += regread.eq(30)
        with m.Case(SVP64PredInt.R30_N.value):
            comb += regread.eq(30)
            comb += invert.eq(1)
    return regread, invert, unary, all1s
def get_predcr(m, mask, name):
    """decode SVP64 predicate CR to reg number field and invert status
    this is identical to _get_predcr in ISACaller

    Returns (idx, invert): ``idx`` selects which bit of the 4-bit CR
    field to test (LT/GT/EQ/SO), and ``invert`` is set when the negated
    condition (GE/LE/NE/NS) is requested - i.e. GE is deliberately
    encoded as "CR.LT, inverted", not a copy-paste error.
    """
    comb = m.d.comb
    idx = Signal(2, name=name+"idx")
    invert = Signal(name=name+"crinvert")
    with m.Switch(mask):
        with m.Case(SVP64PredCR.LT.value):
            comb += idx.eq(CR.LT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.GE.value):
            # GE == NOT LT: same bit, inverted
            comb += idx.eq(CR.LT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.GT.value):
            comb += idx.eq(CR.GT)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.LE.value):
            # LE == NOT GT
            comb += idx.eq(CR.GT)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.EQ.value):
            comb += idx.eq(CR.EQ)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.NE.value):
            # NE == NOT EQ
            comb += idx.eq(CR.EQ)
            comb += invert.eq(1)
        with m.Case(SVP64PredCR.SO.value):
            comb += idx.eq(CR.SO)
            comb += invert.eq(0)
        with m.Case(SVP64PredCR.NS.value):
            # NS == NOT SO
            comb += idx.eq(CR.SO)
            comb += invert.eq(1)
    return idx, invert
153 class TestIssuerInternal(Elaboratable
):
154 """TestIssuer - reads instructions from TestMemory and issues them
156 efficiency and speed is not the main goal here: functional correctness
157 and code clarity is. optimisations (which almost 100% interfere with
158 easy understanding) come later.
160 def __init__(self
, pspec
):
162 # test is SVP64 is to be enabled
163 self
.svp64_en
= hasattr(pspec
, "svp64") and (pspec
.svp64
== True)
165 # and if regfiles are reduced
166 self
.regreduce_en
= (hasattr(pspec
, "regreduce") and
167 (pspec
.regreduce
== True))
169 # JTAG interface. add this right at the start because if it's
170 # added it *modifies* the pspec, by adding enable/disable signals
171 # for parts of the rest of the core
172 self
.jtag_en
= hasattr(pspec
, "debug") and pspec
.debug
== 'jtag'
174 # XXX MUST keep this up-to-date with litex, and
175 # soc-cocotb-sim, and err.. all needs sorting out, argh
178 'eint', 'gpio', 'mspi0',
179 # 'mspi1', - disabled for now
180 # 'pwm', 'sd0', - disabled for now
182 self
.jtag
= JTAG(get_pinspecs(subset
=subset
))
183 # add signals to pspec to enable/disable icache and dcache
184 # (or data and intstruction wishbone if icache/dcache not included)
185 # https://bugs.libre-soc.org/show_bug.cgi?id=520
186 # TODO: do we actually care if these are not domain-synchronised?
187 # honestly probably not.
188 pspec
.wb_icache_en
= self
.jtag
.wb_icache_en
189 pspec
.wb_dcache_en
= self
.jtag
.wb_dcache_en
190 self
.wb_sram_en
= self
.jtag
.wb_sram_en
192 self
.wb_sram_en
= Const(1)
194 # add 4k sram blocks?
195 self
.sram4x4k
= (hasattr(pspec
, "sram4x4kblock") and
196 pspec
.sram4x4kblock
== True)
200 self
.sram4k
.append(SPBlock512W64B8W(name
="sram4k_%d" % i
,
204 # add interrupt controller?
205 self
.xics
= hasattr(pspec
, "xics") and pspec
.xics
== True
207 self
.xics_icp
= XICS_ICP()
208 self
.xics_ics
= XICS_ICS()
209 self
.int_level_i
= self
.xics_ics
.int_level_i
211 # add GPIO peripheral?
212 self
.gpio
= hasattr(pspec
, "gpio") and pspec
.gpio
== True
214 self
.simple_gpio
= SimpleGPIO()
215 self
.gpio_o
= self
.simple_gpio
.gpio_o
217 # main instruction core. suitable for prototyping / demo only
218 self
.core
= core
= NonProductionCore(pspec
)
219 self
.core_rst
= ResetSignal("coresync")
221 # instruction decoder. goes into Trap Record
222 pdecode
= create_pdecode()
223 self
.cur_state
= CoreState("cur") # current state (MSR/PC/SVSTATE)
224 self
.pdecode2
= PowerDecode2(pdecode
, state
=self
.cur_state
,
225 opkls
=IssuerDecode2ToOperand
,
226 svp64_en
=self
.svp64_en
,
227 regreduce_en
=self
.regreduce_en
)
229 self
.svp64
= SVP64PrefixDecoder() # for decoding SVP64 prefix
231 # Test Instruction memory
232 self
.imem
= ConfigFetchUnit(pspec
).fu
235 self
.dbg
= CoreDebug()
237 # instruction go/monitor
238 self
.pc_o
= Signal(64, reset_less
=True)
239 self
.pc_i
= Data(64, "pc_i") # set "ok" to indicate "please change me"
240 self
.svstate_i
= Data(32, "svstate_i") # ditto
241 self
.core_bigendian_i
= Signal() # TODO: set based on MSR.LE
242 self
.busy_o
= Signal(reset_less
=True)
243 self
.memerr_o
= Signal(reset_less
=True)
245 # STATE regfile read /write ports for PC, MSR, SVSTATE
246 staterf
= self
.core
.regs
.rf
['state']
247 self
.state_r_pc
= staterf
.r_ports
['cia'] # PC rd
248 self
.state_w_pc
= staterf
.w_ports
['d_wr1'] # PC wr
249 self
.state_r_msr
= staterf
.r_ports
['msr'] # MSR rd
250 self
.state_r_sv
= staterf
.r_ports
['sv'] # SVSTATE rd
251 self
.state_w_sv
= staterf
.w_ports
['sv'] # SVSTATE wr
253 # DMI interface access
254 intrf
= self
.core
.regs
.rf
['int']
255 crrf
= self
.core
.regs
.rf
['cr']
256 xerrf
= self
.core
.regs
.rf
['xer']
257 self
.int_r
= intrf
.r_ports
['dmi'] # INT read
258 self
.cr_r
= crrf
.r_ports
['full_cr_dbg'] # CR read
259 self
.xer_r
= xerrf
.r_ports
['full_xer'] # XER read
263 self
.int_pred
= intrf
.r_ports
['pred'] # INT predicate read
264 self
.cr_pred
= crrf
.r_ports
['cr_pred'] # CR predicate read
266 # hack method of keeping an eye on whether branch/trap set the PC
267 self
.state_nia
= self
.core
.regs
.rf
['state'].w_ports
['nia']
268 self
.state_nia
.wen
.name
= 'state_nia_wen'
270 # pulse to synchronize the simulator at instruction end
271 self
.insn_done
= Signal()
274 # store copies of predicate masks
275 self
.srcmask
= Signal(64)
276 self
.dstmask
= Signal(64)
278 def fetch_fsm(self
, m
, core
, pc
, svstate
, nia
, is_svp64_mode
,
279 fetch_pc_ready_o
, fetch_pc_valid_i
,
280 fetch_insn_valid_o
, fetch_insn_ready_i
):
283 this FSM performs fetch of raw instruction data, partial-decodes
284 it 32-bit at a time to detect SVP64 prefixes, and will optionally
285 read a 2nd 32-bit quantity if that occurs.
289 pdecode2
= self
.pdecode2
290 cur_state
= self
.cur_state
291 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
293 msr_read
= Signal(reset
=1)
295 with m
.FSM(name
='fetch_fsm'):
298 with m
.State("IDLE"):
299 comb
+= fetch_pc_ready_o
.eq(1)
300 with m
.If(fetch_pc_valid_i
):
301 # instruction allowed to go: start by reading the PC
302 # capture the PC and also drop it into Insn Memory
303 # we have joined a pair of combinatorial memory
304 # lookups together. this is Generally Bad.
305 comb
+= self
.imem
.a_pc_i
.eq(pc
)
306 comb
+= self
.imem
.a_valid_i
.eq(1)
307 comb
+= self
.imem
.f_valid_i
.eq(1)
308 sync
+= cur_state
.pc
.eq(pc
)
309 sync
+= cur_state
.svstate
.eq(svstate
) # and svstate
311 # initiate read of MSR. arrives one clock later
312 comb
+= self
.state_r_msr
.ren
.eq(1 << StateRegs
.MSR
)
313 sync
+= msr_read
.eq(0)
315 m
.next
= "INSN_READ" # move to "wait for bus" phase
317 # dummy pause to find out why simulation is not keeping up
318 with m
.State("INSN_READ"):
319 # one cycle later, msr/sv read arrives. valid only once.
320 with m
.If(~msr_read
):
321 sync
+= msr_read
.eq(1) # yeah don't read it again
322 sync
+= cur_state
.msr
.eq(self
.state_r_msr
.data_o
)
323 with m
.If(self
.imem
.f_busy_o
): # zzz...
324 # busy: stay in wait-read
325 comb
+= self
.imem
.a_valid_i
.eq(1)
326 comb
+= self
.imem
.f_valid_i
.eq(1)
328 # not busy: instruction fetched
329 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
)
332 # decode the SVP64 prefix, if any
333 comb
+= svp64
.raw_opcode_in
.eq(insn
)
334 comb
+= svp64
.bigendian
.eq(self
.core_bigendian_i
)
335 # pass the decoded prefix (if any) to PowerDecoder2
336 sync
+= pdecode2
.sv_rm
.eq(svp64
.svp64_rm
)
337 # remember whether this is a prefixed instruction, so
338 # the FSM can readily loop when VL==0
339 sync
+= is_svp64_mode
.eq(svp64
.is_svp64_mode
)
340 # calculate the address of the following instruction
341 insn_size
= Mux(svp64
.is_svp64_mode
, 8, 4)
342 sync
+= nia
.eq(cur_state
.pc
+ insn_size
)
343 with m
.If(~svp64
.is_svp64_mode
):
344 # with no prefix, store the instruction
345 # and hand it directly to the next FSM
346 sync
+= dec_opcode_i
.eq(insn
)
347 m
.next
= "INSN_READY"
349 # fetch the rest of the instruction from memory
350 comb
+= self
.imem
.a_pc_i
.eq(cur_state
.pc
+ 4)
351 comb
+= self
.imem
.a_valid_i
.eq(1)
352 comb
+= self
.imem
.f_valid_i
.eq(1)
353 m
.next
= "INSN_READ2"
355 # not SVP64 - 32-bit only
356 sync
+= nia
.eq(cur_state
.pc
+ 4)
357 sync
+= dec_opcode_i
.eq(insn
)
358 m
.next
= "INSN_READY"
360 with m
.State("INSN_READ2"):
361 with m
.If(self
.imem
.f_busy_o
): # zzz...
362 # busy: stay in wait-read
363 comb
+= self
.imem
.a_valid_i
.eq(1)
364 comb
+= self
.imem
.f_valid_i
.eq(1)
366 # not busy: instruction fetched
367 insn
= get_insn(self
.imem
.f_instr_o
, cur_state
.pc
+4)
368 sync
+= dec_opcode_i
.eq(insn
)
369 m
.next
= "INSN_READY"
370 # TODO: probably can start looking at pdecode2.rm_dec
371 # here or maybe even in INSN_READ state, if svp64_mode
372 # detected, in order to trigger - and wait for - the
375 pmode
= pdecode2
.rm_dec
.predmode
377 if pmode != SVP64PredMode.ALWAYS.value:
378 fire predicate loading FSM and wait before
381 sync += self.srcmask.eq(-1) # set to all 1s
382 sync += self.dstmask.eq(-1) # set to all 1s
383 m.next = "INSN_READY"
386 with m
.State("INSN_READY"):
387 # hand over the instruction, to be decoded
388 comb
+= fetch_insn_valid_o
.eq(1)
389 with m
.If(fetch_insn_ready_i
):
392 def fetch_predicate_fsm(self
, m
,
393 pred_insn_valid_i
, pred_insn_ready_o
,
394 pred_mask_valid_o
, pred_mask_ready_i
):
395 """fetch_predicate_fsm - obtains (constructs in the case of CR)
396 src/dest predicate masks
398 https://bugs.libre-soc.org/show_bug.cgi?id=617
399 the predicates can be read here, by using IntRegs r_ports['pred']
400 or CRRegs r_ports['pred']. in the case of CRs it will have to
401 be done through multiple reads, extracting one relevant at a time.
402 later, a faster way would be to use the 32-bit-wide CR port but
403 this is more complex decoding, here. equivalent code used in
404 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
406 note: this ENTIRE FSM is not to be called when svp64 is disabled
410 pdecode2
= self
.pdecode2
411 rm_dec
= pdecode2
.rm_dec
# SVP64RMModeDecode
412 predmode
= rm_dec
.predmode
413 srcpred
, dstpred
= rm_dec
.srcpred
, rm_dec
.dstpred
414 cr_pred
, int_pred
= self
.cr_pred
, self
.int_pred
# read regfiles
415 # get src/dst step, so we can skip already used mask bits
416 cur_state
= self
.cur_state
417 srcstep
= cur_state
.svstate
.srcstep
418 dststep
= cur_state
.svstate
.dststep
419 cur_vl
= cur_state
.svstate
.vl
422 sregread
, sinvert
, sunary
, sall1s
= get_predint(m
, srcpred
, 's')
423 dregread
, dinvert
, dunary
, dall1s
= get_predint(m
, dstpred
, 'd')
424 sidx
, scrinvert
= get_predcr(m
, srcpred
, 's')
425 didx
, dcrinvert
= get_predcr(m
, dstpred
, 'd')
427 # store fetched masks, for either intpred or crpred
428 # when src/dst step is not zero, the skipped mask bits need to be
429 # shifted-out, before actually storing them in src/dest mask
430 new_srcmask
= Signal(64, reset_less
=True)
431 new_dstmask
= Signal(64, reset_less
=True)
433 with m
.FSM(name
="fetch_predicate"):
435 with m
.State("FETCH_PRED_IDLE"):
436 comb
+= pred_insn_ready_o
.eq(1)
437 with m
.If(pred_insn_valid_i
):
438 with m
.If(predmode
== SVP64PredMode
.INT
):
439 # skip fetching destination mask register, when zero
441 sync
+= new_dstmask
.eq(-1)
442 # directly go to fetch source mask register
443 # guaranteed not to be zero (otherwise predmode
444 # would be SVP64PredMode.ALWAYS, not INT)
445 comb
+= int_pred
.addr
.eq(sregread
)
446 comb
+= int_pred
.ren
.eq(1)
447 m
.next
= "INT_SRC_READ"
448 # fetch destination predicate register
450 comb
+= int_pred
.addr
.eq(dregread
)
451 comb
+= int_pred
.ren
.eq(1)
452 m
.next
= "INT_DST_READ"
453 with m
.Elif(predmode
== SVP64PredMode
.CR
):
454 # go fetch masks from the CR register file
455 sync
+= new_srcmask
.eq(0)
456 sync
+= new_dstmask
.eq(0)
459 sync
+= self
.srcmask
.eq(-1)
460 sync
+= self
.dstmask
.eq(-1)
461 m
.next
= "FETCH_PRED_DONE"
463 with m
.State("INT_DST_READ"):
464 # store destination mask
465 inv
= Repl(dinvert
, 64)
467 # set selected mask bit for 1<<r3 mode
468 dst_shift
= Signal(range(64))
469 comb
+= dst_shift
.eq(self
.int_pred
.data_o
& 0b111111)
470 sync
+= new_dstmask
.eq(1 << dst_shift
)
472 # invert mask if requested
473 sync
+= new_dstmask
.eq(self
.int_pred
.data_o ^ inv
)
474 # skip fetching source mask register, when zero
476 sync
+= new_srcmask
.eq(-1)
477 m
.next
= "FETCH_PRED_SHIFT_MASK"
478 # fetch source predicate register
480 comb
+= int_pred
.addr
.eq(sregread
)
481 comb
+= int_pred
.ren
.eq(1)
482 m
.next
= "INT_SRC_READ"
484 with m
.State("INT_SRC_READ"):
486 inv
= Repl(sinvert
, 64)
488 # set selected mask bit for 1<<r3 mode
489 src_shift
= Signal(range(64))
490 comb
+= src_shift
.eq(self
.int_pred
.data_o
& 0b111111)
491 sync
+= new_srcmask
.eq(1 << src_shift
)
493 # invert mask if requested
494 sync
+= new_srcmask
.eq(self
.int_pred
.data_o ^ inv
)
495 m
.next
= "FETCH_PRED_SHIFT_MASK"
497 # fetch masks from the CR register file
498 # implements the following loop:
499 # idx, inv = get_predcr(mask)
501 # for cr_idx in range(vl):
502 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
504 # mask |= 1 << cr_idx
506 with m
.State("CR_READ"):
507 # CR index to be read, which will be ready by the next cycle
508 cr_idx
= Signal
.like(cur_vl
, reset_less
=True)
509 # submit the read operation to the regfile
510 with m
.If(cr_idx
!= cur_vl
):
511 # the CR read port is unary ...
513 # ... in MSB0 convention ...
514 # ren = 1 << (7 - cr_idx)
515 # ... and with an offset:
516 # ren = 1 << (7 - off - cr_idx)
517 idx
= SVP64CROffs
.CRPred
+ cr_idx
518 comb
+= cr_pred
.ren
.eq(1 << (7 - idx
))
519 # signal data valid in the next cycle
520 cr_read
= Signal(reset_less
=True)
521 sync
+= cr_read
.eq(1)
522 # load the next index
523 sync
+= cr_idx
.eq(cr_idx
+ 1)
526 sync
+= cr_read
.eq(0)
528 m
.next
= "FETCH_PRED_SHIFT_MASK"
530 # compensate for the one cycle delay on the regfile
531 cur_cr_idx
= Signal
.like(cur_vl
)
532 comb
+= cur_cr_idx
.eq(cr_idx
- 1)
533 # read the CR field, select the appropriate bit
537 comb
+= cr_field
.eq(cr_pred
.data_o
)
538 comb
+= scr_bit
.eq(cr_field
.bit_select(sidx
, 1) ^ scrinvert
)
539 comb
+= dcr_bit
.eq(cr_field
.bit_select(didx
, 1) ^ dcrinvert
)
540 # set the corresponding mask bit
541 bit_to_set
= Signal
.like(self
.srcmask
)
542 comb
+= bit_to_set
.eq(1 << cur_cr_idx
)
544 sync
+= new_srcmask
.eq(new_srcmask | bit_to_set
)
546 sync
+= new_dstmask
.eq(new_dstmask | bit_to_set
)
548 with m
.State("FETCH_PRED_SHIFT_MASK"):
549 # shift-out skipped mask bits
550 sync
+= self
.srcmask
.eq(new_srcmask
>> srcstep
)
551 sync
+= self
.dstmask
.eq(new_dstmask
>> dststep
)
552 m
.next
= "FETCH_PRED_DONE"
554 with m
.State("FETCH_PRED_DONE"):
555 comb
+= pred_mask_valid_o
.eq(1)
556 with m
.If(pred_mask_ready_i
):
557 m
.next
= "FETCH_PRED_IDLE"
559 def issue_fsm(self
, m
, core
, pc_changed
, sv_changed
, nia
,
560 dbg
, core_rst
, is_svp64_mode
,
561 fetch_pc_ready_o
, fetch_pc_valid_i
,
562 fetch_insn_valid_o
, fetch_insn_ready_i
,
563 pred_insn_valid_i
, pred_insn_ready_o
,
564 pred_mask_valid_o
, pred_mask_ready_i
,
565 exec_insn_valid_i
, exec_insn_ready_o
,
566 exec_pc_valid_o
, exec_pc_ready_i
):
569 decode / issue FSM. this interacts with the "fetch" FSM
570 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
571 (outgoing). also interacts with the "execute" FSM
572 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
574 SVP64 RM prefixes have already been set up by the
575 "fetch" phase, so execute is fairly straightforward.
580 pdecode2
= self
.pdecode2
581 cur_state
= self
.cur_state
584 dec_opcode_i
= pdecode2
.dec
.raw_opcode_in
# raw opcode
586 # for updating svstate (things like srcstep etc.)
587 update_svstate
= Signal() # set this (below) if updating
588 new_svstate
= SVSTATERec("new_svstate")
589 comb
+= new_svstate
.eq(cur_state
.svstate
)
591 # precalculate srcstep+1 and dststep+1
592 cur_srcstep
= cur_state
.svstate
.srcstep
593 cur_dststep
= cur_state
.svstate
.dststep
594 next_srcstep
= Signal
.like(cur_srcstep
)
595 next_dststep
= Signal
.like(cur_dststep
)
596 comb
+= next_srcstep
.eq(cur_state
.svstate
.srcstep
+1)
597 comb
+= next_dststep
.eq(cur_state
.svstate
.dststep
+1)
599 # note if an exception happened. in a pipelined or OoO design
600 # this needs to be accompanied by "shadowing" (or stalling)
602 for exc
in core
.fus
.excs
.values():
603 el
.append(exc
.happened
)
604 exc_happened
= Signal()
605 if len(el
) > 0: # at least one exception
606 comb
+= exc_happened
.eq(Cat(*el
).bool())
608 with m
.FSM(name
="issue_fsm"):
610 # sync with the "fetch" phase which is reading the instruction
611 # at this point, there is no instruction running, that
612 # could inadvertently update the PC.
613 with m
.State("ISSUE_START"):
614 # wait on "core stop" release, before next fetch
615 # need to do this here, in case we are in a VL==0 loop
616 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
617 comb
+= fetch_pc_valid_i
.eq(1) # tell fetch to start
618 with m
.If(fetch_pc_ready_o
): # fetch acknowledged us
621 # tell core it's stopped, and acknowledge debug handshake
622 comb
+= dbg
.core_stopped_i
.eq(1)
623 # while stopped, allow updating the PC and SVSTATE
624 with m
.If(self
.pc_i
.ok
):
625 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
626 comb
+= self
.state_w_pc
.data_i
.eq(self
.pc_i
.data
)
627 sync
+= pc_changed
.eq(1)
628 with m
.If(self
.svstate_i
.ok
):
629 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
630 comb
+= update_svstate
.eq(1)
631 sync
+= sv_changed
.eq(1)
633 # wait for an instruction to arrive from Fetch
634 with m
.State("INSN_WAIT"):
635 comb
+= fetch_insn_ready_i
.eq(1)
636 with m
.If(fetch_insn_valid_o
):
637 # loop into ISSUE_START if it's a SVP64 instruction
638 # and VL == 0. this because VL==0 is a for-loop
639 # from 0 to 0 i.e. always, always a NOP.
640 cur_vl
= cur_state
.svstate
.vl
641 with m
.If(is_svp64_mode
& (cur_vl
== 0)):
642 # update the PC before fetching the next instruction
643 # since we are in a VL==0 loop, no instruction was
644 # executed that we could be overwriting
645 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
646 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
647 comb
+= self
.insn_done
.eq(1)
648 m
.next
= "ISSUE_START"
651 m
.next
= "PRED_START" # start fetching predicate
653 m
.next
= "DECODE_SV" # skip predication
655 with m
.State("PRED_START"):
656 comb
+= pred_insn_valid_i
.eq(1) # tell fetch_pred to start
657 with m
.If(pred_insn_ready_o
): # fetch_pred acknowledged us
660 with m
.State("MASK_WAIT"):
661 comb
+= pred_mask_ready_i
.eq(1) # ready to receive the masks
662 with m
.If(pred_mask_valid_o
): # predication masks are ready
665 # skip zeros in predicate
666 with m
.State("PRED_SKIP"):
667 with m
.If(~is_svp64_mode
):
668 m
.next
= "DECODE_SV" # nothing to do
671 pred_src_zero
= pdecode2
.rm_dec
.pred_sz
672 pred_dst_zero
= pdecode2
.rm_dec
.pred_dz
674 # new srcstep, after skipping zeros
675 skip_srcstep
= Signal
.like(cur_srcstep
)
676 # value to be added to the current srcstep
677 src_delta
= Signal
.like(cur_srcstep
)
678 # add leading zeros to srcstep, if not in zero mode
679 with m
.If(~pred_src_zero
):
680 # priority encoder (count leading zeros)
681 # append guard bit, in case the mask is all zeros
682 pri_enc_src
= PriorityEncoder(65)
683 m
.submodules
.pri_enc_src
= pri_enc_src
684 comb
+= pri_enc_src
.i
.eq(Cat(self
.srcmask
,
686 comb
+= src_delta
.eq(pri_enc_src
.o
)
687 # apply delta to srcstep
688 comb
+= skip_srcstep
.eq(cur_srcstep
+ src_delta
)
689 # shift-out all leading zeros from the mask
690 # plus the leading "one" bit
691 # TODO count leading zeros and shift-out the zero
692 # bits, in the same step, in hardware
693 sync
+= self
.srcmask
.eq(self
.srcmask
>> (src_delta
+1))
695 # same as above, but for dststep
696 skip_dststep
= Signal
.like(cur_dststep
)
697 dst_delta
= Signal
.like(cur_dststep
)
698 with m
.If(~pred_dst_zero
):
699 pri_enc_dst
= PriorityEncoder(65)
700 m
.submodules
.pri_enc_dst
= pri_enc_dst
701 comb
+= pri_enc_dst
.i
.eq(Cat(self
.dstmask
,
703 comb
+= dst_delta
.eq(pri_enc_dst
.o
)
704 comb
+= skip_dststep
.eq(cur_dststep
+ dst_delta
)
705 sync
+= self
.dstmask
.eq(self
.dstmask
>> (dst_delta
+1))
707 # TODO: initialize mask[VL]=1 to avoid passing past VL
708 with m
.If((skip_srcstep
>= cur_vl
) |
709 (skip_dststep
>= cur_vl
)):
710 # end of VL loop. Update PC and reset src/dst step
711 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
712 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
713 comb
+= new_svstate
.srcstep
.eq(0)
714 comb
+= new_svstate
.dststep
.eq(0)
715 comb
+= update_svstate
.eq(1)
716 # synchronize with the simulator
717 comb
+= self
.insn_done
.eq(1)
719 m
.next
= "ISSUE_START"
721 # update new src/dst step
722 comb
+= new_svstate
.srcstep
.eq(skip_srcstep
)
723 comb
+= new_svstate
.dststep
.eq(skip_dststep
)
724 comb
+= update_svstate
.eq(1)
728 # pass predicate mask bits through to satellite decoders
729 # TODO: for SIMD this will be *multiple* bits
730 sync
+= core
.sv_pred_sm
.eq(self
.srcmask
[0])
731 sync
+= core
.sv_pred_dm
.eq(self
.dstmask
[0])
733 # after src/dst step have been updated, we are ready
734 # to decode the instruction
735 with m
.State("DECODE_SV"):
736 # decode the instruction
737 sync
+= core
.e
.eq(pdecode2
.e
)
738 sync
+= core
.state
.eq(cur_state
)
739 sync
+= core
.raw_insn_i
.eq(dec_opcode_i
)
740 sync
+= core
.bigendian_i
.eq(self
.core_bigendian_i
)
742 sync
+= core
.sv_rm
.eq(pdecode2
.sv_rm
)
743 # set RA_OR_ZERO detection in satellite decoders
744 sync
+= core
.sv_a_nz
.eq(pdecode2
.sv_a_nz
)
746 m
.next
= "INSN_EXECUTE" # move to "execute"
748 # handshake with execution FSM, move to "wait" once acknowledged
749 with m
.State("INSN_EXECUTE"):
750 comb
+= exec_insn_valid_i
.eq(1) # trigger execute
751 with m
.If(exec_insn_ready_o
): # execute acknowledged us
752 m
.next
= "EXECUTE_WAIT"
754 with m
.State("EXECUTE_WAIT"):
755 # wait on "core stop" release, at instruction end
756 # need to do this here, in case we are in a VL>1 loop
757 with m
.If(~dbg
.core_stop_o
& ~core_rst
):
758 comb
+= exec_pc_ready_i
.eq(1)
759 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
760 #with m.If(exec_pc_valid_o & exc_happened):
761 # probably something like this:
762 # sync += pdecode2.ldst_exc.eq(core.fus.get_exc("ldst0")
763 # TODO: the exception info needs to be blatted
764 # into pdecode.ldst_exc, and the instruction "re-run".
765 # when ldst_exc.happened is set, the PowerDecoder2
766 # reacts very differently: it re-writes the instruction
767 # with a "trap" (calls PowerDecoder2.trap()) which
768 # will *overwrite* whatever was requested and jump the
769 # PC to the exception address, as well as alter MSR.
770 # nothing else needs to be done other than to note
771 # the change of PC and MSR (and, later, SVSTATE)
772 #with m.Elif(exec_pc_valid_o):
773 with m
.If(exec_pc_valid_o
): # replace with Elif (above)
775 # was this the last loop iteration?
777 cur_vl
= cur_state
.svstate
.vl
778 comb
+= is_last
.eq(next_srcstep
== cur_vl
)
780 # if either PC or SVSTATE were changed by the previous
781 # instruction, go directly back to Fetch, without
782 # updating either PC or SVSTATE
783 with m
.If(pc_changed | sv_changed
):
784 m
.next
= "ISSUE_START"
786 # also return to Fetch, when no output was a vector
787 # (regardless of SRCSTEP and VL), or when the last
788 # instruction was really the last one of the VL loop
789 with m
.Elif((~pdecode2
.loop_continue
) | is_last
):
790 # before going back to fetch, update the PC state
791 # register with the NIA.
792 # ok here we are not reading the branch unit.
793 # TODO: this just blithely overwrites whatever
794 # pipeline updated the PC
795 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
796 comb
+= self
.state_w_pc
.data_i
.eq(nia
)
797 # reset SRCSTEP before returning to Fetch
799 with m
.If(pdecode2
.loop_continue
):
800 comb
+= new_svstate
.srcstep
.eq(0)
801 comb
+= new_svstate
.dststep
.eq(0)
802 comb
+= update_svstate
.eq(1)
804 comb
+= new_svstate
.srcstep
.eq(0)
805 comb
+= new_svstate
.dststep
.eq(0)
806 comb
+= update_svstate
.eq(1)
807 m
.next
= "ISSUE_START"
809 # returning to Execute? then, first update SRCSTEP
811 comb
+= new_svstate
.srcstep
.eq(next_srcstep
)
812 comb
+= new_svstate
.dststep
.eq(next_dststep
)
813 comb
+= update_svstate
.eq(1)
814 # return to mask skip loop
818 comb
+= dbg
.core_stopped_i
.eq(1)
819 # while stopped, allow updating the PC and SVSTATE
820 with m
.If(self
.pc_i
.ok
):
821 comb
+= self
.state_w_pc
.wen
.eq(1 << StateRegs
.PC
)
822 comb
+= self
.state_w_pc
.data_i
.eq(self
.pc_i
.data
)
823 sync
+= pc_changed
.eq(1)
824 with m
.If(self
.svstate_i
.ok
):
825 comb
+= new_svstate
.eq(self
.svstate_i
.data
)
826 comb
+= update_svstate
.eq(1)
827 sync
+= sv_changed
.eq(1)
829 # check if svstate needs updating: if so, write it to State Regfile
830 with m
.If(update_svstate
):
831 comb
+= self
.state_w_sv
.wen
.eq(1<<StateRegs
.SVSTATE
)
832 comb
+= self
.state_w_sv
.data_i
.eq(new_svstate
)
833 sync
+= cur_state
.svstate
.eq(new_svstate
) # for next clock
def execute_fsm(self, m, core, pc_changed, sv_changed,
                exec_insn_valid_i, exec_insn_ready_o,
                exec_pc_valid_o, exec_pc_ready_i):
    """execute FSM

    execute FSM. this interacts with the "issue" FSM
    through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
    (outgoing). SVP64 RM prefixes have already been set up by the
    "issue" phase, so execute is fairly straightforward.
    """
    comb = m.d.comb
    sync = m.d.sync
    pdecode2 = self.pdecode2

    # temporaries: short names for the core's control/status signals
    core_busy_o = core.busy_o  # core is busy
    core_ivalid_i = core.ivalid_i  # instruction is valid
    core_issue_i = core.issue_i  # instruction is issued
    insn_type = core.e.do.insn_type  # instruction MicroOp type

    with m.FSM(name="exec_fsm"):

        # waiting for instruction bus (stays there until not busy)
        with m.State("INSN_START"):
            comb += exec_insn_ready_o.eq(1)
            with m.If(exec_insn_valid_i):
                comb += core_ivalid_i.eq(1)  # instruction is valid
                comb += core_issue_i.eq(1)  # and issued
                # clear PC/SVSTATE change-trackers for this instruction
                sync += sv_changed.eq(0)
                sync += pc_changed.eq(0)
                m.next = "INSN_ACTIVE"  # move to "wait completion"

        # instruction started: must wait till it finishes
        with m.State("INSN_ACTIVE"):
            with m.If(insn_type != MicrOp.OP_NOP):
                comb += core_ivalid_i.eq(1)  # instruction is valid
            # note changes to PC and SVSTATE by watching the nia
            # write-port enables (hack: see __init__ state_nia)
            with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
                sync += sv_changed.eq(1)
            with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
                sync += pc_changed.eq(1)
            with m.If(~core_busy_o):  # instruction done!
                comb += exec_pc_valid_o.eq(1)
                with m.If(exec_pc_ready_i):
                    # pulse to synchronize the simulator at insn end
                    comb += self.insn_done.eq(1)
                    m.next = "INSN_START"  # back to fetch
883 def setup_peripherals(self
, m
):
884 comb
, sync
= m
.d
.comb
, m
.d
.sync
886 # okaaaay so the debug module must be in coresync clock domain
887 # but NOT its reset signal. to cope with this, set every single
888 # submodule explicitly in coresync domain, debug and JTAG
889 # in their own one but using *external* reset.
890 csd
= DomainRenamer("coresync")
891 dbd
= csd
#DomainRenamer("dbgsync")
893 m
.submodules
.core
= core
= csd(self
.core
)
894 m
.submodules
.imem
= imem
= csd(self
.imem
)
895 m
.submodules
.dbg
= dbg
= self
.dbg
897 m
.submodules
.jtag
= jtag
= self
.jtag
898 # TODO: UART2GDB mux, here, from external pin
899 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
900 sync
+= dbg
.dmi
.connect_to(jtag
.dmi
)
902 cur_state
= self
.cur_state
904 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
906 for i
, sram
in enumerate(self
.sram4k
):
907 m
.submodules
["sram4k_%d" % i
] = csd(sram
)
908 comb
+= sram
.enable
.eq(self
.wb_sram_en
)
910 # XICS interrupt handler
912 m
.submodules
.xics_icp
= icp
= csd(self
.xics_icp
)
913 m
.submodules
.xics_ics
= ics
= csd(self
.xics_ics
)
914 comb
+= icp
.ics_i
.eq(ics
.icp_o
) # connect ICS to ICP
915 sync
+= cur_state
.eint
.eq(icp
.core_irq_o
) # connect ICP to core
917 # GPIO test peripheral
919 m
.submodules
.simple_gpio
= simple_gpio
= csd(self
.simple_gpio
)
921 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
922 # XXX causes litex ECP5 test to get wrong idea about input and output
923 # (but works with verilator sim *sigh*)
924 #if self.gpio and self.xics:
925 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
927 # instruction decoder
928 pdecode
= create_pdecode()
929 m
.submodules
.dec2
= pdecode2
= csd(self
.pdecode2
)
931 m
.submodules
.svp64
= svp64
= csd(self
.svp64
)
934 dmi
, d_reg
, d_cr
, d_xer
, = dbg
.dmi
, dbg
.d_gpr
, dbg
.d_cr
, dbg
.d_xer
935 intrf
= self
.core
.regs
.rf
['int']
937 # clock delay power-on reset
938 cd_por
= ClockDomain(reset_less
=True)
939 cd_sync
= ClockDomain()
940 core_sync
= ClockDomain("coresync")
941 m
.domains
+= cd_por
, cd_sync
, core_sync
943 ti_rst
= Signal(reset_less
=True)
944 delay
= Signal(range(4), reset
=3)
945 with m
.If(delay
!= 0):
946 m
.d
.por
+= delay
.eq(delay
- 1)
947 comb
+= cd_por
.clk
.eq(ClockSignal())
949 # power-on reset delay
950 core_rst
= ResetSignal("coresync")
951 comb
+= ti_rst
.eq(delay
!= 0 | dbg
.core_rst_o |
ResetSignal())
952 comb
+= core_rst
.eq(ti_rst
)
954 # busy/halted signals from core
955 comb
+= self
.busy_o
.eq(core
.busy_o
)
956 comb
+= pdecode2
.dec
.bigendian
.eq(self
.core_bigendian_i
)
958 # temporary hack: says "go" immediately for both address gen and ST
960 ldst
= core
.fus
.fus
['ldst0']
961 st_go_edge
= rising_edge(m
, ldst
.st
.rel_o
)
962 m
.d
.comb
+= ldst
.ad
.go_i
.eq(ldst
.ad
.rel_o
) # link addr-go direct to rel
963 m
.d
.comb
+= ldst
.st
.go_i
.eq(st_go_edge
) # link store-go to rising rel
    def elaborate(self, platform):
        """Top-level elaboration: hooks up peripherals, state regfile
        reads, debug state mirrors, and the four cooperating FSMs
        (fetch, predicate-fetch, issue, execute).

        NOTE(review): several original lines were lost in extraction:
        the `m = Module()` construction, the local bindings of `core`,
        `dbg` and `nia`, the body of the `with m.If(core_rst):` override,
        and the final `return m`.  The code below preserves exactly the
        statements that survive; verify against the upstream file before
        relying on it.
        """
        comb, sync = m.d.comb, m.d.sync
        cur_state = self.cur_state
        pdecode2 = self.pdecode2

        # set up peripherals and core
        core_rst = self.core_rst
        self.setup_peripherals(m)

        # reset current state if core reset requested
        m.d.sync += self.cur_state.eq(0)

        # PC and instruction from I-Memory
        comb += self.pc_o.eq(cur_state.pc)
        pc_changed = Signal()  # note write to PC
        sv_changed = Signal()  # note write to SVSTATE

        # read state either from incoming override or from regfile
        # TODO: really should be doing MSR in the same way
        pc = state_get(m, core_rst, self.pc_i,
                       self.state_r_pc, StateRegs.PC)
        svstate = state_get(m, core_rst, self.svstate_i,
                            "svstate",  # read SVSTATE
                            self.state_r_sv, StateRegs.SVSTATE)

        # don't write pc every cycle
        comb += self.state_w_pc.wen.eq(0)
        comb += self.state_w_pc.data_i.eq(0)

        # don't read msr every cycle
        comb += self.state_r_msr.ren.eq(0)

        # address of the next instruction, in the absence of a branch
        # depends on the instruction size
        # NOTE(review): the computation of `nia` itself appears to have
        # been lost in extraction; `nia` is used below but never bound.

        # connect up debug signals
        # TODO comb += core.icache_rst_i.eq(dbg.icache_rst_o)
        comb += dbg.terminate_i.eq(core.core_terminate_o)
        comb += dbg.state.pc.eq(pc)
        comb += dbg.state.svstate.eq(svstate)
        comb += dbg.state.msr.eq(cur_state.msr)

        # pass the prefix mode from Fetch to Issue, so the latter can loop
        is_svp64_mode = Signal()

        # there are *THREE^WFOUR-if-SVP64-enabled* FSMs, fetch (32/64-bit)
        # issue, decode/execute, now joined by "Predicate fetch/calculate".
        # these are the handshake signals between each

        # fetch FSM can run as soon as the PC is valid
        fetch_pc_valid_i = Signal()  # Execute tells Fetch "start next read"
        fetch_pc_ready_o = Signal()  # Fetch Tells SVSTATE "proceed"

        # fetch FSM hands over the instruction to be decoded / issued
        fetch_insn_valid_o = Signal()
        fetch_insn_ready_i = Signal()

        # predicate fetch FSM decodes and fetches the predicate
        pred_insn_valid_i = Signal()
        pred_insn_ready_o = Signal()

        # predicate fetch FSM delivers the masks
        pred_mask_valid_o = Signal()
        pred_mask_ready_i = Signal()

        # issue FSM delivers the instruction to the be executed
        exec_insn_valid_i = Signal()
        exec_insn_ready_o = Signal()

        # execute FSM, hands over the PC/SVSTATE back to the issue FSM
        exec_pc_valid_o = Signal()
        exec_pc_ready_i = Signal()

        # the FSMs here are perhaps unusual in that they detect conditions
        # then "hold" information, combinatorially, for the core
        # (as opposed to using sync - which would be on a clock's delay)
        # this includes the actual opcode, valid flags and so on.

        # Fetch, then predicate fetch, then Issue, then Execute.
        # Issue is where the VL for-loop lives.  the ready/valid
        # signalling is used to communicate between the four.
        self.fetch_fsm(m, core, pc, svstate, nia, is_svp64_mode,
                       fetch_pc_ready_o, fetch_pc_valid_i,
                       fetch_insn_valid_o, fetch_insn_ready_i)

        self.issue_fsm(m, core, pc_changed, sv_changed, nia,
                       dbg, core_rst, is_svp64_mode,
                       fetch_pc_ready_o, fetch_pc_valid_i,
                       fetch_insn_valid_o, fetch_insn_ready_i,
                       pred_insn_valid_i, pred_insn_ready_o,
                       pred_mask_valid_o, pred_mask_ready_i,
                       exec_insn_valid_i, exec_insn_ready_o,
                       exec_pc_valid_o, exec_pc_ready_i)

        self.fetch_predicate_fsm(m,
                                 pred_insn_valid_i, pred_insn_ready_o,
                                 pred_mask_valid_o, pred_mask_ready_i)

        self.execute_fsm(m, core, pc_changed, sv_changed,
                         exec_insn_valid_i, exec_insn_ready_o,
                         exec_pc_valid_o, exec_pc_ready_i)

        # whatever was done above, over-ride it if core reset is held
        with m.If(core_rst):
            # NOTE(review): the body of this override was lost in
            # extraction — confirm against upstream what is forced here.

            # this bit doesn't have to be in the FSM: connect up to read
            # regfiles on demand from DMI
            # NOTE(review): the corresponding call (presumably to
            # self.do_dmi) was lost in extraction.

        # DEC and TB inc/dec FSM.  copy of DEC is put into CoreState,
        # (which uses that in PowerDecoder2 to raise 0x900 exception)
        self.tb_dec_fsm(m, cur_state.dec)
    def do_dmi(self, m, dbg):
        """deals with DMI debug requests

        currently only provides read requests for the INT regfile, CR and XER
        it will later also deal with *writing* to these regfiles.

        for each of the three regfiles the pattern is identical: on a
        request, enable the regfile read port; one clock later the data
        arrives, so a 1-cycle delayed copy of the request strobes the
        data and ack back to the DMI interface.

        NOTE(review): `comb`/`sync` are used below but their binding
        (`comb, sync = m.d.comb, m.d.sync`) was lost in extraction, as
        was (apparently) an `if intrf.unary:`/`else:` split — the three
        consecutive `int_r` enables below are contradictory as written
        (ren set both one-hot and to 1).  Verify against upstream.
        """
        dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
        intrf = self.core.regs.rf['int']

        with m.If(d_reg.req):  # request for regfile access being made
            # TODO: error-check this
            # XXX should this be combinatorial?  sync better?
            comb += self.int_r.ren.eq(1 << d_reg.addr)
            comb += self.int_r.addr.eq(d_reg.addr)
            comb += self.int_r.ren.eq(1)
        d_reg_delay = Signal()
        sync += d_reg_delay.eq(d_reg.req)
        with m.If(d_reg_delay):
            # data arrives one clock later
            comb += d_reg.data.eq(self.int_r.data_o)
            comb += d_reg.ack.eq(1)

        # sigh same thing for CR debug
        with m.If(d_cr.req):  # request for regfile access being made
            comb += self.cr_r.ren.eq(0b11111111)  # enable all
        d_cr_delay = Signal()
        sync += d_cr_delay.eq(d_cr.req)
        with m.If(d_cr_delay):
            # data arrives one clock later
            comb += d_cr.data.eq(self.cr_r.data_o)
            comb += d_cr.ack.eq(1)

        # aaand XER
        with m.If(d_xer.req):  # request for regfile access being made
            comb += self.xer_r.ren.eq(0b111111)  # enable all
        d_xer_delay = Signal()
        sync += d_xer_delay.eq(d_xer.req)
        with m.If(d_xer_delay):
            # data arrives one clock later
            comb += d_xer.data.eq(self.xer_r.data_o)
            comb += d_xer.ack.eq(1)
    def tb_dec_fsm(self, m, spr_dec):
        """FSM for updating DEC and TB, alternately: DEC, TB, DEC, TB.

        note that the SPR pipeline could have written a new value to DEC;
        the regfile has "passthrough" on it so the read here still sees
        the latest value.

        see v3.0B p1097-1099 for Timer Resource and p1065 and p1076

        NOTE(review): extraction lost the `m.next` transitions out of
        DEC_WRITE, TB_READ and TB_WRITE, and the `new_tb = Signal(64)`
        declaration (`new_tb` is used below but never bound here).
        Verify the state-transition chain against upstream.
        """
        comb, sync = m.d.comb, m.d.sync
        fast_rf = self.core.regs.rf['fast']
        fast_r_dectb = fast_rf.r_ports['issue']  # DEC/TB
        fast_w_dectb = fast_rf.w_ports['issue']  # DEC/TB

        with m.FSM() as fsm:

            # initiates read of current DEC
            with m.State("DEC_READ"):
                comb += fast_r_dectb.addr.eq(FastRegs.DEC)
                comb += fast_r_dectb.ren.eq(1)
                m.next = "DEC_WRITE"

            # waits for DEC read to arrive (1 cycle), updates with new value
            with m.State("DEC_WRITE"):
                new_dec = Signal(64)
                # TODO: MSR.LPCR 32-bit decrement mode
                comb += new_dec.eq(fast_r_dectb.data_o - 1)  # DEC counts down
                comb += fast_w_dectb.addr.eq(FastRegs.DEC)
                comb += fast_w_dectb.wen.eq(1)
                comb += fast_w_dectb.data_i.eq(new_dec)
                sync += spr_dec.eq(new_dec)  # copy into cur_state for decoder

            # initiates read of current TB
            with m.State("TB_READ"):
                comb += fast_r_dectb.addr.eq(FastRegs.TB)
                comb += fast_r_dectb.ren.eq(1)

            # waits for read TB to arrive, initiates write of current TB
            with m.State("TB_WRITE"):
                comb += new_tb.eq(fast_r_dectb.data_o + 1)  # TB counts up
                comb += fast_w_dectb.addr.eq(FastRegs.TB)
                comb += fast_w_dectb.wen.eq(1)
                comb += fast_w_dectb.data_i.eq(new_tb)
        # NOTE(review): extraction gap — the enclosing `def ports(self):`
        # header (and some intermediate yields, orig lines 1191-1192)
        # were lost; this is the surviving body of a ports generator
        # that yields the externally-visible signals of this module.
        yield from self.pc_i.ports()
        yield from self.core.ports()
        yield from self.imem.ports()
        yield self.core_bigendian_i
    def external_ports(self):
        """Collect the top-level I/O signals: PC in/out, status flags,
        JTAG, DMI, instruction/data wishbone buses, SRAM buses, XICS,
        and GPIO.

        NOTE(review): extraction lost part of the first list literal
        (orig lines 1204-1206, including its closing bracket — restored
        below so the block parses) and the trailing `return ports`.
        Verify the full port list against upstream.
        """
        ports = self.pc_i.ports()
        ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
                  ]  # NOTE(review): remaining entries lost in extraction

        ports += list(self.jtag.external_ports())
        # don't add DMI if JTAG is enabled
        ports += list(self.dbg.dmi.ports())

        ports += list(self.imem.ibus.fields.values())
        ports += list(self.core.l0.cmpi.wb_bus().fields.values())

        for sram in self.sram4k:
            ports += list(sram.bus.fields.values())

        ports += list(self.xics_icp.bus.fields.values())
        ports += list(self.xics_ics.bus.fields.values())
        ports.append(self.int_level_i)

        ports += list(self.simple_gpio.bus.fields.values())
        ports.append(self.gpio_o)
class TestIssuer(Elaboratable):
    """Wrapper around TestIssuerInternal that adds a (dummy) PLL and the
    "coresync" / "pllclk" clock-domain plumbing for ASIC/FPGA builds.

    NOTE(review): extraction lost several lines throughout this class:
    `m = Module()` and the `comb` binding in elaborate, the
    DomainRenamer wrapping hinted at by the comments, `return m`, the
    `def ports(self):` header before the port-list return, and the
    `return ports` in external_ports.  What survives is reproduced
    verbatim below — verify against upstream before use.
    """

    def __init__(self, pspec):
        # the actual core/issuer, and a stand-in PLL instance
        self.ti = TestIssuerInternal(pspec)
        self.pll = DummyPLL(instance=True)

        # PLL direct clock or not
        self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
        self.pll_test_o = Signal(reset_less=True)
        self.pll_vco_o = Signal(reset_less=True)
        self.clk_sel_i = Signal(2, reset_less=True)

    def elaborate(self, platform):
        # TestIssuer runs at direct clock
        m.submodules.ti = ti = self.ti
        cd_int = ClockDomain("coresync")

        # ClockSelect runs at PLL output internal clock rate
        m.submodules.wrappll = pll = self.pll

        # add clock domains from PLL
        cd_pll = ClockDomain("pllclk")

        # PLL clock established.  has the side-effect of running clklsel
        # at the PLL's speed (see DomainRenamer("pllclk") above)
        pllclk = ClockSignal("pllclk")
        comb += pllclk.eq(pll.clk_pll_o)

        # wire up external 24mhz to PLL
        comb += pll.clk_24_i.eq(ClockSignal())

        # output 18 mhz PLL test signal, and analog oscillator out
        comb += self.pll_test_o.eq(pll.pll_test_o)
        comb += self.pll_vco_o.eq(pll.pll_vco_o)

        # input to pll clock selection
        comb += pll.clk_sel_i.eq(self.clk_sel_i)

        # now wire up ResetSignals.  don't mind them being in this domain
        pll_rst = ResetSignal("pllclk")
        comb += pll_rst.eq(ResetSignal())

        # internal clock is set to selector clock-out.  has the side-effect
        # of running TestIssuer at this speed (see DomainRenamer("intclk")
        # above)
        intclk = ClockSignal("coresync")
        # NOTE(review): the two assignments below are mutually exclusive
        # as written; an `if self.pll_en:`/`else:` split was presumably
        # lost in extraction — confirm against upstream.
        comb += intclk.eq(pllclk)
        comb += intclk.eq(ClockSignal())

        # NOTE(review): this return belongs to a lost `def ports(self):`
        # header, not to elaborate (which would return m).
        return list(self.ti.ports()) + list(self.pll.ports()) + \
               [ClockSignal(), ResetSignal()]

    def external_ports(self):
        # delegate to the internal issuer, then add clock/reset and
        # the PLL-related top-level pins
        ports = self.ti.external_ports()
        ports.append(ClockSignal())
        ports.append(ResetSignal())
        ports.append(self.clk_sel_i)
        ports.append(self.pll_test_o)
        ports.append(self.pll_vco_o)
1307 if __name__
== '__main__':
1308 units
= {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1314 pspec
= TestMemPspec(ldst_ifacetype
='bare_wb',
1315 imem_ifacetype
='bare_wb',
1320 dut
= TestIssuer(pspec
)
1321 vl
= main(dut
, ports
=dut
.ports(), name
="test_issuer")
1323 if len(sys
.argv
) == 1:
1324 vl
= rtlil
.convert(dut
, ports
=dut
.external_ports(), name
="test_issuer")
1325 with
open("test_issuer.il", "w") as f
: