1 """simple core issuer
2
3 not in any way intended for production use. this runs a FSM that:
4
5 * reads the Program Counter from StateRegs
6 * reads an instruction from a fixed-size Test Memory
7 * issues it to the Simple Core
8 * waits for it to complete
9 * increments the PC
10 * does it all over again
11
12 the purpose of this module is to verify the functional correctness
13 of the Function Units in the absolute simplest and clearest possible
14 way, and to provide something that can be further incrementally
15 improved.
16 """
17
18 from nmigen import (Elaboratable, Module, Signal, ClockSignal, ResetSignal,
19 ClockDomain, DomainRenamer, Mux, Const, Repl, Cat)
20 from nmigen.cli import rtlil
21 from nmigen.cli import main
22 import sys
23
24 from nmutil.singlepipe import ControlBase
25 from soc.simple.core_data import FetchOutput, FetchInput
26
27 from nmigen.lib.coding import PriorityEncoder
28
29 from openpower.decoder.power_decoder import create_pdecode
30 from openpower.decoder.power_decoder2 import PowerDecode2, SVP64PrefixDecoder
31 from openpower.decoder.decode2execute1 import IssuerDecode2ToOperand
32 from openpower.decoder.decode2execute1 import Data
33 from openpower.decoder.power_enums import (MicrOp, SVP64PredInt, SVP64PredCR,
34 SVP64PredMode)
35 from openpower.state import CoreState
36 from openpower.consts import (CR, SVP64CROffs)
37 from soc.experiment.testmem import TestMemory # test only for instructions
38 from soc.regfile.regfiles import StateRegs, FastRegs
39 from soc.simple.core import NonProductionCore
40 from soc.config.test.test_loadstore import TestMemPspec
41 from soc.config.ifetch import ConfigFetchUnit
42 from soc.debug.dmi import CoreDebug, DMIInterface
43 from soc.debug.jtag import JTAG
44 from soc.config.pinouts import get_pinspecs
45 from soc.interrupts.xics import XICS_ICP, XICS_ICS
46 from soc.bus.simple_gpio import SimpleGPIO
47 from soc.bus.SPBlock512W64B8W import SPBlock512W64B8W
48 from soc.clock.select import ClockSelect
49 from soc.clock.dummypll import DummyPLL
50 from openpower.sv.svstate import SVSTATERec
51
52
53 from nmutil.util import rising_edge
54
55
56 def get_insn(f_instr_o, pc):
57 if f_instr_o.width == 32:
58 return f_instr_o
59 else:
60 # 64-bit: bit 2 of pc decides which word to select
61 return f_instr_o.word_select(pc[2], 32)
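    # example (illustrative): with a 64-bit fetch port, pc=0x8 (bit 2 clear)
    # selects the lower 32-bit word and pc=0xC (bit 2 set) selects the upper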
62
63 # gets state input or reads from state regfile
64
65
66 def state_get(m, core_rst, state_i, name, regfile, regnum):
67 comb = m.d.comb
68 sync = m.d.sync
69 # read the PC
70 res = Signal(64, reset_less=True, name=name)
71 res_ok_delay = Signal(name="%s_ok_delay" % name)
72 with m.If(~core_rst):
73 sync += res_ok_delay.eq(~state_i.ok)
74 with m.If(state_i.ok):
75 # incoming override (start from pc_i)
76 comb += res.eq(state_i.data)
77 with m.Else():
78 # otherwise read StateRegs regfile for PC...
79 comb += regfile.ren.eq(1 << regnum)
80 # ... but on a 1-clock delay
81 with m.If(res_ok_delay):
82 comb += res.eq(regfile.o_data)
83 return res
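# example usage (as in TestIssuerInternal.elaborate, below):
#   pc = state_get(m, core_rst, self.pc_i, "pc", self.state_r_pc, StateRegs.PC)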
84
85
86 def get_predint(m, mask, name):
87 """decode SVP64 predicate integer mask field to reg number and invert
88 this is identical to the equivalent function in ISACaller except that
89 it doesn't read the INT directly, it just decodes "what needs to be done"
90 i.e. which INT reg, whether it is shifted and whether it is bit-inverted.
91
92 * all1s is set to indicate that no mask is to be applied.
93 * regread indicates the GPR register number to be read
94 * invert is set to indicate that the register value is to be inverted
95     * unary indicates that the register contents select a single mask bit (1<<r3)
96 """
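    # e.g. mask == SVP64PredInt.R3_N.value gives regread=3 and invert=1,
    # i.e. "use the bit-inverted contents of GPR(3) as the predicate mask"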
97 comb = m.d.comb
98 regread = Signal(5, name=name+"regread")
99 invert = Signal(name=name+"invert")
100 unary = Signal(name=name+"unary")
101 all1s = Signal(name=name+"all1s")
102 with m.Switch(mask):
103 with m.Case(SVP64PredInt.ALWAYS.value):
104 comb += all1s.eq(1) # use 0b1111 (all ones)
105 with m.Case(SVP64PredInt.R3_UNARY.value):
106 comb += regread.eq(3)
107 comb += unary.eq(1) # 1<<r3 - shift r3 (single bit)
108 with m.Case(SVP64PredInt.R3.value):
109 comb += regread.eq(3)
110 with m.Case(SVP64PredInt.R3_N.value):
111 comb += regread.eq(3)
112 comb += invert.eq(1)
113 with m.Case(SVP64PredInt.R10.value):
114 comb += regread.eq(10)
115 with m.Case(SVP64PredInt.R10_N.value):
116 comb += regread.eq(10)
117 comb += invert.eq(1)
118 with m.Case(SVP64PredInt.R30.value):
119 comb += regread.eq(30)
120 with m.Case(SVP64PredInt.R30_N.value):
121 comb += regread.eq(30)
122 comb += invert.eq(1)
123 return regread, invert, unary, all1s
124
125
126 def get_predcr(m, mask, name):
127 """decode SVP64 predicate CR to reg number field and invert status
128 this is identical to _get_predcr in ISACaller
129 """
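    # e.g. mask == SVP64PredCR.GE.value gives idx=CR.LT with invert=1,
    # i.e. "GE" is implemented as "not LT"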
130 comb = m.d.comb
131 idx = Signal(2, name=name+"idx")
132 invert = Signal(name=name+"crinvert")
133 with m.Switch(mask):
134 with m.Case(SVP64PredCR.LT.value):
135 comb += idx.eq(CR.LT)
136 comb += invert.eq(0)
137 with m.Case(SVP64PredCR.GE.value):
138 comb += idx.eq(CR.LT)
139 comb += invert.eq(1)
140 with m.Case(SVP64PredCR.GT.value):
141 comb += idx.eq(CR.GT)
142 comb += invert.eq(0)
143 with m.Case(SVP64PredCR.LE.value):
144 comb += idx.eq(CR.GT)
145 comb += invert.eq(1)
146 with m.Case(SVP64PredCR.EQ.value):
147 comb += idx.eq(CR.EQ)
148 comb += invert.eq(0)
149 with m.Case(SVP64PredCR.NE.value):
150 comb += idx.eq(CR.EQ)
151 comb += invert.eq(1)
152 with m.Case(SVP64PredCR.SO.value):
153 comb += idx.eq(CR.SO)
154 comb += invert.eq(0)
155 with m.Case(SVP64PredCR.NS.value):
156 comb += idx.eq(CR.SO)
157 comb += invert.eq(1)
158 return idx, invert
159
160
161 # Fetch Finite State Machine.
162 # WARNING: there are currently DriverConflicts but it's actually working.
163 # TODO, here: everything that is global in nature, information from the
164 # main TestIssuerInternal, needs to move to either ispec() or ospec().
165 # not only that: TestIssuerInternal.imem can entirely move into here
166 # because imem is only ever accessed inside the FetchFSM.
167 class FetchFSM(ControlBase):
168 def __init__(self, allow_overlap, svp64_en, imem, core_rst,
169 pdecode2, cur_state,
170 dbg, core, svstate, nia, is_svp64_mode):
171 self.allow_overlap = allow_overlap
172 self.svp64_en = svp64_en
173 self.imem = imem
174 self.core_rst = core_rst
175 self.pdecode2 = pdecode2
176 self.cur_state = cur_state
177 self.dbg = dbg
178 self.core = core
179 self.svstate = svstate
180 self.nia = nia
181 self.is_svp64_mode = is_svp64_mode
182
183 # set up pipeline ControlBase and allocate i/o specs
184 # (unusual: normally done by the Pipeline API)
185 super().__init__(stage=self)
186 self.p.i_data, self.n.o_data = self.new_specs(None)
187 self.i, self.o = self.p.i_data, self.n.o_data
188
189 # next 3 functions are Stage API Compliance
190 def setup(self, m, i):
191 pass
192
193 def ispec(self):
194 return FetchInput()
195
196 def ospec(self):
197 return FetchOutput()
198
199 def elaborate(self, platform):
200 """fetch FSM
201
202 this FSM performs fetch of raw instruction data, partial-decodes
203 it 32-bit at a time to detect SVP64 prefixes, and will optionally
204 read a 2nd 32-bit quantity if that occurs.
205 """
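        # state flow (see FSM below): IDLE -> INSN_READ -> INSN_READY -> IDLE,
        # with an extra INSN_READ2 hop between INSN_READ and INSN_READY when
        # an SVP64 prefix is detected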
206 m = super().elaborate(platform)
207
208 dbg = self.dbg
209         core = self.core
210 pc = self.i.pc
211 svstate = self.svstate
212 nia = self.nia
213 is_svp64_mode = self.is_svp64_mode
214 fetch_pc_o_ready = self.p.o_ready
215 fetch_pc_i_valid = self.p.i_valid
216 fetch_insn_o_valid = self.n.o_valid
217 fetch_insn_i_ready = self.n.i_ready
218
219 comb = m.d.comb
220 sync = m.d.sync
221 pdecode2 = self.pdecode2
222 cur_state = self.cur_state
223 dec_opcode_o = pdecode2.dec.raw_opcode_in # raw opcode
224
225 msr_read = Signal(reset=1)
226
227 # don't read msr every cycle
228 staterf = self.core.regs.rf['state']
229 state_r_msr = staterf.r_ports['msr'] # MSR rd
230
231 comb += state_r_msr.ren.eq(0)
232
233 with m.FSM(name='fetch_fsm'):
234
235 # waiting (zzz)
236 with m.State("IDLE"):
237 with m.If(~dbg.stopping_o):
238 comb += fetch_pc_o_ready.eq(1)
239 with m.If(fetch_pc_i_valid):
240 # instruction allowed to go: start by reading the PC
241 # capture the PC and also drop it into Insn Memory
242 # we have joined a pair of combinatorial memory
243 # lookups together. this is Generally Bad.
244 comb += self.imem.a_pc_i.eq(pc)
245 comb += self.imem.a_i_valid.eq(1)
246 comb += self.imem.f_i_valid.eq(1)
247 sync += cur_state.pc.eq(pc)
248 sync += cur_state.svstate.eq(svstate) # and svstate
249
250 # initiate read of MSR. arrives one clock later
251 comb += state_r_msr.ren.eq(1 << StateRegs.MSR)
252 sync += msr_read.eq(0)
253
254 m.next = "INSN_READ" # move to "wait for bus" phase
255
256             # wait for the instruction read to complete (MSR arrives here too)
257 with m.State("INSN_READ"):
258 if self.allow_overlap:
259 stopping = dbg.stopping_o
260 else:
261 stopping = Const(0)
262 with m.If(stopping):
263 # stopping: jump back to idle
264 m.next = "IDLE"
265 with m.Else():
266 # one cycle later, msr/sv read arrives. valid only once.
267 with m.If(~msr_read):
268 sync += msr_read.eq(1) # yeah don't read it again
269 sync += cur_state.msr.eq(state_r_msr.o_data)
270 with m.If(self.imem.f_busy_o): # zzz...
271 # busy: stay in wait-read
272 comb += self.imem.a_i_valid.eq(1)
273 comb += self.imem.f_i_valid.eq(1)
274 with m.Else():
275 # not busy: instruction fetched
276 insn = get_insn(self.imem.f_instr_o, cur_state.pc)
277 if self.svp64_en:
278 svp64 = self.svp64
279 # decode the SVP64 prefix, if any
280 comb += svp64.raw_opcode_in.eq(insn)
281 comb += svp64.bigendian.eq(self.core_bigendian_i)
282 # pass the decoded prefix (if any) to PowerDecoder2
283 sync += pdecode2.sv_rm.eq(svp64.svp64_rm)
284 sync += pdecode2.is_svp64_mode.eq(is_svp64_mode)
285 # remember whether this is a prefixed instruction,
286 # so the FSM can readily loop when VL==0
287 sync += is_svp64_mode.eq(svp64.is_svp64_mode)
288 # calculate the address of the following instruction
289 insn_size = Mux(svp64.is_svp64_mode, 8, 4)
290 sync += nia.eq(cur_state.pc + insn_size)
291 with m.If(~svp64.is_svp64_mode):
292 # with no prefix, store the instruction
293 # and hand it directly to the next FSM
294 sync += dec_opcode_o.eq(insn)
295 m.next = "INSN_READY"
296 with m.Else():
297 # fetch the rest of the instruction from memory
298 comb += self.imem.a_pc_i.eq(cur_state.pc + 4)
299 comb += self.imem.a_i_valid.eq(1)
300 comb += self.imem.f_i_valid.eq(1)
301 m.next = "INSN_READ2"
302 else:
303 # not SVP64 - 32-bit only
304 sync += nia.eq(cur_state.pc + 4)
305 sync += dec_opcode_o.eq(insn)
306 m.next = "INSN_READY"
307
308 with m.State("INSN_READ2"):
309 with m.If(self.imem.f_busy_o): # zzz...
310 # busy: stay in wait-read
311 comb += self.imem.a_i_valid.eq(1)
312 comb += self.imem.f_i_valid.eq(1)
313 with m.Else():
314 # not busy: instruction fetched
315 insn = get_insn(self.imem.f_instr_o, cur_state.pc+4)
316 sync += dec_opcode_o.eq(insn)
317 m.next = "INSN_READY"
318 # TODO: probably can start looking at pdecode2.rm_dec
319 # here or maybe even in INSN_READ state, if svp64_mode
320 # detected, in order to trigger - and wait for - the
321 # predicate reading.
322 if self.svp64_en:
323 pmode = pdecode2.rm_dec.predmode
324 """
325 if pmode != SVP64PredMode.ALWAYS.value:
326 fire predicate loading FSM and wait before
327 moving to INSN_READY
328 else:
329 sync += self.srcmask.eq(-1) # set to all 1s
330 sync += self.dstmask.eq(-1) # set to all 1s
331 m.next = "INSN_READY"
332 """
333
334 with m.State("INSN_READY"):
335 # hand over the instruction, to be decoded
336 comb += fetch_insn_o_valid.eq(1)
337 with m.If(fetch_insn_i_ready):
338 m.next = "IDLE"
339
340 # whatever was done above, over-ride it if core reset is held
341 with m.If(self.core_rst):
342 sync += nia.eq(0)
343
344 return m
345
346
347 class TestIssuerInternal(Elaboratable):
348 """TestIssuer - reads instructions from TestMemory and issues them
349
350     efficiency and speed are not the main goals here: functional correctness
351     and code clarity are. optimisations (which almost 100% interfere with
352 easy understanding) come later.
353 """
354
355 def __init__(self, pspec):
356
357         # test if SVP64 is to be enabled
358 self.svp64_en = hasattr(pspec, "svp64") and (pspec.svp64 == True)
359
360 # and if regfiles are reduced
361 self.regreduce_en = (hasattr(pspec, "regreduce") and
362 (pspec.regreduce == True))
363
364 # and if overlap requested
365 self.allow_overlap = (hasattr(pspec, "allow_overlap") and
366 (pspec.allow_overlap == True))
367
368 # JTAG interface. add this right at the start because if it's
369 # added it *modifies* the pspec, by adding enable/disable signals
370 # for parts of the rest of the core
371 self.jtag_en = hasattr(pspec, "debug") and pspec.debug == 'jtag'
372         self.dbg_domain = "sync" # sigh "dbgsync" too problematic
373 # self.dbg_domain = "dbgsync" # domain for DMI/JTAG clock
374 if self.jtag_en:
375 # XXX MUST keep this up-to-date with litex, and
376 # soc-cocotb-sim, and err.. all needs sorting out, argh
377 subset = ['uart',
378 'mtwi',
379 'eint', 'gpio', 'mspi0',
380 # 'mspi1', - disabled for now
381 # 'pwm', 'sd0', - disabled for now
382 'sdr']
383 self.jtag = JTAG(get_pinspecs(subset=subset),
384 domain=self.dbg_domain)
385 # add signals to pspec to enable/disable icache and dcache
386         # (or data and instruction wishbone if icache/dcache not included)
387 # https://bugs.libre-soc.org/show_bug.cgi?id=520
388 # TODO: do we actually care if these are not domain-synchronised?
389 # honestly probably not.
390 pspec.wb_icache_en = self.jtag.wb_icache_en
391 pspec.wb_dcache_en = self.jtag.wb_dcache_en
392 self.wb_sram_en = self.jtag.wb_sram_en
393 else:
394 self.wb_sram_en = Const(1)
395
396 # add 4k sram blocks?
397 self.sram4x4k = (hasattr(pspec, "sram4x4kblock") and
398 pspec.sram4x4kblock == True)
399 if self.sram4x4k:
400 self.sram4k = []
401 for i in range(4):
402 self.sram4k.append(SPBlock512W64B8W(name="sram4k_%d" % i,
403 # features={'err'}
404 ))
405
406 # add interrupt controller?
407 self.xics = hasattr(pspec, "xics") and pspec.xics == True
408 if self.xics:
409 self.xics_icp = XICS_ICP()
410 self.xics_ics = XICS_ICS()
411 self.int_level_i = self.xics_ics.int_level_i
412
413 # add GPIO peripheral?
414 self.gpio = hasattr(pspec, "gpio") and pspec.gpio == True
415 if self.gpio:
416 self.simple_gpio = SimpleGPIO()
417 self.gpio_o = self.simple_gpio.gpio_o
418
419 # main instruction core. suitable for prototyping / demo only
420 self.core = core = NonProductionCore(pspec)
421 self.core_rst = ResetSignal("coresync")
422
423 # instruction decoder. goes into Trap Record
424 #pdecode = create_pdecode()
425 self.cur_state = CoreState("cur") # current state (MSR/PC/SVSTATE)
426 self.pdecode2 = PowerDecode2(None, state=self.cur_state,
427 opkls=IssuerDecode2ToOperand,
428 svp64_en=self.svp64_en,
429 regreduce_en=self.regreduce_en)
430 pdecode = self.pdecode2.dec
431
432 if self.svp64_en:
433 self.svp64 = SVP64PrefixDecoder() # for decoding SVP64 prefix
434
435 # Test Instruction memory
436 self.imem = ConfigFetchUnit(pspec).fu
437
438 # DMI interface
439 self.dbg = CoreDebug()
440
441 # instruction go/monitor
442 self.pc_o = Signal(64, reset_less=True)
443 self.pc_i = Data(64, "pc_i") # set "ok" to indicate "please change me"
444 self.svstate_i = Data(64, "svstate_i") # ditto
445 self.core_bigendian_i = Signal() # TODO: set based on MSR.LE
446 self.busy_o = Signal(reset_less=True)
447 self.memerr_o = Signal(reset_less=True)
448
449 # STATE regfile read /write ports for PC, MSR, SVSTATE
450 staterf = self.core.regs.rf['state']
451 self.state_r_pc = staterf.r_ports['cia'] # PC rd
452 self.state_w_pc = staterf.w_ports['d_wr1'] # PC wr
453 self.state_r_sv = staterf.r_ports['sv'] # SVSTATE rd
454 self.state_w_sv = staterf.w_ports['sv'] # SVSTATE wr
455
456 # DMI interface access
457 intrf = self.core.regs.rf['int']
458 crrf = self.core.regs.rf['cr']
459 xerrf = self.core.regs.rf['xer']
460 self.int_r = intrf.r_ports['dmi'] # INT read
461 self.cr_r = crrf.r_ports['full_cr_dbg'] # CR read
462 self.xer_r = xerrf.r_ports['full_xer'] # XER read
463
464 if self.svp64_en:
465 # for predication
466 self.int_pred = intrf.r_ports['pred'] # INT predicate read
467 self.cr_pred = crrf.r_ports['cr_pred'] # CR predicate read
468
469 # hack method of keeping an eye on whether branch/trap set the PC
470 self.state_nia = self.core.regs.rf['state'].w_ports['nia']
471 self.state_nia.wen.name = 'state_nia_wen'
472
473 # pulse to synchronize the simulator at instruction end
474 self.insn_done = Signal()
475
476 # indicate any instruction still outstanding, in execution
477 self.any_busy = Signal()
478
479 if self.svp64_en:
480 # store copies of predicate masks
481 self.srcmask = Signal(64)
482 self.dstmask = Signal(64)
483
484 def fetch_predicate_fsm(self, m,
485 pred_insn_i_valid, pred_insn_o_ready,
486 pred_mask_o_valid, pred_mask_i_ready):
487 """fetch_predicate_fsm - obtains (constructs in the case of CR)
488 src/dest predicate masks
489
490 https://bugs.libre-soc.org/show_bug.cgi?id=617
491 the predicates can be read here, by using IntRegs r_ports['pred']
492 or CRRegs r_ports['pred']. in the case of CRs it will have to
493     be done through multiple reads, extracting one relevant field at a time.
494 later, a faster way would be to use the 32-bit-wide CR port but
495     that requires more complex decoding here. equivalent code used in
496 ISACaller is "from openpower.decoder.isa.caller import get_predcr"
497
498 note: this ENTIRE FSM is not to be called when svp64 is disabled
499 """
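        # state flow (see FSM below): FETCH_PRED_IDLE -> INT_DST_READ /
        # INT_SRC_READ (integer predicates) or CR_READ (CR predicates) ->
        # FETCH_PRED_SHIFT_MASK -> FETCH_PRED_DONE.  "ALWAYS" predicates skip
        # straight to FETCH_PRED_DONE with all-1s masks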
500 comb = m.d.comb
501 sync = m.d.sync
502 pdecode2 = self.pdecode2
503 rm_dec = pdecode2.rm_dec # SVP64RMModeDecode
504 predmode = rm_dec.predmode
505 srcpred, dstpred = rm_dec.srcpred, rm_dec.dstpred
506 cr_pred, int_pred = self.cr_pred, self.int_pred # read regfiles
507 # get src/dst step, so we can skip already used mask bits
508 cur_state = self.cur_state
509 srcstep = cur_state.svstate.srcstep
510 dststep = cur_state.svstate.dststep
511 cur_vl = cur_state.svstate.vl
512
513 # decode predicates
514 sregread, sinvert, sunary, sall1s = get_predint(m, srcpred, 's')
515 dregread, dinvert, dunary, dall1s = get_predint(m, dstpred, 'd')
516 sidx, scrinvert = get_predcr(m, srcpred, 's')
517 didx, dcrinvert = get_predcr(m, dstpred, 'd')
518
519 # store fetched masks, for either intpred or crpred
520 # when src/dst step is not zero, the skipped mask bits need to be
521 # shifted-out, before actually storing them in src/dest mask
522 new_srcmask = Signal(64, reset_less=True)
523 new_dstmask = Signal(64, reset_less=True)
524
525 with m.FSM(name="fetch_predicate"):
526
527 with m.State("FETCH_PRED_IDLE"):
528 comb += pred_insn_o_ready.eq(1)
529 with m.If(pred_insn_i_valid):
530 with m.If(predmode == SVP64PredMode.INT):
531 # skip fetching destination mask register, when zero
532 with m.If(dall1s):
533 sync += new_dstmask.eq(-1)
534 # directly go to fetch source mask register
535 # guaranteed not to be zero (otherwise predmode
536 # would be SVP64PredMode.ALWAYS, not INT)
537 comb += int_pred.addr.eq(sregread)
538 comb += int_pred.ren.eq(1)
539 m.next = "INT_SRC_READ"
540 # fetch destination predicate register
541 with m.Else():
542 comb += int_pred.addr.eq(dregread)
543 comb += int_pred.ren.eq(1)
544 m.next = "INT_DST_READ"
545 with m.Elif(predmode == SVP64PredMode.CR):
546 # go fetch masks from the CR register file
547 sync += new_srcmask.eq(0)
548 sync += new_dstmask.eq(0)
549 m.next = "CR_READ"
550 with m.Else():
551 sync += self.srcmask.eq(-1)
552 sync += self.dstmask.eq(-1)
553 m.next = "FETCH_PRED_DONE"
554
555 with m.State("INT_DST_READ"):
556 # store destination mask
557 inv = Repl(dinvert, 64)
558 with m.If(dunary):
559 # set selected mask bit for 1<<r3 mode
560 dst_shift = Signal(range(64))
561 comb += dst_shift.eq(self.int_pred.o_data & 0b111111)
562 sync += new_dstmask.eq(1 << dst_shift)
563 with m.Else():
564 # invert mask if requested
565 sync += new_dstmask.eq(self.int_pred.o_data ^ inv)
566 # skip fetching source mask register, when zero
567 with m.If(sall1s):
568 sync += new_srcmask.eq(-1)
569 m.next = "FETCH_PRED_SHIFT_MASK"
570 # fetch source predicate register
571 with m.Else():
572 comb += int_pred.addr.eq(sregread)
573 comb += int_pred.ren.eq(1)
574 m.next = "INT_SRC_READ"
575
576 with m.State("INT_SRC_READ"):
577 # store source mask
578 inv = Repl(sinvert, 64)
579 with m.If(sunary):
580 # set selected mask bit for 1<<r3 mode
581 src_shift = Signal(range(64))
582 comb += src_shift.eq(self.int_pred.o_data & 0b111111)
583 sync += new_srcmask.eq(1 << src_shift)
584 with m.Else():
585 # invert mask if requested
586 sync += new_srcmask.eq(self.int_pred.o_data ^ inv)
587 m.next = "FETCH_PRED_SHIFT_MASK"
588
589 # fetch masks from the CR register file
590 # implements the following loop:
591 # idx, inv = get_predcr(mask)
592 # mask = 0
593 # for cr_idx in range(vl):
594 # cr = crl[cr_idx + SVP64CROffs.CRPred] # takes one cycle
595 # if cr[idx] ^ inv:
596 # mask |= 1 << cr_idx
597 # return mask
598 with m.State("CR_READ"):
599 # CR index to be read, which will be ready by the next cycle
600 cr_idx = Signal.like(cur_vl, reset_less=True)
601 # submit the read operation to the regfile
602 with m.If(cr_idx != cur_vl):
603 # the CR read port is unary ...
604 # ren = 1 << cr_idx
605 # ... in MSB0 convention ...
606 # ren = 1 << (7 - cr_idx)
607 # ... and with an offset:
608 # ren = 1 << (7 - off - cr_idx)
609 idx = SVP64CROffs.CRPred + cr_idx
610 comb += cr_pred.ren.eq(1 << (7 - idx))
611 # signal data valid in the next cycle
612 cr_read = Signal(reset_less=True)
613 sync += cr_read.eq(1)
614 # load the next index
615 sync += cr_idx.eq(cr_idx + 1)
616 with m.Else():
617 # exit on loop end
618 sync += cr_read.eq(0)
619 sync += cr_idx.eq(0)
620 m.next = "FETCH_PRED_SHIFT_MASK"
621 with m.If(cr_read):
622 # compensate for the one cycle delay on the regfile
623 cur_cr_idx = Signal.like(cur_vl)
624 comb += cur_cr_idx.eq(cr_idx - 1)
625 # read the CR field, select the appropriate bit
626 cr_field = Signal(4)
627 scr_bit = Signal()
628 dcr_bit = Signal()
629 comb += cr_field.eq(cr_pred.o_data)
630 comb += scr_bit.eq(cr_field.bit_select(sidx, 1)
631 ^ scrinvert)
632 comb += dcr_bit.eq(cr_field.bit_select(didx, 1)
633 ^ dcrinvert)
634 # set the corresponding mask bit
635 bit_to_set = Signal.like(self.srcmask)
636 comb += bit_to_set.eq(1 << cur_cr_idx)
637 with m.If(scr_bit):
638 sync += new_srcmask.eq(new_srcmask | bit_to_set)
639 with m.If(dcr_bit):
640 sync += new_dstmask.eq(new_dstmask | bit_to_set)
641
642 with m.State("FETCH_PRED_SHIFT_MASK"):
643 # shift-out skipped mask bits
644 sync += self.srcmask.eq(new_srcmask >> srcstep)
645 sync += self.dstmask.eq(new_dstmask >> dststep)
646 m.next = "FETCH_PRED_DONE"
647
648 with m.State("FETCH_PRED_DONE"):
649 comb += pred_mask_o_valid.eq(1)
650 with m.If(pred_mask_i_ready):
651 m.next = "FETCH_PRED_IDLE"
652
653 def issue_fsm(self, m, core, pc_changed, sv_changed, nia,
654 dbg, core_rst, is_svp64_mode,
655 fetch_pc_o_ready, fetch_pc_i_valid,
656 fetch_insn_o_valid, fetch_insn_i_ready,
657 pred_insn_i_valid, pred_insn_o_ready,
658 pred_mask_o_valid, pred_mask_i_ready,
659 exec_insn_i_valid, exec_insn_o_ready,
660 exec_pc_o_valid, exec_pc_i_ready):
661 """issue FSM
662
663 decode / issue FSM. this interacts with the "fetch" FSM
664 through fetch_insn_ready/valid (incoming) and fetch_pc_ready/valid
665 (outgoing). also interacts with the "execute" FSM
666 through exec_insn_ready/valid (outgoing) and exec_pc_ready/valid
667 (incoming).
668 SVP64 RM prefixes have already been set up by the
669 "fetch" phase, so execute is fairly straightforward.
670 """
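        # state flow (see FSM below): ISSUE_START -> INSN_WAIT ->
        # (PRED_START -> MASK_WAIT -> PRED_SKIP, SVP64 only) -> DECODE_SV ->
        # INSN_EXECUTE -> EXECUTE_WAIT, which loops back to PRED_SKIP (next
        # element), DECODE_SV (exception re-run) or ISSUE_START (done)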
671
672 comb = m.d.comb
673 sync = m.d.sync
674 pdecode2 = self.pdecode2
675 cur_state = self.cur_state
676
677 # temporaries
678 dec_opcode_i = pdecode2.dec.raw_opcode_in # raw opcode
679
680 # for updating svstate (things like srcstep etc.)
681 update_svstate = Signal() # set this (below) if updating
682 new_svstate = SVSTATERec("new_svstate")
683 comb += new_svstate.eq(cur_state.svstate)
684
685 # precalculate srcstep+1 and dststep+1
686 cur_srcstep = cur_state.svstate.srcstep
687 cur_dststep = cur_state.svstate.dststep
688 next_srcstep = Signal.like(cur_srcstep)
689 next_dststep = Signal.like(cur_dststep)
690 comb += next_srcstep.eq(cur_state.svstate.srcstep+1)
691 comb += next_dststep.eq(cur_state.svstate.dststep+1)
692
693 # note if an exception happened. in a pipelined or OoO design
694 # this needs to be accompanied by "shadowing" (or stalling)
695 exc_happened = self.core.o.exc_happened
696 # also note instruction fetch failed
697 if hasattr(core, "icache"):
698 fetch_failed = core.icache.i_out.fetch_failed
699 else:
700 fetch_failed = Const(0, 1)
701 # set to zero initially
702 sync += pdecode2.instr_fault.eq(0)
703
704 with m.FSM(name="issue_fsm"):
705
706 # sync with the "fetch" phase which is reading the instruction
707 # at this point, there is no instruction running, that
708             # at this point, there is no instruction running that
709 with m.State("ISSUE_START"):
710 # wait on "core stop" release, before next fetch
711 # need to do this here, in case we are in a VL==0 loop
712 with m.If(~dbg.core_stop_o & ~core_rst):
713 comb += fetch_pc_i_valid.eq(1) # tell fetch to start
714 with m.If(fetch_pc_o_ready): # fetch acknowledged us
715 m.next = "INSN_WAIT"
716 with m.Else():
717 # tell core it's stopped, and acknowledge debug handshake
718 comb += dbg.core_stopped_i.eq(1)
719 # while stopped, allow updating the PC and SVSTATE
720 with m.If(self.pc_i.ok):
721 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
722 comb += self.state_w_pc.i_data.eq(self.pc_i.data)
723 sync += pc_changed.eq(1)
724 with m.If(self.svstate_i.ok):
725 comb += new_svstate.eq(self.svstate_i.data)
726 comb += update_svstate.eq(1)
727 sync += sv_changed.eq(1)
728
729 # wait for an instruction to arrive from Fetch
730 with m.State("INSN_WAIT"):
731 if self.allow_overlap:
732 stopping = dbg.stopping_o
733 else:
734 stopping = Const(0)
735 with m.If(stopping):
736 # stopping: jump back to idle
737 m.next = "ISSUE_START"
738 with m.Else():
739 comb += fetch_insn_i_ready.eq(1)
740 with m.If(fetch_insn_o_valid):
741 # loop into ISSUE_START if it's a SVP64 instruction
742                         # and VL == 0. this is because VL==0 is a for-loop
743                         # from 0 to 0, i.e. the instruction is always a NOP.
744 cur_vl = cur_state.svstate.vl
745 with m.If(is_svp64_mode & (cur_vl == 0)):
746 # update the PC before fetching the next instruction
747 # since we are in a VL==0 loop, no instruction was
748 # executed that we could be overwriting
749 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
750 comb += self.state_w_pc.i_data.eq(nia)
751 comb += self.insn_done.eq(1)
752 m.next = "ISSUE_START"
753 with m.Else():
754 if self.svp64_en:
755 m.next = "PRED_START" # fetching predicate
756 else:
757 m.next = "DECODE_SV" # skip predication
758
759 with m.State("PRED_START"):
760 comb += pred_insn_i_valid.eq(1) # tell fetch_pred to start
761 with m.If(pred_insn_o_ready): # fetch_pred acknowledged us
762 m.next = "MASK_WAIT"
763
764 with m.State("MASK_WAIT"):
765 comb += pred_mask_i_ready.eq(1) # ready to receive the masks
766 with m.If(pred_mask_o_valid): # predication masks are ready
767 m.next = "PRED_SKIP"
768
769 # skip zeros in predicate
770 with m.State("PRED_SKIP"):
771 with m.If(~is_svp64_mode):
772 m.next = "DECODE_SV" # nothing to do
773 with m.Else():
774 if self.svp64_en:
775 pred_src_zero = pdecode2.rm_dec.pred_sz
776 pred_dst_zero = pdecode2.rm_dec.pred_dz
777
778 # new srcstep, after skipping zeros
779 skip_srcstep = Signal.like(cur_srcstep)
780 # value to be added to the current srcstep
781 src_delta = Signal.like(cur_srcstep)
782                         # add the leading-zero count to srcstep, if not in zero mode
783 with m.If(~pred_src_zero):
784 # priority encoder (count leading zeros)
785 # append guard bit, in case the mask is all zeros
786 pri_enc_src = PriorityEncoder(65)
787 m.submodules.pri_enc_src = pri_enc_src
788 comb += pri_enc_src.i.eq(Cat(self.srcmask,
789 Const(1, 1)))
790 comb += src_delta.eq(pri_enc_src.o)
791 # apply delta to srcstep
792 comb += skip_srcstep.eq(cur_srcstep + src_delta)
793 # shift-out all leading zeros from the mask
794 # plus the leading "one" bit
795 # TODO count leading zeros and shift-out the zero
796 # bits, in the same step, in hardware
797 sync += self.srcmask.eq(self.srcmask >> (src_delta+1))
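                        # worked example (illustrative): srcmask = 0b...0100
                        # gives src_delta=2, so srcstep advances by 2 and the
                        # mask is shifted right by 3 (two skipped zeros plus
                        # the bit just consumed)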
798
799 # same as above, but for dststep
800 skip_dststep = Signal.like(cur_dststep)
801 dst_delta = Signal.like(cur_dststep)
802 with m.If(~pred_dst_zero):
803 pri_enc_dst = PriorityEncoder(65)
804 m.submodules.pri_enc_dst = pri_enc_dst
805 comb += pri_enc_dst.i.eq(Cat(self.dstmask,
806 Const(1, 1)))
807 comb += dst_delta.eq(pri_enc_dst.o)
808 comb += skip_dststep.eq(cur_dststep + dst_delta)
809 sync += self.dstmask.eq(self.dstmask >> (dst_delta+1))
810
811 # TODO: initialize mask[VL]=1 to avoid passing past VL
812 with m.If((skip_srcstep >= cur_vl) |
813 (skip_dststep >= cur_vl)):
814 # end of VL loop. Update PC and reset src/dst step
815 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
816 comb += self.state_w_pc.i_data.eq(nia)
817 comb += new_svstate.srcstep.eq(0)
818 comb += new_svstate.dststep.eq(0)
819 comb += update_svstate.eq(1)
820 # synchronize with the simulator
821 comb += self.insn_done.eq(1)
822 # go back to Issue
823 m.next = "ISSUE_START"
824 with m.Else():
825 # update new src/dst step
826 comb += new_svstate.srcstep.eq(skip_srcstep)
827 comb += new_svstate.dststep.eq(skip_dststep)
828 comb += update_svstate.eq(1)
829 # proceed to Decode
830 m.next = "DECODE_SV"
831
832 # pass predicate mask bits through to satellite decoders
833 # TODO: for SIMD this will be *multiple* bits
834 sync += core.i.sv_pred_sm.eq(self.srcmask[0])
835 sync += core.i.sv_pred_dm.eq(self.dstmask[0])
836
837 # after src/dst step have been updated, we are ready
838 # to decode the instruction
839 with m.State("DECODE_SV"):
840 # decode the instruction
841 sync += core.i.e.eq(pdecode2.e)
842 sync += core.i.state.eq(cur_state)
843 sync += core.i.raw_insn_i.eq(dec_opcode_i)
844 sync += core.i.bigendian_i.eq(self.core_bigendian_i)
845 if self.svp64_en:
846 sync += core.i.sv_rm.eq(pdecode2.sv_rm)
847 # set RA_OR_ZERO detection in satellite decoders
848 sync += core.i.sv_a_nz.eq(pdecode2.sv_a_nz)
849 # and svp64 detection
850 sync += core.i.is_svp64_mode.eq(is_svp64_mode)
851 # and svp64 bit-rev'd ldst mode
852 ldst_dec = pdecode2.use_svp64_ldst_dec
853 sync += core.i.use_svp64_ldst_dec.eq(ldst_dec)
854 # after decoding, reset any previous exception condition,
855 # allowing it to be set again during the next execution
856 sync += pdecode2.ldst_exc.eq(0)
857
858 # update (highest priority) instruction fault
859 sync += pdecode2.instr_fault.eq(fetch_failed)
860
861 m.next = "INSN_EXECUTE" # move to "execute"
862
863 # handshake with execution FSM, move to "wait" once acknowledged
864 with m.State("INSN_EXECUTE"):
865 comb += exec_insn_i_valid.eq(1) # trigger execute
866 with m.If(exec_insn_o_ready): # execute acknowledged us
867 m.next = "EXECUTE_WAIT"
868
869 with m.State("EXECUTE_WAIT"):
870 # wait on "core stop" release, at instruction end
871 # need to do this here, in case we are in a VL>1 loop
872 with m.If(~dbg.core_stop_o & ~core_rst):
873 comb += exec_pc_i_ready.eq(1)
874 # see https://bugs.libre-soc.org/show_bug.cgi?id=636
875 # the exception info needs to be blatted into
876 # pdecode.ldst_exc, and the instruction "re-run".
877 # when ldst_exc.happened is set, the PowerDecoder2
878 # reacts very differently: it re-writes the instruction
879 # with a "trap" (calls PowerDecoder2.trap()) which
880 # will *overwrite* whatever was requested and jump the
881 # PC to the exception address, as well as alter MSR.
882 # nothing else needs to be done other than to note
883 # the change of PC and MSR (and, later, SVSTATE)
884 with m.If(exc_happened):
885 sync += pdecode2.ldst_exc.eq(core.fus.get_exc("ldst0"))
886
887 with m.If(exec_pc_o_valid):
888
889 # was this the last loop iteration?
890 is_last = Signal()
891 cur_vl = cur_state.svstate.vl
892 comb += is_last.eq(next_srcstep == cur_vl)
893
894 # return directly to Decode if Execute generated an
895 # exception.
896 with m.If(pdecode2.ldst_exc.happened):
897 m.next = "DECODE_SV"
898
899 # if either PC or SVSTATE were changed by the previous
900 # instruction, go directly back to Fetch, without
901 # updating either PC or SVSTATE
902 with m.Elif(pc_changed | sv_changed):
903 m.next = "ISSUE_START"
904
905 # also return to Fetch, when no output was a vector
906 # (regardless of SRCSTEP and VL), or when the last
907 # instruction was really the last one of the VL loop
908 with m.Elif((~pdecode2.loop_continue) | is_last):
909 # before going back to fetch, update the PC state
910 # register with the NIA.
911 # ok here we are not reading the branch unit.
912 # TODO: this just blithely overwrites whatever
913 # pipeline updated the PC
914 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
915 comb += self.state_w_pc.i_data.eq(nia)
916 # reset SRCSTEP before returning to Fetch
917 if self.svp64_en:
918 with m.If(pdecode2.loop_continue):
919 comb += new_svstate.srcstep.eq(0)
920 comb += new_svstate.dststep.eq(0)
921 comb += update_svstate.eq(1)
922 else:
923 comb += new_svstate.srcstep.eq(0)
924 comb += new_svstate.dststep.eq(0)
925 comb += update_svstate.eq(1)
926 m.next = "ISSUE_START"
927
928 # returning to Execute? then, first update SRCSTEP
929 with m.Else():
930 comb += new_svstate.srcstep.eq(next_srcstep)
931 comb += new_svstate.dststep.eq(next_dststep)
932 comb += update_svstate.eq(1)
933 # return to mask skip loop
934 m.next = "PRED_SKIP"
935
936 with m.Else():
937 comb += dbg.core_stopped_i.eq(1)
938 # while stopped, allow updating the PC and SVSTATE
939 with m.If(self.pc_i.ok):
940 comb += self.state_w_pc.wen.eq(1 << StateRegs.PC)
941 comb += self.state_w_pc.i_data.eq(self.pc_i.data)
942 sync += pc_changed.eq(1)
943 with m.If(self.svstate_i.ok):
944 comb += new_svstate.eq(self.svstate_i.data)
945 comb += update_svstate.eq(1)
946 sync += sv_changed.eq(1)
947
948 # check if svstate needs updating: if so, write it to State Regfile
949 with m.If(update_svstate):
950 comb += self.state_w_sv.wen.eq(1 << StateRegs.SVSTATE)
951 comb += self.state_w_sv.i_data.eq(new_svstate)
952 sync += cur_state.svstate.eq(new_svstate) # for next clock
953
954 def execute_fsm(self, m, core, pc_changed, sv_changed,
955 exec_insn_i_valid, exec_insn_o_ready,
956 exec_pc_o_valid, exec_pc_i_ready):
957 """execute FSM
958
959 execute FSM. this interacts with the "issue" FSM
960 through exec_insn_ready/valid (incoming) and exec_pc_ready/valid
961 (outgoing). SVP64 RM prefixes have already been set up by the
962 "issue" phase, so execute is fairly straightforward.
963 """
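        # state flow (see FSM below): INSN_START (issue to core) ->
        # INSN_ACTIVE (wait for core not-busy, noting PC/SVSTATE writes)
        # -> back to INSN_START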
964
965 comb = m.d.comb
966 sync = m.d.sync
967 pdecode2 = self.pdecode2
968
969 # temporaries
970 core_busy_o = core.n.o_data.busy_o # core is busy
971 core_ivalid_i = core.p.i_valid # instruction is valid
972
973 with m.FSM(name="exec_fsm"):
974
975 # waiting for instruction bus (stays there until not busy)
976 with m.State("INSN_START"):
977 comb += exec_insn_o_ready.eq(1)
978 with m.If(exec_insn_i_valid):
979 comb += core_ivalid_i.eq(1) # instruction is valid/issued
980 sync += sv_changed.eq(0)
981 sync += pc_changed.eq(0)
982 with m.If(core.p.o_ready): # only move if accepted
983 m.next = "INSN_ACTIVE" # move to "wait completion"
984
985 # instruction started: must wait till it finishes
986 with m.State("INSN_ACTIVE"):
987 # note changes to PC and SVSTATE
988 with m.If(self.state_nia.wen & (1 << StateRegs.SVSTATE)):
989 sync += sv_changed.eq(1)
990 with m.If(self.state_nia.wen & (1 << StateRegs.PC)):
991 sync += pc_changed.eq(1)
992 with m.If(~core_busy_o): # instruction done!
993 comb += exec_pc_o_valid.eq(1)
994 with m.If(exec_pc_i_ready):
995 # when finished, indicate "done".
996 # however, if there was an exception, the instruction
997 # is *not* yet done. this is an implementation
998 # detail: we choose to implement exceptions by
999 # taking the exception information from the LDST
1000 # unit, putting that *back* into the PowerDecoder2,
1001 # and *re-running the entire instruction*.
1002 # if we erroneously indicate "done" here, it is as if
1003 # there were *TWO* instructions:
1004 # 1) the failed LDST 2) a TRAP.
1005 with m.If(~pdecode2.ldst_exc.happened):
1006 comb += self.insn_done.eq(1)
1007 m.next = "INSN_START" # back to fetch
1008
1009 def setup_peripherals(self, m):
1010 comb, sync = m.d.comb, m.d.sync
1011
1012 # okaaaay so the debug module must be in coresync clock domain
1013 # but NOT its reset signal. to cope with this, set every single
1014 # submodule explicitly in coresync domain, debug and JTAG
1015 # in their own one but using *external* reset.
1016 csd = DomainRenamer("coresync")
1017 dbd = DomainRenamer(self.dbg_domain)
1018
1019 m.submodules.core = core = csd(self.core)
1020 m.submodules.imem = imem = csd(self.imem)
1021 m.submodules.dbg = dbg = dbd(self.dbg)
1022 if self.jtag_en:
1023 m.submodules.jtag = jtag = dbd(self.jtag)
1024 # TODO: UART2GDB mux, here, from external pin
1025 # see https://bugs.libre-soc.org/show_bug.cgi?id=499
1026 sync += dbg.dmi.connect_to(jtag.dmi)
1027
1028 cur_state = self.cur_state
1029
1030 # 4x 4k SRAM blocks. these simply "exist", they get routed in litex
1031 if self.sram4x4k:
1032 for i, sram in enumerate(self.sram4k):
1033 m.submodules["sram4k_%d" % i] = csd(sram)
1034 comb += sram.enable.eq(self.wb_sram_en)
1035
1036 # XICS interrupt handler
1037 if self.xics:
1038 m.submodules.xics_icp = icp = csd(self.xics_icp)
1039 m.submodules.xics_ics = ics = csd(self.xics_ics)
1040 comb += icp.ics_i.eq(ics.icp_o) # connect ICS to ICP
1041 sync += cur_state.eint.eq(icp.core_irq_o) # connect ICP to core
1042
1043 # GPIO test peripheral
1044 if self.gpio:
1045 m.submodules.simple_gpio = simple_gpio = csd(self.simple_gpio)
1046
1047 # connect one GPIO output to ICS bit 15 (like in microwatt soc.vhdl)
1048 # XXX causes litex ECP5 test to get wrong idea about input and output
1049 # (but works with verilator sim *sigh*)
1050 # if self.gpio and self.xics:
1051 # comb += self.int_level_i[15].eq(simple_gpio.gpio_o[0])
1052
1053 # instruction decoder
1054 pdecode = create_pdecode()
1055 m.submodules.dec2 = pdecode2 = csd(self.pdecode2)
1056 if self.svp64_en:
1057 m.submodules.svp64 = svp64 = csd(self.svp64)
1058
1059 # convenience
1060 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
1061 intrf = self.core.regs.rf['int']
1062
1063 # clock delay power-on reset
1064 cd_por = ClockDomain(reset_less=True)
1065 cd_sync = ClockDomain()
1066 core_sync = ClockDomain("coresync")
1067 m.domains += cd_por, cd_sync, core_sync
1068 if self.dbg_domain != "sync":
1069 dbg_sync = ClockDomain(self.dbg_domain)
1070 m.domains += dbg_sync
1071
1072 ti_rst = Signal(reset_less=True)
1073 delay = Signal(range(4), reset=3)
1074 with m.If(delay != 0):
1075 m.d.por += delay.eq(delay - 1)
1076 comb += cd_por.clk.eq(ClockSignal())
1077
1078 # power-on reset delay
1079 core_rst = ResetSignal("coresync")
1080         comb += ti_rst.eq((delay != 0) | dbg.core_rst_o | ResetSignal())
1081 comb += core_rst.eq(ti_rst)
1082
1083 # debug clock is same as coresync, but reset is *main external*
1084 if self.dbg_domain != "sync":
1085 dbg_rst = ResetSignal(self.dbg_domain)
1086 comb += dbg_rst.eq(ResetSignal())
1087
1088 # busy/halted signals from core
1089 core_busy_o = ~core.p.o_ready | core.n.o_data.busy_o # core is busy
1090 comb += self.busy_o.eq(core_busy_o)
1091 comb += pdecode2.dec.bigendian.eq(self.core_bigendian_i)
1092
1093 # temporary hack: says "go" immediately for both address gen and ST
1094 l0 = core.l0
1095 ldst = core.fus.fus['ldst0']
1096 st_go_edge = rising_edge(m, ldst.st.rel_o)
1097 # link addr-go direct to rel
1098 m.d.comb += ldst.ad.go_i.eq(ldst.ad.rel_o)
1099 m.d.comb += ldst.st.go_i.eq(st_go_edge) # link store-go to rising rel
1100
1101 def elaborate(self, platform):
1102 m = Module()
1103 # convenience
1104 comb, sync = m.d.comb, m.d.sync
1105 cur_state = self.cur_state
1106 pdecode2 = self.pdecode2
1107 dbg = self.dbg
1108 core = self.core
1109
1110 # set up peripherals and core
1111 core_rst = self.core_rst
1112 self.setup_peripherals(m)
1113
1114 # reset current state if core reset requested
1115 with m.If(core_rst):
1116 m.d.sync += self.cur_state.eq(0)
1117
1118 # PC and instruction from I-Memory
1119 comb += self.pc_o.eq(cur_state.pc)
1120 pc_changed = Signal() # note write to PC
1121 sv_changed = Signal() # note write to SVSTATE
1122
1123 # indicate to outside world if any FU is still executing
1124 comb += self.any_busy.eq(core.n.o_data.any_busy_o) # any FU executing
1125
1126 # read state either from incoming override or from regfile
1127 # TODO: really should be doing MSR in the same way
1128 pc = state_get(m, core_rst, self.pc_i,
1129 "pc", # read PC
1130 self.state_r_pc, StateRegs.PC)
1131 svstate = state_get(m, core_rst, self.svstate_i,
1132 "svstate", # read SVSTATE
1133 self.state_r_sv, StateRegs.SVSTATE)
1134
1135 # don't write pc every cycle
1136 comb += self.state_w_pc.wen.eq(0)
1137 comb += self.state_w_pc.i_data.eq(0)
1138
1139 # address of the next instruction, in the absence of a branch
1140 # depends on the instruction size
1141 nia = Signal(64)
1142
1143 # connect up debug signals
1144 # TODO comb += core.icache_rst_i.eq(dbg.icache_rst_o)
1145 comb += dbg.terminate_i.eq(core.o.core_terminate_o)
1146 comb += dbg.state.pc.eq(pc)
1147 comb += dbg.state.svstate.eq(svstate)
1148 comb += dbg.state.msr.eq(cur_state.msr)
1149
1150 # pass the prefix mode from Fetch to Issue, so the latter can loop
1151 # on VL==0
1152 is_svp64_mode = Signal()
1153
1154         # there are *THREE^WFOUR-if-SVP64-enabled* FSMs: fetch (32/64-bit),
1155 # issue, decode/execute, now joined by "Predicate fetch/calculate".
1156 # these are the handshake signals between each
1157
1158 # fetch FSM can run as soon as the PC is valid
1159 fetch_pc_i_valid = Signal() # Execute tells Fetch "start next read"
1160 fetch_pc_o_ready = Signal() # Fetch Tells SVSTATE "proceed"
1161
1162 # fetch FSM hands over the instruction to be decoded / issued
1163 fetch_insn_o_valid = Signal()
1164 fetch_insn_i_ready = Signal()
1165
1166 # predicate fetch FSM decodes and fetches the predicate
1167 pred_insn_i_valid = Signal()
1168 pred_insn_o_ready = Signal()
1169
1170 # predicate fetch FSM delivers the masks
1171 pred_mask_o_valid = Signal()
1172 pred_mask_i_ready = Signal()
1173
1174         # issue FSM delivers the instruction to be executed
1175 exec_insn_i_valid = Signal()
1176 exec_insn_o_ready = Signal()
1177
1178 # execute FSM, hands over the PC/SVSTATE back to the issue FSM
1179 exec_pc_o_valid = Signal()
1180 exec_pc_i_ready = Signal()
1181
1182 # the FSMs here are perhaps unusual in that they detect conditions
1183 # then "hold" information, combinatorially, for the core
1184 # (as opposed to using sync - which would be on a clock's delay)
1185 # this includes the actual opcode, valid flags and so on.
1186
1187 # Fetch, then predicate fetch, then Issue, then Execute.
1188         # Issue is where the VL for-loop lives. the ready/valid
1189 # signalling is used to communicate between the four.
1190
1191 # set up Fetch FSM
1192 fetch = FetchFSM(self.allow_overlap, self.svp64_en,
1193 self.imem, core_rst, pdecode2, cur_state,
1194 dbg, core, svstate, nia, is_svp64_mode)
1195 m.submodules.fetch = fetch
1196 # connect up in/out data to existing Signals
1197 comb += fetch.p.i_data.pc.eq(pc)
1198 # and the ready/valid signalling
1199 comb += fetch_pc_o_ready.eq(fetch.p.o_ready)
1200 comb += fetch.p.i_valid.eq(fetch_pc_i_valid)
1201 comb += fetch_insn_o_valid.eq(fetch.n.o_valid)
1202 comb += fetch.n.i_ready.eq(fetch_insn_i_ready)
1203
1204 self.issue_fsm(m, core, pc_changed, sv_changed, nia,
1205 dbg, core_rst, is_svp64_mode,
1206 fetch_pc_o_ready, fetch_pc_i_valid,
1207 fetch_insn_o_valid, fetch_insn_i_ready,
1208 pred_insn_i_valid, pred_insn_o_ready,
1209 pred_mask_o_valid, pred_mask_i_ready,
1210 exec_insn_i_valid, exec_insn_o_ready,
1211 exec_pc_o_valid, exec_pc_i_ready)
1212
1213 if self.svp64_en:
1214 self.fetch_predicate_fsm(m,
1215 pred_insn_i_valid, pred_insn_o_ready,
1216 pred_mask_o_valid, pred_mask_i_ready)
1217
1218 self.execute_fsm(m, core, pc_changed, sv_changed,
1219 exec_insn_i_valid, exec_insn_o_ready,
1220 exec_pc_o_valid, exec_pc_i_ready)
1221
1222 # this bit doesn't have to be in the FSM: connect up to read
1223 # regfiles on demand from DMI
1224 self.do_dmi(m, dbg)
1225
1226 # DEC and TB inc/dec FSM. copy of DEC is put into CoreState,
1227 # (which uses that in PowerDecoder2 to raise 0x900 exception)
1228 self.tb_dec_fsm(m, cur_state.dec)
1229
1230 return m
1231
1232 def do_dmi(self, m, dbg):
1233 """deals with DMI debug requests
1234
1235 currently only provides read requests for the INT regfile, CR and XER
1236 it will later also deal with *writing* to these regfiles.
1237 """
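        # regfile read pattern used below: assert "ren" combinatorially while
        # the DMI request is held, then capture o_data one clock later via a
        # delayed copy of the request signal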
1238 comb = m.d.comb
1239 sync = m.d.sync
1240 dmi, d_reg, d_cr, d_xer, = dbg.dmi, dbg.d_gpr, dbg.d_cr, dbg.d_xer
1241 intrf = self.core.regs.rf['int']
1242
1243 with m.If(d_reg.req): # request for regfile access being made
1244 # TODO: error-check this
1245 # XXX should this be combinatorial? sync better?
1246 if intrf.unary:
1247 comb += self.int_r.ren.eq(1 << d_reg.addr)
1248 else:
1249 comb += self.int_r.addr.eq(d_reg.addr)
1250 comb += self.int_r.ren.eq(1)
1251 d_reg_delay = Signal()
1252 sync += d_reg_delay.eq(d_reg.req)
1253 with m.If(d_reg_delay):
1254 # data arrives one clock later
1255 comb += d_reg.data.eq(self.int_r.o_data)
1256 comb += d_reg.ack.eq(1)
1257
1258 # sigh same thing for CR debug
1259 with m.If(d_cr.req): # request for regfile access being made
1260 comb += self.cr_r.ren.eq(0b11111111) # enable all
1261 d_cr_delay = Signal()
1262 sync += d_cr_delay.eq(d_cr.req)
1263 with m.If(d_cr_delay):
1264 # data arrives one clock later
1265 comb += d_cr.data.eq(self.cr_r.o_data)
1266 comb += d_cr.ack.eq(1)
1267
1268 # aaand XER...
1269 with m.If(d_xer.req): # request for regfile access being made
1270 comb += self.xer_r.ren.eq(0b111111) # enable all
1271 d_xer_delay = Signal()
1272 sync += d_xer_delay.eq(d_xer.req)
1273 with m.If(d_xer_delay):
1274 # data arrives one clock later
1275 comb += d_xer.data.eq(self.xer_r.o_data)
1276 comb += d_xer.ack.eq(1)
1277
1278 def tb_dec_fsm(self, m, spr_dec):
1279 """tb_dec_fsm
1280
1281 this is a FSM for updating either dec or tb. it runs alternately
1282 DEC, TB, DEC, TB. note that SPR pipeline could have written a new
1283 value to DEC, however the regfile has "passthrough" on it so this
1284 *should* be ok.
1285
1286     see v3.0B p1097-1099 for Timer Resource and p1065 and p1076
1287 """
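        # the FSM below loops DEC_READ -> DEC_WRITE -> TB_READ -> TB_WRITE,
        # so DEC is decremented and TB incremented once per four-cycle loop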
1288
1289 comb, sync = m.d.comb, m.d.sync
1290 fast_rf = self.core.regs.rf['fast']
1291 fast_r_dectb = fast_rf.r_ports['issue'] # DEC/TB
1292 fast_w_dectb = fast_rf.w_ports['issue'] # DEC/TB
1293
1294 with m.FSM() as fsm:
1295
1296 # initiates read of current DEC
1297 with m.State("DEC_READ"):
1298 comb += fast_r_dectb.addr.eq(FastRegs.DEC)
1299 comb += fast_r_dectb.ren.eq(1)
1300 m.next = "DEC_WRITE"
1301
1302 # waits for DEC read to arrive (1 cycle), updates with new value
1303 with m.State("DEC_WRITE"):
1304 new_dec = Signal(64)
1305 # TODO: MSR.LPCR 32-bit decrement mode
1306 comb += new_dec.eq(fast_r_dectb.o_data - 1)
1307 comb += fast_w_dectb.addr.eq(FastRegs.DEC)
1308 comb += fast_w_dectb.wen.eq(1)
1309 comb += fast_w_dectb.i_data.eq(new_dec)
1310 sync += spr_dec.eq(new_dec) # copy into cur_state for decoder
1311 m.next = "TB_READ"
1312
1313 # initiates read of current TB
1314 with m.State("TB_READ"):
1315 comb += fast_r_dectb.addr.eq(FastRegs.TB)
1316 comb += fast_r_dectb.ren.eq(1)
1317 m.next = "TB_WRITE"
1318
1319 # waits for read TB to arrive, initiates write of current TB
1320 with m.State("TB_WRITE"):
1321 new_tb = Signal(64)
1322 comb += new_tb.eq(fast_r_dectb.o_data + 1)
1323 comb += fast_w_dectb.addr.eq(FastRegs.TB)
1324 comb += fast_w_dectb.wen.eq(1)
1325 comb += fast_w_dectb.i_data.eq(new_tb)
1326 m.next = "DEC_READ"
1327
1328 return m
1329
1330 def __iter__(self):
1331 yield from self.pc_i.ports()
1332 yield self.pc_o
1333 yield self.memerr_o
1334 yield from self.core.ports()
1335 yield from self.imem.ports()
1336 yield self.core_bigendian_i
1337 yield self.busy_o
1338
1339 def ports(self):
1340 return list(self)
1341
1342 def external_ports(self):
1343 ports = self.pc_i.ports()
1344 ports += [self.pc_o, self.memerr_o, self.core_bigendian_i, self.busy_o,
1345 ]
1346
1347 if self.jtag_en:
1348 ports += list(self.jtag.external_ports())
1349 else:
1350             # no JTAG: expose the DMI interface directly (otherwise DMI goes via JTAG)
1351 ports += list(self.dbg.dmi.ports())
1352
1353 ports += list(self.imem.ibus.fields.values())
1354 ports += list(self.core.l0.cmpi.wb_bus().fields.values())
1355
1356 if self.sram4x4k:
1357 for sram in self.sram4k:
1358 ports += list(sram.bus.fields.values())
1359
1360 if self.xics:
1361 ports += list(self.xics_icp.bus.fields.values())
1362 ports += list(self.xics_ics.bus.fields.values())
1363 ports.append(self.int_level_i)
1364
1365 if self.gpio:
1366 ports += list(self.simple_gpio.bus.fields.values())
1367 ports.append(self.gpio_o)
1368
1369 return ports
1370
1373
1374
1375 class TestIssuer(Elaboratable):
1376 def __init__(self, pspec):
1377 self.ti = TestIssuerInternal(pspec)
1378 self.pll = DummyPLL(instance=True)
1379
1380 # PLL direct clock or not
1381 self.pll_en = hasattr(pspec, "use_pll") and pspec.use_pll
1382 if self.pll_en:
1383 self.pll_test_o = Signal(reset_less=True)
1384 self.pll_vco_o = Signal(reset_less=True)
1385 self.clk_sel_i = Signal(2, reset_less=True)
1386 self.ref_clk = ClockSignal() # can't rename it but that's ok
1387 self.pllclk_clk = ClockSignal("pllclk")
1388
1389 def elaborate(self, platform):
1390 m = Module()
1391 comb = m.d.comb
1392
1393 # TestIssuer nominally runs at main clock, actually it is
1394 # all combinatorial internally except for coresync'd components
1395 m.submodules.ti = ti = self.ti
1396
1397 if self.pll_en:
1398 # ClockSelect runs at PLL output internal clock rate
1399 m.submodules.wrappll = pll = self.pll
1400
1401 # add clock domains from PLL
1402 cd_pll = ClockDomain("pllclk")
1403 m.domains += cd_pll
1404
1405             # PLL clock established. has the side-effect of running clksel
1406 # at the PLL's speed (see DomainRenamer("pllclk") above)
1407 pllclk = self.pllclk_clk
1408 comb += pllclk.eq(pll.clk_pll_o)
1409
1410             # wire up external 24 MHz clock to PLL
1411 #comb += pll.clk_24_i.eq(self.ref_clk)
1412             # output 18 MHz PLL test signal, and analog oscillator out
1413 comb += self.pll_test_o.eq(pll.pll_test_o)
1414 comb += self.pll_vco_o.eq(pll.pll_vco_o)
1415
1416 # input to pll clock selection
1417 comb += pll.clk_sel_i.eq(self.clk_sel_i)
1418
1419 # now wire up ResetSignals. don't mind them being in this domain
1420 pll_rst = ResetSignal("pllclk")
1421 comb += pll_rst.eq(ResetSignal())
1422
1423 # internal clock is set to selector clock-out. has the side-effect of
1424 # running TestIssuer at this speed (see DomainRenamer("intclk") above)
1425 # debug clock runs at coresync internal clock
1426 cd_coresync = ClockDomain("coresync")
1427 #m.domains += cd_coresync
1428 if self.ti.dbg_domain != 'sync':
1429 cd_dbgsync = ClockDomain("dbgsync")
1430 #m.domains += cd_dbgsync
1431 intclk = ClockSignal("coresync")
1432 dbgclk = ClockSignal(self.ti.dbg_domain)
1433 # XXX BYPASS PLL XXX
1434 # XXX BYPASS PLL XXX
1435 # XXX BYPASS PLL XXX
1436 if self.pll_en:
1437 comb += intclk.eq(self.ref_clk)
1438 else:
1439 comb += intclk.eq(ClockSignal())
1440 if self.ti.dbg_domain != 'sync':
1441 dbgclk = ClockSignal(self.ti.dbg_domain)
1442 comb += dbgclk.eq(intclk)
1443
1444 return m
1445
1446 def ports(self):
1447 return list(self.ti.ports()) + list(self.pll.ports()) + \
1448 [ClockSignal(), ResetSignal()]
1449
1450 def external_ports(self):
1451 ports = self.ti.external_ports()
1452 ports.append(ClockSignal())
1453 ports.append(ResetSignal())
1454 if self.pll_en:
1455 ports.append(self.clk_sel_i)
1456 ports.append(self.pll.clk_24_i)
1457 ports.append(self.pll_test_o)
1458 ports.append(self.pll_vco_o)
1459 ports.append(self.pllclk_clk)
1460 ports.append(self.ref_clk)
1461 return ports
1462
1463
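# running this file directly with no arguments (e.g. "python issuer.py") goes
# through nmigen.cli.main and then emits test_issuer.il (RTLIL) via
# rtlil.convert, using external_ports() as the top-level interface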
1464 if __name__ == '__main__':
1465 units = {'alu': 1, 'cr': 1, 'branch': 1, 'trap': 1, 'logical': 1,
1466 'spr': 1,
1467 'div': 1,
1468 'mul': 1,
1469 'shiftrot': 1
1470 }
1471 pspec = TestMemPspec(ldst_ifacetype='bare_wb',
1472 imem_ifacetype='bare_wb',
1473 addr_wid=48,
1474 mask_wid=8,
1475 reg_wid=64,
1476 units=units)
1477 dut = TestIssuer(pspec)
1478 vl = main(dut, ports=dut.ports(), name="test_issuer")
1479
1480 if len(sys.argv) == 1:
1481 vl = rtlil.convert(dut, ports=dut.external_ports(), name="test_issuer")
1482 with open("test_issuer.il", "w") as f:
1483 f.write(vl)