#!/usr/bin/env python3
+"""
+pinmux documented here https://libre-soc.org/docs/pinmux/
+"""
from nmigen.build.dsl import Resource, Subsignal, Pins
from nmigen.build.plat import TemplatedPlatform
from nmigen.build.res import ResourceManager, ResourceError
import sys
# extra dependencies for jtag testing (?)
-from soc.bus.sram import SRAM
+#from soc.bus.sram import SRAM
-from nmigen import Memory
-from nmigen.sim import Simulator, Delay, Settle, Tick
+#from nmigen import Memory
+from nmigen.sim import Simulator, Delay, Settle, Tick, Passive
from nmutil.util import wrap
-from soc.debug.jtagutils import (jtag_read_write_reg,
- jtag_srv, jtag_set_reset,
- jtag_set_ir, jtag_set_get_dr)
+#from soc.debug.jtagutils import (jtag_read_write_reg,
+# jtag_srv, jtag_set_reset,
+# jtag_set_ir, jtag_set_get_dr)
from c4m.nmigen.jtag.tap import TAP, IOType
from c4m.nmigen.jtag.bus import Interface as JTAGInterface
-from soc.debug.dmi import DMIInterface, DBGCore
-from soc.debug.test.dmi_sim import dmi_sim
-from soc.debug.test.jtagremote import JTAGServer, JTAGClient
+#from soc.debug.dmi import DMIInterface, DBGCore
+#from soc.debug.test.dmi_sim import dmi_sim
+#from soc.debug.test.jtagremote import JTAGServer, JTAGClient
from nmigen.build.res import ResourceError
# Was thinking of using these functions, but skipped for simplicity for now
# as a triplet, it's a single Record named "io". sigh.
# therefore the only way to get a triplet of i/o/oe
# is to *actually* create explicit triple pins
- pad = Subsignal("io",
- Pins("%s_i %s_o %s_oe" % (pname, pname, pname),
- dir="io", assert_width=3))
- ios.append(Resource(pname, 0, pad))
+ # XXX ARRRGH, doesn't work
+ #pad = Subsignal("io",
+ # Pins("%s_i %s_o %s_oe" % (pname, pname, pname),
+ # dir="io", assert_width=3))
+ #ios.append(Resource(pname, 0, pad))
+ pads = []
+ pads.append(Subsignal("i",
+ Pins(pname+"_i", dir="i", assert_width=1)))
+ pads.append(Subsignal("o",
+ Pins(pname+"_o", dir="o", assert_width=1)))
+ pads.append(Subsignal("oe",
+ Pins(pname+"_oe", dir="o", assert_width=1)))
+ ios.append(Resource.family(pname, 0, default_name=pname,
+ ios=pads))
resources.append(Resource.family(periph, 0, default_name="gpio",
ios=ios))
def I2CResource(*args, scl, sda):
- io = []
- fmt = "%s_i %s_o %s_oe"
- scl = fmt % (scl, scl, scl)
- sda = fmt % (sda, sda, sda)
- io.append(Subsignal("scl", Pins(scl, dir="io", assert_width=3)))
- io.append(Subsignal("sda", Pins(sda, dir="io", assert_width=3)))
- return Resource.family(*args, default_name="i2c", ios=io)
-
-
-def recurse_down(asicpad, jtagpad):
- eqs = []
- for asiclayout, jtaglayout in zip(asicpad.layout, jtagpad.layout):
- apad = getattr(asicpad, asiclayout[0])
- jpad = getattr(jtagpad, jtaglayout[0])
- print ("recurse_down", asiclayout, jtaglayout, apad, jpad)
- if isinstance(asiclayout[1], Layout):
- eqs += recurse_down(apad, jpad)
- elif asiclayout[0] == 'i':
- eqs.append(jpad.eq(apad))
- elif asiclayout[0] in ['o', 'oe']:
- eqs.append(apad.eq(jpad))
- return eqs
-
-
-# ridiculously-simple top-level module. doesn't even have a sync domain
-# and can't have one until a clock has been established by ASICPlatform.
+ ios = []
+ pads = []
+ pads.append(Subsignal("i", Pins(sda+"_i", dir="i", assert_width=1)))
+ pads.append(Subsignal("o", Pins(sda+"_o", dir="o", assert_width=1)))
+ pads.append(Subsignal("oe", Pins(sda+"_oe", dir="o", assert_width=1)))
+ ios.append(Resource.family(sda, 0, default_name=sda, ios=pads))
+ pads = []
+ pads.append(Subsignal("i", Pins(scl+"_i", dir="i", assert_width=1)))
+ pads.append(Subsignal("o", Pins(scl+"_o", dir="o", assert_width=1)))
+ pads.append(Subsignal("oe", Pins(scl+"_oe", dir="o", assert_width=1)))
+ ios.append(Resource.family(scl, 0, default_name=scl, ios=pads))
+ return Resource.family(*args, default_name="i2c", ios=ios)
+
+
+# top-level demo module.
class Blinker(Elaboratable):
def __init__(self, pinset, resources):
- self.jtag = JTAG({}, "sync")
- self.jtag.pad_mgr = ResourceManager([], [])
- self.jtag.core_mgr = ResourceManager([], [])
- self.jtag.pad_mgr.add_resources(resources)
- self.jtag.core_mgr.add_resources(resources)
- # record resource lookup between core IO names and pads
- self.jtag.padlookup = {}
- self.jtag.requests_made = []
- self.jtag.boundary_scan_pads = []
- self.jtag.resource_table = {}
- self.jtag.resource_table_pads = {}
- self.jtag.eqs = []
- memory = Memory(width=32, depth=16)
- self.sram = SRAM(memory=memory, bus=self.jtag.wb)
-
- for resource in resources:
- print ("JTAG resource", resource)
- if resource.name in ['clk', 'rst']: # hack
- continue
- self.add_jtag_request(resource.name, resource.number)
+ self.jtag = JTAG({}, "sync", resources=resources)
+ #memory = Memory(width=32, depth=16)
+ #self.sram = SRAM(memory=memory, bus=self.jtag.wb)
def elaborate(self, platform):
jtag_resources = self.jtag.pad_mgr.resources
- core_resources = self.jtag.core_mgr.resources
m = Module()
m.submodules.jtag = self.jtag
- m.submodules.sram = self.sram
+ #m.submodules.sram = self.sram
count = Signal(5)
m.d.sync += count.eq(count+1)
print ("resources", platform, jtag_resources.items())
- gpio = self.jtag_request('gpio')
+ gpio = self.jtag.request('gpio')
print (gpio, gpio.layout, gpio.fields)
# get the GPIO bank, mess about with some of the pins
- m.d.comb += gpio.gpio0.io.o.eq(1)
- m.d.comb += gpio.gpio1.io.o.eq(gpio.gpio2.io.i)
- m.d.comb += gpio.gpio1.io.oe.eq(count[4])
- m.d.sync += count[0].eq(gpio.gpio1.io.i)
+ m.d.comb += gpio.gpio0.o.eq(1)
+ m.d.comb += gpio.gpio1.o.eq(gpio.gpio2.i)
+ m.d.comb += gpio.gpio1.oe.eq(count[4])
+ m.d.sync += count[0].eq(gpio.gpio1.i)
# get the UART resource, mess with the output tx
- uart = self.jtag_request('uart')
+ uart = self.jtag.request('uart')
print (uart, uart.fields)
- intermediary = Signal()
- m.d.comb += uart.tx.eq(intermediary)
- m.d.comb += intermediary.eq(uart.rx)
-
- # platform requested: make the exact same requests,
- # then add JTAG afterwards
- if platform is not None:
- for (name, number, dir, xdr) in self.jtag.requests_made:
- asicpad = platform.request(name, number, dir=dir, xdr=xdr)
- jtagpad = self.jtag.resource_table_pads[(name, number)]
- print ("jtagpad", jtagpad, jtagpad.layout)
- m.d.comb += recurse_down(asicpad, jtagpad)
-
- # wire up JTAG otherwise we are in trouble (no clock)
- jtag = platform.request('jtag')
- m.d.comb += self.jtag.bus.tdi.eq(jtag.tdi)
- m.d.comb += self.jtag.bus.tck.eq(jtag.tck)
- m.d.comb += self.jtag.bus.tms.eq(jtag.tms)
- m.d.comb += jtag.tdo.eq(self.jtag.bus.tdo)
-
- # add the eq assignments connecting up JTAG boundary scan to core
- m.d.comb += self.jtag.eqs
- return m
+ self.intermediary = Signal()
+ m.d.comb += uart.tx.eq(self.intermediary)
+ m.d.comb += self.intermediary.eq(uart.rx)
+
+ # to even be able to get at objects, you first have to make them
+ # available - i.e. not as local variables
+ self.gpio = gpio
+ self.uart = uart
+
+ return self.jtag.boundary_elaborate(m, platform)
def ports(self):
return list(self)
def __iter__(self):
- yield self.jtag.bus.tdi
- yield self.jtag.bus.tdo
- yield self.jtag.bus.tck
- yield self.jtag.bus.tms
- yield from self.jtag.boundary_scan_pads
-
- def jtag_request(self, name, number=0, *, dir=None, xdr=None):
- return self.jtag.resource_table[(name, number)]
-
- def add_jtag_request(self, name, number=0, *, dir=None, xdr=None):
- """request a Resource (e.g. name="uart", number=0) which will
- return a data structure containing Records of all the pins.
-
- this override will also - automatically - create a JTAG Boundary Scan
- connection *without* any change to the actual Platform.request() API
- """
- pad_mgr = self.jtag.pad_mgr
- core_mgr = self.jtag.core_mgr
- padlookup = self.jtag.padlookup
- # okaaaay, bit of shenanigens going on: the important data structure
- # here is Resourcemanager._ports. requests add to _ports, which is
- # what needs redirecting. therefore what has to happen is to
- # capture the number of ports *before* the request. sigh.
- start_ports = len(core_mgr._ports)
- value = core_mgr.request(name, number, dir=dir, xdr=xdr)
- end_ports = len(core_mgr._ports)
-
- # take a copy of the requests made
- self.jtag.requests_made.append((name, number, dir, xdr))
-
- # now make a corresponding (duplicate) request to the pad manager
- # BUT, if it doesn't exist, don't sweat it: all it means is, the
- # application did not request Boundary Scan for that resource.
- pad_start_ports = len(pad_mgr._ports)
- pvalue = pad_mgr.request(name, number, dir=dir, xdr=xdr)
- pad_end_ports = len(pad_mgr._ports)
-
- # ok now we have the lengths: now create a lookup between the pad
- # and the core, so that JTAG boundary scan can be inserted in between
- core = core_mgr._ports[start_ports:end_ports]
- pads = pad_mgr._ports[pad_start_ports:pad_end_ports]
- # oops if not the same numbers added. it's a duplicate. shouldn't happen
- assert len(core) == len(pads), "argh, resource manager error"
- print ("core", core)
- print ("pads", pads)
-
- # pad/core each return a list of tuples of (res, pin, port, attrs)
- for pad, core in zip(pads, core):
- # create a lookup on pin name to get at the hidden pad instance
- # this pin name will be handed to get_input, get_output etc.
- # and without the padlookup you can't find the (duplicate) pad.
- # note that self.padlookup and self.jtag.ios use the *exact* same
- # pin.name per pin
- padpin = pad[1]
- corepin = core[1]
- if padpin is None: continue # skip when pin is None
- assert corepin is not None # if pad was None, core should be too
- print ("iter", pad, padpin.name)
- print ("existing pads", padlookup.keys())
- assert padpin.name not in padlookup # no overwrites allowed!
- assert padpin.name == corepin.name # has to be the same!
- padlookup[padpin.name] = pad # store pad by pin name
-
- # now add the IO Shift Register. first identify the type
- # then request a JTAG IOConn. we can't wire it up (yet) because
- # we don't have a Module() instance. doh. that comes in get_input
- # and get_output etc. etc.
- iotype = resiotypes[padpin.dir] # look up the C4M-JTAG IOType
- io = self.jtag.add_io(iotype=iotype, name=padpin.name) # IOConn
- self.jtag.ios[padpin.name] = io # store IOConn Record by pin name
-
- # and connect up core to pads based on type. could create
- # Modules here just like in Platform.get_input/output but
- # in some ways it is clearer by being simpler to wire them globally
-
- if padpin.dir == 'i':
- print ("jtag_request add input pin", padpin)
- print (" corepin", corepin)
- print (" jtag io core", io.core)
- print (" jtag io pad", io.pad)
- # corepin is to be returned, here. so, connect jtag corein to it
- self.jtag.eqs += [corepin.i.eq(io.core.i)]
- # and padpin to JTAG pad
- self.jtag.eqs += [io.pad.i.eq(padpin.i)]
- self.jtag.boundary_scan_pads.append(padpin.i)
- elif padpin.dir == 'o':
- print ("jtag_request add output pin", padpin)
- print (" corepin", corepin)
- print (" jtag io core", io.core)
- print (" jtag io pad", io.pad)
- # corepin is to be returned, here. connect it to jtag core out
- self.jtag.eqs += [io.core.o.eq(corepin.o)]
- # and JTAG pad to padpin
- self.jtag.eqs += [padpin.o.eq(io.pad.o)]
- self.jtag.boundary_scan_pads.append(padpin.o)
- elif padpin.dir == 'io':
- print ("jtag_request add io pin", padpin)
- print (" corepin", corepin)
- print (" jtag io core", io.core)
- print (" jtag io pad", io.pad)
- # corepin is to be returned, here. so, connect jtag corein to it
- self.jtag.eqs += [corepin.i.eq(io.core.i)]
- # and padpin to JTAG pad
- self.jtag.eqs += [io.pad.i.eq(padpin.i)]
- # corepin is to be returned, here. connect it to jtag core out
- self.jtag.eqs += [io.core.o.eq(corepin.o)]
- # and JTAG pad to padpin
- self.jtag.eqs += [padpin.o.eq(io.pad.o)]
- # corepin is to be returned, here. connect it to jtag core out
- self.jtag.eqs += [io.core.oe.eq(corepin.oe)]
- # and JTAG pad to padpin
- self.jtag.eqs += [padpin.oe.eq(io.pad.oe)]
-
- self.jtag.boundary_scan_pads.append(padpin.i)
- self.jtag.boundary_scan_pads.append(padpin.o)
- self.jtag.boundary_scan_pads.append(padpin.oe)
-
- # finally record the *CORE* value just like ResourceManager.request()
- # so that the module using this can connect to *CORE* i/o to the
- # resource. pads are taken care of
- self.jtag.resource_table[(name, number)] = value
- # and the *PAD* value so that it can be wired up externally
- self.jtag.resource_table_pads[(name, number)] = pvalue
-
+ yield from self.jtag.iter_ports()
'''
_trellis_command_templates = [
print (" get_tristate", pin, "port", port, port.layout)
m = Module()
- print (" pad", res, pin, port, attrs)
+ print (" pad", pin, port, attrs)
print (" pin", pin.layout)
return m
# m.submodules += Instance("$tribuf",
resources = create_resources(pinset)
top = Blinker(pinset, resources)
-#vl = rtlil.convert(top, ports=top.ports())
-#with open("test_jtag_blinker.il", "w") as f:
-# f.write(vl)
+vl = rtlil.convert(top, ports=top.ports())
+with open("test_jtag_blinker.il", "w") as f:
+ f.write(vl)
-if True:
+if False:
# XXX these modules are all being added *AFTER* the build process links
# everything together. the expectation that this would work is...
# unrealistic. ordering, clearly, is important.
# particularly when modules have been added *after* the platform build()
# function has been called.
-sim = Simulator(top_fragment)
+def test_case0():
+ print("Starting sanity test case!")
+ print("printing out list of stuff in top")
+ print(dir(top))
+ # ok top now has a variable named "gpio", let's enumerate that too
+ print("printing out list of stuff in top.gpio and its type")
+ print(top.gpio.__class__.__name__, dir(top.gpio))
+ # ok, it's a nmigen Record, therefore it has a layout. let's print
+ # that too
+ print("top.gpio is a Record therefore has fields and a layout")
+ print(" layout:", top.gpio.layout)
+ print(" fields:", top.gpio.fields)
+ print("Fun never ends...")
+ print(" layout, gpio2:", top.gpio.layout['gpio2'])
+ print(" fields, gpio2:", top.gpio.fields['gpio2'])
+ print(top.jtag.__class__.__name__, dir(top.jtag))
+
+ # etc etc. you get the general idea
+ delayVal = 0.2e-6
+ yield top.uart.rx.eq(0)
+ yield Delay(delayVal)
+ yield Settle()
+ yield top.gpio.gpio2.o.eq(0)
+ yield top.gpio.gpio3.o.eq(1)
+ yield Delay(delayVal)
+ yield Settle()
+ yield top.gpio.gpio2.oe.eq(1)
+ yield top.gpio.gpio3.oe.eq(1)
+ #yield top.jtag.gpio.gpio2.i.eq(1)
+ yield Delay(delayVal)
+ yield Settle()
+ for _ in range(20):
+ # get a value first (as an integer). you were trying to set
+ # it to the actual Signal
+ gpio_o2 = yield top.gpio.gpio2.o
+ # then set it
+ yield top.gpio.gpio2.o.eq(~gpio_o2)
+
+ # ditto: here you are trying to set to an AST expression
+    # which is inadvisable (likely to fail)
+ yield top.gpio.gpio3.o.eq(~top.gpio.gpio3.o)
+ yield Delay(delayVal)
+ yield Settle()
+ # again you are trying to set something equal to the Signal
+ # rather than to a value. this is attempting to change the
+ # actual HDL which is completely inappropriate
+ yield top.uart.rx.eq(~top.intermediary)
+ yield Delay(delayVal)
+ yield Settle()
+
+ yield top.gpio.gpio2.oe.eq(0)
+ yield top.gpio.gpio3.oe.eq(0)
+ #yield top.jtag.gpio.gpio2.i.eq(0)
+ yield Delay(delayVal)
+ yield Settle()
+
+# Code borrowed from cesar, runs, but shouldn't actually work because of
+# self. statements and non-existent signal names.
+def test_case1():
+ print("Example test case")
+ yield Passive()
+ while True:
+ # Settle() is needed to give a quick response to
+ # the zero delay case
+ yield Settle()
+ # wait for rel_o to become active
+ while not (yield self.rel_o):
+ yield
+ yield Settle()
+ # read the transaction parameters
+ assert self.expecting, "an unexpected result was produced"
+ delay = (yield self.delay)
+ expected = (yield self.expected)
+ # wait for `delay` cycles
+ for _ in range(delay):
+ yield
+ # activate go_i for one cycle
+ yield self.go_i.eq(1)
+ yield self.count.eq(self.count + 1)
+ yield
+ # check received data against the expected value
+ result = (yield self.port)
+ assert result == expected,\
+ f"expected {expected}, received {result}"
+ yield self.go_i.eq(0)
+ yield self.port.eq(0)
+
+sim = Simulator(top)
sim.add_clock(1e-6, domain="sync") # standard clock
-sim.add_sync_process(wrap(jtag_srv(top))) #? jtag server
+#sim.add_sync_process(wrap(jtag_srv(top))) #? jtag server
#if len(sys.argv) != 2 or sys.argv[1] != 'server':
-sim.add_sync_process(wrap(jtag_sim(cdut, top.jtag))) # actual jtag tester
-sim.add_sync_process(wrap(dmi_sim(top.jtag))) # handles (pretends to be) DMI
+#sim.add_sync_process(wrap(jtag_sim(cdut, top.jtag))) # actual jtag tester
+#sim.add_sync_process(wrap(dmi_sim(top.jtag))) # handles (pretends to be) DMI
+
+#sim.add_sync_process(wrap(test_case1()))
+sim.add_sync_process(wrap(test_case0()))
-with sim.write_vcd("dmi2jtag_test_srv.vcd"):
+with sim.write_vcd("blinker_test.vcd"):
sim.run()