m = Module()
m.submodules.jtag = self.jtag
#m.submodules.sram = self.sram
-
- count = Signal(5)
- m.d.sync += count.eq(count+1)
+
+ #count = Signal(5)
+ #m.d.sync += count.eq(count+1)
print ("resources", platform, jtag_resources.items())
gpio = self.jtag.request('gpio')
print (gpio, gpio.layout, gpio.fields)
# get the GPIO bank, mess about with some of the pins
- m.d.comb += gpio.gpio0.o.eq(1)
- m.d.comb += gpio.gpio1.o.eq(gpio.gpio2.i)
- m.d.comb += gpio.gpio1.oe.eq(count[4])
- m.d.sync += count[0].eq(gpio.gpio1.i)
+ #m.d.comb += gpio.gpio0.o.eq(1)
+ #m.d.comb += gpio.gpio1.o.eq(gpio.gpio2.i)
+ #m.d.comb += gpio.gpio1.oe.eq(count[4])
+ #m.d.sync += count[0].eq(gpio.gpio1.i)
+
+ num_gpios = 4
+ gpio_o_test = Signal(num_gpios)
+ gpio_oe_test = Signal(num_gpios)
+ # Wire up the output signal of each gpio by XOR'ing each bit of
+ # gpio_o_test with gpio's input
+ # Wire up each bit of gpio_oe_test signal to oe signal of each gpio.
+ # Turn into a loop at some point, probably a way without
+ # using getattr()
+ m.d.comb += gpio.gpio0.o.eq(gpio_o_test[0] ^ gpio.gpio0.i)
+ m.d.comb += gpio.gpio1.o.eq(gpio_o_test[1] ^ gpio.gpio1.i)
+ m.d.comb += gpio.gpio2.o.eq(gpio_o_test[2] ^ gpio.gpio2.i)
+ m.d.comb += gpio.gpio3.o.eq(gpio_o_test[3] ^ gpio.gpio3.i)
+
+ m.d.comb += gpio.gpio0.oe.eq(gpio_oe_test[0])
+ m.d.comb += gpio.gpio1.oe.eq(gpio_oe_test[1])
+ m.d.comb += gpio.gpio2.oe.eq(gpio_oe_test[2])
+ m.d.comb += gpio.gpio3.oe.eq(gpio_oe_test[3])
+
# get the UART resource, mess with the output tx
uart = self.jtag.request('uart')
print ("uart fields", uart, uart.fields)
# available - i.e. not as local variables
self.gpio = gpio
self.uart = uart
+ self.gpio_o_test = gpio_o_test
+ self.gpio_oe_test = gpio_oe_test
# sigh these wire up to the pads so you cannot set Signals
# that are already wired
self.fragment = fragment
return super().toolchain_prepare(fragment, name, **kwargs)
-"""
-and to create a Platform instance with that list, and build
-something random
- p=Platform()
- p.resources=listofstuff
- p.build(Blinker())
-"""
-pinset = dummy_pinset()
-print(pinset)
-resources = create_resources(pinset)
-top = Blinker(pinset, resources)
-
-vl = rtlil.convert(top, ports=top.ports())
-with open("test_jtag_blinker.il", "w") as f:
- f.write(vl)
-
-if False:
- # XXX these modules are all being added *AFTER* the build process links
- # everything together. the expectation that this would work is...
- # unrealistic. ordering, clearly, is important.
-
- # dut = JTAG(test_pinset(), wb_data_wid=64, domain="sync")
- top.jtag.stop = False
- # rather than the client access the JTAG bus directly
- # create an alternative that the client sets
- class Dummy: pass
- cdut = Dummy()
- cdut.cbus = JTAGInterface()
-
- # set up client-server on port 44843-something
- top.jtag.s = JTAGServer()
- cdut.c = JTAGClient()
- top.jtag.s.get_connection()
- #else:
- # print ("running server only as requested, use openocd remote to test")
- # sys.stdout.flush()
- # top.jtag.s.get_connection(None) # block waiting for connection
-
- # take copy of ir_width and scan_len
- cdut._ir_width = top.jtag._ir_width
- cdut.scan_len = top.jtag.scan_len
-
- p = ASICPlatform (resources, top.jtag)
- p.build(top)
- # this is what needs to gets treated as "top", after "main module" top
- # is augmented with IO pads with JTAG tacked on. the expectation that
- # the get_input() etc functions will be called magically by some other
- # function is unrealistic.
- top_fragment = p.fragment
-
-# XXX simulating top (the module that does not itself contain IO pads
-# because that's covered by build) cannot possibly be expected to work
-# particularly when modules have been added *after* the platform build()
-# function has been called.
def test_case0():
    """Sanity-test process: wiggle gpio2/gpio3 core and pad signals.

    Runs as an nmigen simulator generator process.  Drives the core-side
    ``o``/``oe`` signals and the JTAG resource pad inputs, with
    Delay()/Settle() between steps so combinatorial logic propagates.
    Relies on the module-level ``top`` (the Blinker instance) having been
    built before the simulator starts.
    """
    print("Starting sanity test case!")
    print("   layout, gpio2:", top.gpio.layout['gpio2'])
    print("   fields, gpio2:", top.gpio.fields['gpio2'])
    print(top.jtag.__class__.__name__, dir(top.jtag))
    print("Pads:")
    print(top.jtag.resource_table_pads[('gpio', 0)])
    # etc etc. you get the general idea
    delayVal = 0.2e-6
    yield Settle()
    yield top.gpio.gpio2.o.eq(0)
    yield top.gpio.gpio3.o.eq(1)
    yield
    yield top.gpio.gpio3.oe.eq(1)
    yield
    yield top.gpio.gpio3.oe.eq(0)
    # grab the JTAG resource pad and drive the pad-side input
    gpios_pad = top.jtag.resource_table_pads[('gpio', 0)]
    yield gpios_pad.gpio3.i.eq(1)
    yield Delay(delayVal)
    yield Settle()
    yield top.gpio.gpio2.oe.eq(1)
    yield top.gpio.gpio3.oe.eq(1)
    yield gpios_pad.gpio3.i.eq(0)
    yield top.jtag.gpio.gpio2.i.eq(1)
    yield Delay(delayVal)
    yield Settle()
    gpio_o2 = 0
    # toggle via a plain python value: yielding Signal.eq(~Signal) would
    # hand the simulator an unevaluated AST expression, which is
    # inadvisable (likely to fail)
    gpio_o3 = not gpio_o2
    yield top.gpio.gpio3.o.eq(gpio_o3)
    yield Delay(delayVal)
    yield Settle()
    yield top.gpio.gpio2.oe.eq(0)
    yield top.gpio.gpio3.oe.eq(0)
    yield top.jtag.gpio.gpio2.i.eq(0)
    yield Delay(delayVal)
    yield Settle()
    # XXX removed: "yield self.go_i.eq(0)" / "yield self.port.eq(0)"
    # (code borrowed from cesar).  This is a module-level generator:
    # there is no ``self`` here and no go_i/port signals, so reaching
    # those lines raised NameError.
# (removed: Simulator construction and process registration moved under
#  the ``if __name__ == '__main__':`` guard at the bottom of this file)
+def test_gpios():
+ print("Starting GPIO test case!")
+
+ num_gpios = top.gpio_o_test.width
+ # Grab GPIO outpud pad resource from JTAG BS - end of chain
+ print (top.jtag.boundary_scan_pads.keys())
+ gpio0_o = top.jtag.boundary_scan_pads['gpio_0__gpio0__o']['o']
+ gpio1_o = top.jtag.boundary_scan_pads['gpio_0__gpio1__o']['o']
+ gpio2_o = top.jtag.boundary_scan_pads['gpio_0__gpio2__o']['o']
+ gpio3_o = top.jtag.boundary_scan_pads['gpio_0__gpio3__o']['o']
+
+ # Grab GPIO input pad resource from JTAG BS - start of chain
+ gpio0_pad_in = top.jtag.boundary_scan_pads['gpio_0__gpio0__i']['i']
+ gpio1_pad_in = top.jtag.boundary_scan_pads['gpio_0__gpio1__i']['i']
+ gpio2_pad_in = top.jtag.boundary_scan_pads['gpio_0__gpio2__i']['i']
+ gpio3_pad_in = top.jtag.boundary_scan_pads['gpio_0__gpio3__i']['i']
+ #pad_in = [gpio0_pad_in gpio1_pad_in gpio2_pad_in gpio3_pad_in]
+
+ # temp test
+ # no: already told you, these are never going to work
+ print ("printing out info about the resource gpio0")
+ print (top.gpio['gpio0']['i'])
+ print ("this is a PIN resource", type(top.gpio['gpio0']['i']))
+ # yield can only be done on SIGNALS or RECORDS,
+ # NOT Pins/Resources gpio0_core_in = yield top.gpio['gpio0']['i']
+ #print("Test gpio0 core in: ", gpio0_core_in)
+
+ #print("JTAG")
+ #print(top.jtag.__class__.__name__, dir(top.jtag))
+ #print("TOP")
+ #print(top.__class__.__name__, dir(top))
+ #print("PORT")
+ #print(top.ports.__class__.__name__, dir(top.ports))
+ #print("GPIO")
+ #print(top.gpio.__class__.__name__, dir(top.gpio))
+
+ # Have the sim run through a for-loop where the gpio_o_test is
+ # incremented like a counter (0000, 0001...)
+ # At each iteration of the for-loop, assert:
+ # + output set at core matches output seen at pad
+ # TODO + input set at pad matches input seen at core
+ # TODO + if gpio_o_test bit is cleared, output seen at pad matches
+ # input seen at pad
+ num_gpio_o_states = num_gpios**2
+ print("Num of permutations of gpio_o_test record: ", num_gpio_o_states)
+ for gpio_o_val in range(0, num_gpio_o_states):
+ yield top.gpio_o_test.eq(gpio_o_val)
+ yield Settle()
+ yield # Move to the next clk cycle
+
+ # yield the pad output
+ pad0_out = yield gpio0_o
+ pad1_out = yield gpio1_o
+ pad2_out = yield gpio2_o
+ pad3_out = yield gpio3_o
+ print("Applied values:", bin(gpio_o_val), "Seeing",
+ pad3_out, pad2_out, pad1_out, pad0_out)
+ # Test without asserting input
+ # gpio_o_val is a 4-bit binary number setting each pad (single-bit)
+ assert ((gpio_o_val & 0b0001) != 0) == pad0_out
+ assert ((gpio_o_val & 0b0010) != 0) == pad1_out
+ assert ((gpio_o_val & 0b0100) != 0) == pad2_out
+ assert ((gpio_o_val & 0b1000) != 0) == pad3_out
+ # Test with input asserted
+ test_in = 1
+ yield gpio0_pad_in.eq(test_in)
+ # don't need this *and* a yield of 1 clock cycle yield Settle()
+ yield
-#sim.add_sync_process(wrap(test_case1()))
-sim.add_sync_process(wrap(test_case0()))
+ # after changing the gpio0 input, the output is also going to
+ # change. *therefore it must be read again* to get the
+ # snapshot (as a python value)
+ pad0_out = yield gpio0_o
+ pad1_out = yield gpio1_o
+ pad2_out = yield gpio2_o
+ pad3_out = yield gpio3_o
+ print("Applied test_in=1 with values:", bin(gpio_o_val), "Seeing",
+ pad3_out, pad2_out, pad1_out, pad0_out)
+ # Trying to read input from core side, looks like might be a pin...
+ # XXX don't "look like" - don't guess - *print it out*
+ print ("don't guess, CHECK", type(top.gpio.gpio0.i))
+ #temp_in = yield top.gpio.gpio0.i
+ #print("Core input ", temp_in, temp_in==test_in)
+ #print((gpio_o_val & 0b0001) == 1)
+ #print(((gpio_o_val & 0b0001) == 1) ^ test_in)
+ assert (((gpio_o_val & 0b0001) != 0) ^ test_in) == pad0_out
+ test_in = 0
+ yield gpio0_pad_in.eq(test_in)
+ print () # extra print to divide the output
+
+ # Another for loop to run through gpio_oe_test. Assert:
+ # + oe set at core matches oe seen at pad.
+ # TODO
+
if __name__ == '__main__':
    # Create a Platform instance with the resource list and build
    # something random, i.e.:
    #   p = Platform()
    #   p.resources = listofstuff
    #   p.build(Blinker())
    pinset = dummy_pinset()
    print(pinset)
    resources = create_resources(pinset)
    top = Blinker(pinset, resources, no_jtag_connect=False)  # True

    # emit RTLIL of the top-level module for later inspection
    vl = rtlil.convert(top, ports=top.ports())
    with open("test_jtag_blinker.il", "w") as f:
        f.write(vl)

    if False:
        # XXX these modules are all being added *AFTER* the build process
        # links everything together. the expectation that this would
        # work is... unrealistic. ordering, clearly, is important.

        # dut = JTAG(test_pinset(), wb_data_wid=64, domain="sync")
        top.jtag.stop = False
        # rather than the client access the JTAG bus directly
        # create an alternative that the client sets
        class Dummy: pass
        cdut = Dummy()
        cdut.cbus = JTAGInterface()

        # set up client-server on port 44843-something
        top.jtag.s = JTAGServer()
        cdut.c = JTAGClient()
        top.jtag.s.get_connection()
        #else:
        #    print ("running server only as requested, "
        #           "use openocd remote to test")
        #    sys.stdout.flush()
        #    top.jtag.s.get_connection(None)  # block for connection

        # take copy of ir_width and scan_len
        cdut._ir_width = top.jtag._ir_width
        cdut.scan_len = top.jtag.scan_len

        p = ASICPlatform(resources, top.jtag)
        p.build(top)
        # this is what needs to get treated as "top", after "main module"
        # top is augmented with IO pads with JTAG tacked on. the
        # expectation that the get_input() etc functions will be called
        # magically by some other function is unrealistic.
        top_fragment = p.fragment

    # XXX simulating top (the module that does not itself contain IO
    # pads because that's covered by build) cannot possibly be expected
    # to work, particularly when modules have been added *after* the
    # platform build() function has been called.

    sim = Simulator(top)
    sim.add_clock(1e-6, domain="sync")  # standard clock

    #sim.add_sync_process(wrap(jtag_srv(top)))  # ? jtag server
    #if len(sys.argv) != 2 or sys.argv[1] != 'server':
    # actual jtag tester
    #sim.add_sync_process(wrap(jtag_sim(cdut, top.jtag)))
    # handles (pretends to be) DMI
    #sim.add_sync_process(wrap(dmi_sim(top.jtag)))

    #sim.add_sync_process(wrap(test_case1()))
    #sim.add_sync_process(wrap(test_case0()))
    sim.add_sync_process(wrap(test_gpios()))

    with sim.write_vcd("blinker_test.vcd"):
        sim.run()