Add SDRAMPeripheral and SDRAMSoC example.
author    Jean-François Nguyen <jf@lambdaconcept.com>
Mon, 28 Jun 2021 18:27:45 +0000 (20:27 +0200)
committer Jean-François Nguyen <jf@lambdaconcept.com>
Mon, 28 Jun 2021 18:29:47 +0000 (20:29 +0200)
.gitmodules
MANIFEST.in
examples/sdram_soc.py [new file with mode: 0644]
lambdasoc/periph/sdram.py [new file with mode: 0644]
lambdasoc/soc/cpu.py
lambdasoc/software/bios
lambdasoc/test/test_periph_sdram.py [new file with mode: 0644]
lambdasoc/test/utils/formal.py [new file with mode: 0644]
lambdasoc/test/utils/toolchain.py [new file with mode: 0644]
lambdasoc/test/utils/wishbone.py

index bfb7431f4f8873d0d1ccecaaefc51d881c30e5bf..2bd39974f3e981388762a03a52fcd2397e9ec886 100644 (file)
@@ -1,3 +1,4 @@
 [submodule "lambdasoc/software/bios"]
        path = lambdasoc/software/bios
        url = https://github.com/lambdaconcept/lambdasoc-bios
+       branch = sdram-staging
index 3a94de34c812599609ab716454bff03aaeb1164c..0fef7e120c3246a568ac8374d73ec244271d2824 100644 (file)
@@ -1,4 +1,8 @@
 graft lambdasoc/software/bios/3rdparty/llvm-project/compiler-rt/lib/builtins
+graft lambdasoc/software/bios/3rdparty/litex/litex/soc/software/include
+include lambdasoc/software/bios/3rdparty/litex/litex/soc/cores/cpu/minerva/*.h
+graft lambdasoc/software/bios/3rdparty/litex/litex/soc/software/libbase
+graft lambdasoc/software/bios/3rdparty/litex/litex/soc/software/liblitedram
 include lambdasoc/software/bios/Makefile
 graft lambdasoc/software/bios/src
 include lambdasoc/software/bios/toolchain.mk
diff --git a/examples/sdram_soc.py b/examples/sdram_soc.py
new file mode 100644 (file)
index 0000000..2be6bfc
--- /dev/null
@@ -0,0 +1,160 @@
+import argparse
+
+from nmigen import *
+from nmigen.build import *
+from nmigen_soc import wishbone
+
+from lambdasoc.cpu.minerva import MinervaCPU
+from lambdasoc.periph.intc import GenericInterruptController
+from lambdasoc.periph.serial import AsyncSerialPeripheral
+from lambdasoc.periph.sram import SRAMPeripheral
+from lambdasoc.periph.timer import TimerPeripheral
+from lambdasoc.periph.sdram import SDRAMPeripheral
+from lambdasoc.soc.cpu import CPUSoC
+
+from lambdasoc.cores import litedram
+
+
+__all__ = ["SDRAMSoC"]
+
+
+class SDRAMSoC(CPUSoC, Elaboratable):
+    def __init__(self, *, reset_addr, clk_freq,
+                 rom_addr, rom_size,
+                 ram_addr, ram_size,
+                 uart_addr, uart_divisor, uart_pins,
+                 timer_addr, timer_width,
+                 sdram_addr, sdram_core, sdram_cache_size):
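+        # A shared arbiter multiplexes the CPU instruction and data buses onto a single
+        # decoder; the "cti" and "bte" features carry registered-feedback signals so that
+        # bursts can reach the cached SDRAM window.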
+        self._arbiter = wishbone.Arbiter(addr_width=30, data_width=32, granularity=8,
+                                         features={"cti", "bte"})
+        self._decoder = wishbone.Decoder(addr_width=30, data_width=32, granularity=8,
+                                         features={"cti", "bte"})
+
+        self.cpu = MinervaCPU(
+            reset_address=reset_addr,
+            with_icache=True, icache_nlines=128, icache_nwords=4, icache_nways=1,
+                              icache_base=sdram_addr, icache_limit=sdram_addr + sdram_core.size,
+            with_dcache=True, dcache_nlines=128, dcache_nwords=4, dcache_nways=1,
+                              dcache_base=sdram_addr, dcache_limit=sdram_addr + sdram_core.size,
+            with_muldiv=True,
+        )
+        self._arbiter.add(self.cpu.ibus)
+        self._arbiter.add(self.cpu.dbus)
+
+        self.rom = SRAMPeripheral(size=rom_size, writable=False)
+        self._decoder.add(self.rom.bus, addr=rom_addr)
+
+        self.ram = SRAMPeripheral(size=ram_size)
+        self._decoder.add(self.ram.bus, addr=ram_addr)
+
+        self.sdram = SDRAMPeripheral(core=sdram_core, cache_size=sdram_cache_size)
+        self._decoder.add(self.sdram.bus, addr=sdram_addr)
+
+        self.uart = AsyncSerialPeripheral(divisor=uart_divisor, pins=uart_pins)
+        self._decoder.add(self.uart.bus, addr=uart_addr)
+
+        self.timer = TimerPeripheral(width=timer_width)
+        self._decoder.add(self.timer.bus, addr=timer_addr)
+
+        self.intc = GenericInterruptController(width=len(self.cpu.ip))
+        self.intc.add_irq(self.timer.irq, 0)
+        self.intc.add_irq(self.uart .irq, 1)
+
+        self.memory_map = self._decoder.bus.memory_map
+
+        self.clk_freq = clk_freq
+
+    def elaborate(self, platform):
+        m = Module()
+
+        m.domains += [
+            ClockDomain("litedram_input"),
+            ClockDomain("litedram_user"),
+            ClockDomain("sync"),
+        ]
+
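+        # The LiteDRAM core is clocked from the board's 100 MHz input ("litedram_input") and
+        # provides the user clock domain, which in turn drives the SoC "sync" domain.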
+        m.d.comb += [
+            ClockSignal("litedram_input").eq(platform.request("clk100", 0).i),
+
+            ClockSignal("sync").eq(ClockSignal("litedram_user")),
+            ResetSignal("sync").eq(ResetSignal("litedram_user")),
+        ]
+
+        m.submodules.arbiter = self._arbiter
+        m.submodules.cpu     = self.cpu
+
+        m.submodules.decoder = self._decoder
+        m.submodules.rom     = self.rom
+        m.submodules.ram     = self.ram
+        m.submodules.sdram   = self.sdram
+        m.submodules.uart    = self.uart
+        m.submodules.timer   = self.timer
+        m.submodules.intc    = self.intc
+
+        m.d.comb += [
+            self._arbiter.bus.connect(self._decoder.bus),
+            self.cpu.ip.eq(self.intc.ip),
+        ]
+
+        return m
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--baudrate", type=int,
+            default=9600,
+            help="UART baudrate (default: 9600)")
+    args = parser.parse_args()
+
+    # from nmigen_boards.arty_a7 import ArtyA7_35Platform
+    # platform = ArtyA7_35Platform()
+    # litedram_cfg = litedram.Artix7Config(
+    #     memtype          = "DDR3",
+    #     speedgrade       = "-1",
+    #     cmd_latency      = 0,
+    #     module_name      = "MT41K128M16",
+    #     module_bytes     = 2,
+    #     module_ranks     = 1,
+    #     rtt_nom          = 60,
+    #     rtt_wr           = 60,
+    #     ron              = 34,
+    #     input_clk_freq   = int(100e6),
+    #     user_clk_freq    = int(100e6),
+    #     iodelay_clk_freq = int(200e6),
+    # )
+
+    from nmigen_boards.ecpix5 import ECPIX585Platform
+    platform = ECPIX585Platform()
+    litedram_cfg = litedram.ECP5Config(
+        memtype        = "DDR3",
+        module_name    = "MT41K256M16",
+        module_bytes   = 2,
+        module_ranks   = 1,
+        input_clk_freq = int(100e6),
+        user_clk_freq  = int(70e6),
+        init_clk_freq  = int(25e6),
+    )
+
+    litedram_core = litedram.Core(
+        litedram_cfg,
+        pins = litedram_cfg.request_pins(platform, "ddr3", 0),
+    )
+
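+    # Build the LiteDRAM core standalone, then add its generated Verilog to the platform so it
+    # is synthesized together with the rest of the SoC.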
+    litedram_products = litedram_core.build(do_build=True)
+    with litedram_products.extract(f"{litedram_core.name}/{litedram_core.name}.v") as litedram_v:
+        with open(litedram_v, "r") as litedram_v_contents:
+            platform.add_file(litedram_v, litedram_v_contents)
+
+    soc = SDRAMSoC(
+         reset_addr=0x30000000, clk_freq=litedram_cfg.user_clk_freq,
+          uart_addr=0x00005000, uart_divisor=int(litedram_cfg.user_clk_freq // args.baudrate),
+                                uart_pins=platform.request("uart", 0),
+         timer_addr=0x00006000, timer_width=32,
+
+           rom_addr=0x30000000, rom_size=0x8000,
+           ram_addr=0x30008000, ram_size=0x1000,
+         sdram_addr=0x40000000, sdram_core=litedram_core, sdram_cache_size=8192,
+    )
+    soc.build(do_build=True, do_init=True)
+
+    platform.build(soc, do_program=True)
diff --git a/lambdasoc/periph/sdram.py b/lambdasoc/periph/sdram.py
new file mode 100644 (file)
index 0000000..5c13ef9
--- /dev/null
@@ -0,0 +1,332 @@
+from nmigen import *
+from nmigen.asserts import *
+from nmigen.utils import log2_int
+
+from nmigen_soc import wishbone
+from nmigen_soc.memory import MemoryMap
+
+from . import Peripheral
+
+from ..cores import litedram
+
+
+__all__ = ["WritebackCache", "SDRAMPeripheral"]
+
+
+class WritebackCache(Elaboratable):
+    """Write-back cache.
+
+    A write-back cache designed to bridge the SoC interconnect to LiteDRAM.
+
+    Parameters
+    ----------
+    dram_port : :class:`litedram.NativePort`
+        LiteDRAM user port.
+    size : int
+        Cache size, in units of the initiator bus granularity (i.e. bytes when the
+        granularity is 8).
+    data_width : int
+        Initiator bus data width.
+    granularity : int
+        Initiator bus granularity.
+    dirty_init : bool
+        Initialize every cache line as dirty. Defaults to ``False``; mainly useful for
+        simulation.
+
+    Attributes
+    ----------
+    intr_bus : :class:`nmigen_soc.wishbone.Interface`
+        Initiator bus, with support for incremental bursts.
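+
+    Examples
+    --------
+    A minimal sketch; the port parameters below are illustrative, not taken from this patch::
+
+        dram_port = litedram.NativePort(addr_width=23, data_width=32)
+        cache     = WritebackCache(dram_port, size=4096, data_width=32, granularity=8)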
+    """
+    def __init__(self, dram_port, *, size, data_width, granularity=None, dirty_init=False):
+        if not isinstance(dram_port, litedram.NativePort):
+            raise TypeError("DRAM port must be an instance of lambdasoc.cores.litedram.NativePort, "
+                            "not {!r}"
+                            .format(dram_port))
+        if not isinstance(size, int) or size <= 0 or size & size - 1:
+            raise ValueError("Cache size must be a positive power of two integer, not {!r}"
+                             .format(size))
+        if not isinstance(data_width, int) or data_width <= 0 or data_width & data_width - 1:
+            raise ValueError("Data width must be a positive power of two integer, not {!r}"
+                             .format(data_width))
+        if dram_port.data_width % data_width != 0:
+            raise ValueError("DRAM port data width must be a multiple of data width, but {} is "
+                             "not a multiple of {}"
+                             .format(dram_port.data_width, data_width))
+
+        self.intr_bus  = wishbone.Interface(
+            addr_width  = dram_port.addr_width + log2_int(dram_port.data_width // data_width),
+            data_width  = data_width,
+            granularity = granularity,
+            features    = {"cti", "bte"},
+        )
+        intr_map = MemoryMap(
+            addr_width = self.intr_bus.addr_width + log2_int(data_width // granularity),
+            data_width = granularity,
+        )
+        try:
+            intr_map.add_window(dram_port.memory_map)
+        except AttributeError:
+            pass
+        self.intr_bus.memory_map = intr_map
+
+        self.dram_port = dram_port
+        self.size      = size
+
+        self.dirty_init = bool(dirty_init)
+
+    def elaborate(self, platform):
+        m = Module()
+
+        ratio    = self.dram_port.data_width // self.intr_bus.data_width
+        nb_lines = (self.size * self.intr_bus.granularity) // self.dram_port.data_width
+
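+        # Split the initiator address into the word offset within a cache line, the line index
+        # into the tag/data memories, and the tag used to detect hits.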
+        intr_adr = Record([
+            ("offset", log2_int(ratio)),
+            ("line",   log2_int(nb_lines)),
+            ("tag",    len(self.intr_bus.adr) - log2_int(nb_lines) - log2_int(ratio)),
+        ])
+        m.d.comb += intr_adr.eq(self.intr_bus.adr)
+
+        intr_adr_next = Record.like(intr_adr)
+
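+        # Compute the next burst address: linear bursts increment the full address, while
+        # wrapped bursts only increment their low-order bits (e.g. WRAP_4 wraps within a
+        # 4-beat window).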
+        with m.Switch(self.intr_bus.bte):
+            with m.Case(wishbone.BurstTypeExt.LINEAR):
+                m.d.comb += intr_adr_next.eq(intr_adr + 1)
+            with m.Case(wishbone.BurstTypeExt.WRAP_4):
+                m.d.comb += intr_adr_next[:2].eq(intr_adr[:2] + 1)
+                m.d.comb += intr_adr_next[2:].eq(intr_adr[2:])
+            with m.Case(wishbone.BurstTypeExt.WRAP_8):
+                m.d.comb += intr_adr_next[:3].eq(intr_adr[:3] + 1)
+                m.d.comb += intr_adr_next[3:].eq(intr_adr[3:])
+            with m.Case(wishbone.BurstTypeExt.WRAP_16):
+                m.d.comb += intr_adr_next[:4].eq(intr_adr[:4] + 1)
+                m.d.comb += intr_adr_next[4:].eq(intr_adr[4:])
+
+        tag_rp_data = Record([
+            ("tag",   intr_adr.tag.shape()),
+            ("dirty", 1),
+        ])
+        tag_wp_data = Record.like(tag_rp_data)
+
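+        # Tag memory; with `dirty_init`, every line starts out dirty with an all-ones tag,
+        # which exercises the eviction path from the start (mainly useful in simulation).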
+        tag_mem = Memory(width=len(tag_rp_data), depth=nb_lines)
+        if self.dirty_init:
+            tag_mem.init = [-1 for _ in range(nb_lines)]
+
+        m.submodules.tag_rp = tag_rp = tag_mem.read_port(transparent=False)
+        m.submodules.tag_wp = tag_wp = tag_mem.write_port()
+        tag_rp.en.reset = 0
+
+        m.d.comb += [
+            tag_rp_data.eq(tag_rp.data),
+            tag_wp.data.eq(tag_wp_data),
+        ]
+
+        dat_mem = Memory(width=self.dram_port.data_width, depth=nb_lines)
+        m.submodules.dat_rp = dat_rp = dat_mem.read_port(transparent=False)
+        m.submodules.dat_wp = dat_wp = dat_mem.write_port(granularity=self.intr_bus.granularity)
+        dat_rp.en.reset = 0
+
+        intr_bus_r = Record.like(self.intr_bus)
+        intr_adr_r = Record.like(intr_adr)
+        m.d.comb += intr_adr_r.eq(intr_bus_r.adr)
+
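+        # Cache FSM: CHECK serves hits directly from the tag/data memories; on a miss, a dirty
+        # line is first written back to DRAM (EVICT), then the requested line is fetched
+        # (REFILL) before returning to CHECK.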
+        with m.FSM() as fsm:
+            with m.State("CHECK"):
+                m.d.sync += [
+                    intr_bus_r.cyc.eq(self.intr_bus.cyc),
+                    intr_bus_r.stb.eq(self.intr_bus.stb),
+                    intr_bus_r.adr.eq(self.intr_bus.adr),
+                ]
+                # Tag/Data memory read
+                with m.If(self.intr_bus.cyc & self.intr_bus.stb):
+                    with m.If(self.intr_bus.ack & (self.intr_bus.cti == wishbone.CycleType.INCR_BURST)):
+                        m.d.comb += [
+                            tag_rp.addr.eq(intr_adr_next.line),
+                            dat_rp.addr.eq(intr_adr_next.line),
+                        ]
+                    with m.Else():
+                        m.d.comb += [
+                            tag_rp.addr.eq(intr_adr.line),
+                            dat_rp.addr.eq(intr_adr.line),
+                        ]
+                    with m.If(~intr_bus_r.cyc | ~intr_bus_r.stb | self.intr_bus.ack):
+                        m.d.comb += [
+                            tag_rp.en.eq(1),
+                            dat_rp.en.eq(1),
+                        ]
+                m.d.comb += [
+                    self.intr_bus.dat_r.eq(
+                        dat_rp.data.word_select(intr_adr.offset, len(self.intr_bus.dat_r))
+                    ),
+                ]
+                # Tag/Data memory write
+                m.d.comb += [
+                    tag_wp.addr      .eq(intr_adr.line),
+                    tag_wp_data.tag  .eq(intr_adr.tag),
+                    tag_wp_data.dirty.eq(1),
+                    dat_wp.addr      .eq(intr_adr.line),
+                    dat_wp.data      .eq(Repl(self.intr_bus.dat_w, ratio)),
+                ]
+                with m.If(self.intr_bus.cyc & self.intr_bus.stb):
+                    with m.If(intr_adr.tag == tag_rp_data.tag):
+                        m.d.comb += self.intr_bus.ack.eq(intr_bus_r.cyc & intr_bus_r.stb)
+                        with m.If(self.intr_bus.we & self.intr_bus.ack):
+                            m.d.comb += [
+                                tag_wp.en.eq(1),
+                                dat_wp.en.word_select(intr_adr.offset, len(self.intr_bus.sel)).eq(self.intr_bus.sel),
+                            ]
+                    with m.Elif(intr_bus_r.cyc & intr_bus_r.stb):
+                        m.d.sync += [
+                            intr_bus_r.cyc.eq(0),
+                            intr_bus_r.stb.eq(0),
+                        ]
+                        with m.If(tag_rp_data.dirty):
+                            m.next = "EVICT"
+                        with m.Else():
+                            m.next = "REFILL"
+
+            with m.State("EVICT"):
+                evict_done = Record([("cmd", 1), ("w", 1)])
+                with m.If(evict_done.all()):
+                    m.d.sync += evict_done.eq(0)
+                    m.next = "REFILL"
+                # Command
+                m.d.comb += [
+                    self.dram_port.cmd.valid.eq(~evict_done.cmd),
+                    self.dram_port.cmd.last .eq(0),
+                    self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, tag_rp_data.tag)),
+                    self.dram_port.cmd.we   .eq(1),
+                ]
+                with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
+                    m.d.sync += evict_done.cmd.eq(1)
+                # Write
+                m.d.comb += [
+                    self.dram_port.w.valid.eq(~evict_done.w),
+                    self.dram_port.w.we   .eq(Repl(Const(1), self.dram_port.data_width // 8)),
+                    self.dram_port.w.data .eq(dat_rp.data),
+                ]
+                with m.If(self.dram_port.w.valid & self.dram_port.w.ready):
+                    m.d.sync += evict_done.w.eq(1)
+
+            with m.State("REFILL"):
+                refill_done = Record([("cmd", 1), ("r", 1)])
+                with m.If(refill_done.all()):
+                    m.d.sync += refill_done.eq(0)
+                    m.next = "CHECK"
+                # Command
+                m.d.comb += [
+                    self.dram_port.cmd.valid.eq(~refill_done.cmd),
+                    self.dram_port.cmd.last .eq(1),
+                    self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, intr_adr_r.tag)),
+                    self.dram_port.cmd.we   .eq(0),
+                ]
+                with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
+                    m.d.sync += refill_done.cmd.eq(1)
+                # Read
+                m.d.comb += [
+                    self.dram_port.r.ready.eq(~refill_done.r),
+                    tag_wp.addr      .eq(intr_adr_r.line),
+                    tag_wp.en        .eq((self.dram_port.r.valid & self.dram_port.r.ready)),
+                    tag_wp_data.tag  .eq(intr_adr_r.tag),
+                    tag_wp_data.dirty.eq(0),
+                    dat_wp.addr      .eq(intr_adr_r.line),
+                    dat_wp.en        .eq(Repl((self.dram_port.r.valid & self.dram_port.r.ready), len(dat_wp.en))),
+                    dat_wp.data      .eq(self.dram_port.r.data),
+                ]
+                with m.If(self.dram_port.r.valid & self.dram_port.r.ready):
+                    m.d.sync += refill_done.r.eq(1)
+
+        if platform == "formal":
+            with m.If(Initial()):
+                m.d.comb += [
+                    Assume(fsm.ongoing("CHECK")),
+                    Assume(~intr_bus_r.cyc),
+                    Assume(~evict_done.any()),
+                    Assume(~refill_done.any()),
+                ]
+
+        return m
+
+
+class SDRAMPeripheral(Peripheral, Elaboratable):
+    """SDRAM controller peripheral.
+
+    Parameters
+    ----------
+    core : :class:`litedram.Core`
+        LiteDRAM core.
+    cache_size : int
+        Cache size, in bytes.
+    cache_dirty_init : bool
+        Initialize cache as dirty. Defaults to `False`.
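+
+    Examples
+    --------
+    A minimal sketch mirroring ``examples/sdram_soc.py``; ``litedram_cfg``, ``platform`` and
+    ``decoder`` are assumed to exist as in that example, and the cache size is illustrative::
+
+        core  = litedram.Core(litedram_cfg, pins=litedram_cfg.request_pins(platform, "ddr3", 0))
+        sdram = SDRAMPeripheral(core=core, cache_size=8192)
+        decoder.add(sdram.bus, addr=0x40000000)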
+    """
+    def __init__(self, *, core, cache_size, cache_dirty_init=False):
+        super().__init__()
+
+        if not isinstance(core, litedram.Core):
+            raise TypeError("LiteDRAM core must be an instance of lambdasoc.cores.litedram.Core, "
+                            "not {!r}"
+                            .format(core))
+        self.core = core
+
+        data_width  = core.ctrl_bus.data_width
+        granularity = core.ctrl_bus.granularity
+
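+        # Two windows sit behind the peripheral bridge: the cached SDRAM contents, followed by
+        # the LiteDRAM control registers placed at offset ``core.size``.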
+        self._data_bus = self.window(
+            addr_width  = core.user_port.addr_width
+                        + log2_int(core.user_port.data_width // 8)
+                        - log2_int(data_width // granularity),
+            data_width  = data_width,
+            granularity = granularity,
+            features    = {"cti", "bte"},
+        )
+        self._ctrl_bus = self.window(
+            addr_width  = core._ctrl_bus.addr_width,
+            data_width  = core._ctrl_bus.data_width,
+            granularity = core._ctrl_bus.granularity,
+            addr        = core.size,
+        )
+
+        self._cache    = WritebackCache(
+            core.user_port,
+            size        = cache_size,
+            data_width  = data_width,
+            granularity = granularity,
+            dirty_init  = cache_dirty_init,
+        )
+
+        self._ctrl_bus.memory_map.add_window(core.ctrl_bus.memory_map)
+        self._data_bus.memory_map.add_window(self._cache.intr_bus.memory_map)
+
+        self._bridge = self.bridge(data_width=data_width, granularity=granularity)
+        self.bus     = self._bridge.bus
+
+    def elaborate(self, platform):
+        m = Module()
+
+        m.submodules.bridge = self._bridge
+        m.submodules.cache  = self._cache
+        m.submodules.core   = self.core
+
+        m.d.comb += [
+            self._cache.intr_bus.adr  .eq(self._data_bus.adr),
+            self._cache.intr_bus.cyc  .eq(self._data_bus.cyc),
+            self._cache.intr_bus.stb  .eq(self._data_bus.stb),
+            self._cache.intr_bus.sel  .eq(self._data_bus.sel),
+            self._cache.intr_bus.we   .eq(self._data_bus.we),
+            self._cache.intr_bus.dat_w.eq(self._data_bus.dat_w),
+            self._cache.intr_bus.cti  .eq(self._data_bus.cti),
+            self._cache.intr_bus.bte  .eq(self._data_bus.bte),
+            self._data_bus.ack  .eq(self._cache.intr_bus.ack),
+            self._data_bus.dat_r.eq(self._cache.intr_bus.dat_r),
+
+            self.core.ctrl_bus.adr  .eq(self._ctrl_bus.adr),
+            self.core.ctrl_bus.cyc  .eq(self._ctrl_bus.cyc),
+            self.core.ctrl_bus.stb  .eq(self._ctrl_bus.stb),
+            self.core.ctrl_bus.sel  .eq(self._ctrl_bus.sel),
+            self.core.ctrl_bus.we   .eq(self._ctrl_bus.we),
+            self.core.ctrl_bus.dat_w.eq(self._ctrl_bus.dat_w),
+            self._ctrl_bus.ack  .eq(self.core.ctrl_bus.ack),
+            self._ctrl_bus.dat_r.eq(self.core.ctrl_bus.dat_r),
+        ]
+
+        return m
index d0f226240ea4bea9cb8e1d55dbf298eda5484325..5f26054582886a070d6414694035fa604780ebcf 100644 (file)
@@ -1,7 +1,10 @@
+import os
+
 from .base import *
 from ..cpu import CPU
 from ..periph.intc import InterruptController
 from ..periph.sram import SRAMPeripheral
+from ..periph.sdram import SDRAMPeripheral
 from ..periph.serial import AsyncSerialPeripheral
 from ..periph.timer import TimerPeripheral
 
@@ -13,7 +16,8 @@ class CPUSoC(SoC):
     cpu    = socproperty(CPU)
     intc   = socproperty(InterruptController)
     rom    = socproperty(SRAMPeripheral)
-    ram    = socproperty(SRAMPeripheral) # TODO: abstract class for storage peripherals.
+    ram    = socproperty(SRAMPeripheral)
+    sdram  = socproperty(SDRAMPeripheral, weak=True)
     uart   = socproperty(AsyncSerialPeripheral)
     timer  = socproperty(TimerPeripheral)
 
@@ -21,11 +25,13 @@ class CPUSoC(SoC):
     clk_freq = socproperty(int)
 
     def build(self, name=None,
+              litedram_dir="build/litedram",
               build_dir="build/soc", do_build=True,
               do_init=False):
         """TODO
         """
-        plan = BIOSBuilder().prepare(self, build_dir, name)
+        plan = BIOSBuilder().prepare(self, build_dir, name,
+                                     litedram_dir=os.path.abspath(litedram_dir))
         if not do_build:
             return plan
 
@@ -33,7 +39,7 @@ class CPUSoC(SoC):
         if not do_init:
             return products
 
-        with products.extract("bios.bin") as bios_filename:
+        with products.extract("bios/bios.bin") as bios_filename:
             with open(bios_filename, "rb") as f:
                 words = iter(lambda: f.read(self.cpu.data_width // 8), b'')
                 bios  = [int.from_bytes(w, self.cpu.byteorder) for w in words]
@@ -66,19 +72,47 @@ class BIOSBuilder(ConfigBuilder):
             CONFIG_TIMER_IRQNO={{soc.intc.find_index(soc.timer.irq)}}
             CONFIG_TIMER_CTR_WIDTH={{soc.timer.width}}
             CONFIG_CLOCK_FREQ={{soc.clk_freq}}
+
+            {% if soc.sdram is not none %}
+            CONFIG_WITH_SDRAM=y
+            CONFIG_SDRAM_START={{hex(periph_addr(soc.sdram))}}
+            CONFIG_SDRAM_SIZE={{hex(soc.sdram.core.size)}}
+            {% else %}
+            CONFIG_WITH_SDRAM=n
+            {% endif %}
+        """,
+        "litex_config.h": r"""
+            // {{autogenerated}}
+            #ifndef __LITEX_CONFIG_H_LAMBDASOC
+            #define __LITEX_CONFIG_H_LAMBDASOC
+
+            #define LX_CONFIG_TIMER_START {{hex(periph_addr(soc.timer))}}
+
+            {% if soc.sdram is not none %}
+            #define LX_CONFIG_SDRAM_START {{hex(periph_addr(soc.sdram))}}UL
+            #define LX_CONFIG_SDRAM_SIZE {{hex(soc.sdram.core.size)}}UL
+            #define LX_CONFIG_SDRAM_CACHE_SIZE {{soc.sdram._cache.size}}
+            #define LX_CONFIG_MEMTEST_DATA_SIZE 2*1024*1024
+            #define LX_CONFIG_MEMTEST_ADDR_SIZE 65536
+            {% endif %}
+
+            #endif
         """,
     }
     command_templates = [
         *ConfigBuilder.command_templates,
         r"""
+            {% if soc.sdram is not none %}
+            litedram_dir={{litedram_dir}}/{{soc.sdram.core.name}}
+            {% endif %}
             build={{build_dir}}
             KCONFIG_CONFIG={{build_dir}}/{{name}}.config
             make -C {{software_dir}}/bios 1>&2
         """,
     ]
 
-    def prepare(self, soc, build_dir, name):
+    def prepare(self, soc, build_dir, name, litedram_dir):
         if not isinstance(soc, CPUSoC):
             raise TypeError("SoC must be an instance of CPUSoC, not {!r}"
                             .format(soc))
-        return super().prepare(soc, build_dir, name)
+        return super().prepare(soc, build_dir, name, litedram_dir=litedram_dir)
index f8b86196a85c45cb6407876c98b6433698e2e8ae..57ce4bad04dfd1148d5e438d68100703925a1edf 160000 (submodule)
@@ -1 +1 @@
-Subproject commit f8b86196a85c45cb6407876c98b6433698e2e8ae
+Subproject commit 57ce4bad04dfd1148d5e438d68100703925a1edf
diff --git a/lambdasoc/test/test_periph_sdram.py b/lambdasoc/test/test_periph_sdram.py
new file mode 100644 (file)
index 0000000..feff56c
--- /dev/null
@@ -0,0 +1,256 @@
+# nmigen: UnusedElaboratable=no
+
+import unittest
+
+from nmigen import *
+from nmigen.test.utils import *
+from nmigen.asserts import *
+from nmigen.utils import log2_int
+
+from nmigen_soc import wishbone
+
+from ..periph.sdram import *
+from ..cores import litedram
+
+from .utils.wishbone import WishboneSubordinateSpec
+from .utils.formal import FormalTestCase
+
+
+class WritebackCacheSpec(Elaboratable):
+    def __init__(self, dut):
+        self.dut = dut
+
+    def elaborate(self, platform):
+        m = Module()
+
+        m.submodules.dut = dut = self.dut
+
+        # Wishbone interface
+
+        m.submodules.wb_sub_spec = WishboneSubordinateSpec(self.dut.intr_bus)
+
+        ratio        = dut.dram_port.data_width // dut.intr_bus.data_width
+        nb_lines     = (dut.size * dut.intr_bus.granularity) // dut.dram_port.data_width
+        intr_bus_adr = Record([
+            ("offset", log2_int(ratio)),
+            ("line",   log2_int(nb_lines)),
+            ("tag",    dut.intr_bus.addr_width - log2_int(nb_lines) - log2_int(ratio)),
+        ])
+        m.d.comb += [
+            Assert(len(intr_bus_adr) == len(dut.intr_bus.adr)),
+            intr_bus_adr.eq(dut.intr_bus.adr),
+        ]
+
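+        # Shadow a single arbitrary DRAM word: acknowledged initiator writes to that address
+        # update the shadow copy, initiator reads and DRAM writes at that address are asserted
+        # to match it, and DRAM reads at that address are assumed to return it.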
+        dram_spec_addr = AnyConst(dut.dram_port.addr_width)
+        dram_spec_data = Signal(dut.dram_port.data_width)
+        dram_spec_line = Array(
+            Signal(dut.intr_bus.data_width, name=f"dram_spec_word_{i}")
+            for i in range(ratio)
+        )
+
+        m.d.comb += dram_spec_data.eq(Cat(dram_spec_line))
+
+        with m.If(Initial()):
+            m.d.comb += Assume(dram_spec_data == 0)
+
+        intr_dram_addr = Signal.like(dram_spec_addr)
+        m.d.comb += intr_dram_addr.eq(Cat(intr_bus_adr.line, intr_bus_adr.tag))
+
+        with m.If(dut.intr_bus.cyc & dut.intr_bus.stb & dut.intr_bus.ack
+                & (intr_dram_addr == dram_spec_addr)
+                & dut.intr_bus.we):
+            dram_spec_word = dram_spec_line[intr_bus_adr.offset]
+            for i, sel_bit in enumerate(dut.intr_bus.sel):
+                dram_spec_bits = dram_spec_word    .word_select(i, dut.intr_bus.granularity)
+                intr_data_bits = dut.intr_bus.dat_w.word_select(i, dut.intr_bus.granularity)
+                with m.If(sel_bit):
+                    m.d.sync += dram_spec_bits.eq(intr_data_bits)
+
+        # * A cache hit at `dram_spec_addr` must be coherent with `dram_spec_data`.
+
+        with m.If(dut.intr_bus.cyc & dut.intr_bus.stb & dut.intr_bus.ack
+                & (intr_dram_addr == dram_spec_addr)):
+            dram_spec_word = dram_spec_line[intr_bus_adr.offset]
+            m.d.comb += Assert(dut.intr_bus.dat_r == dram_spec_word)
+
+        # DRAM interface
+
+        dram_cmd  = Record([
+            ("addr", dut.dram_port.addr_width),
+            ("we",   1),
+        ])
+        dram_data = Record([
+            ("r", dut.dram_port.data_width),
+            ("w", dut.dram_port.data_width),
+        ])
+        dram_done = Record([
+            ("cmd", 1),
+            ("r",   1),
+            ("w",   1),
+        ])
+
+        with m.If(Initial()):
+            m.d.comb += Assume(~dram_done.any())
+
+        with m.If(dut.dram_port.cmd.valid & dut.dram_port.cmd.ready):
+            m.d.sync += [
+                dram_done.cmd.eq(1),
+                dram_cmd.addr.eq(dut.dram_port.cmd.addr),
+                dram_cmd.we  .eq(dut.dram_port.cmd.we),
+            ]
+        with m.If(dut.dram_port.w.valid & dut.dram_port.w.ready):
+            m.d.sync += [
+                dram_done.w.eq(1),
+                dram_data.w.eq(dut.dram_port.w.data),
+            ]
+        with m.If(dut.dram_port.r.ready & dut.dram_port.r.valid):
+            m.d.sync += [
+                dram_done.r.eq(1),
+                dram_data.r.eq(dut.dram_port.r.data),
+            ]
+
+        with m.If(dram_done.cmd & dram_done.r):
+            with m.If(dram_cmd.addr == dram_spec_addr):
+                m.d.comb += Assume(dram_data.r == dram_spec_data)
+
+        # Some of the following constraints are tighter than what the LiteDRAM native interface
+        # actually allows. We may relax them in the future to improve cache performance.
+
+        # * A new command must not be initiated before the previous one has completed.
+
+        with m.If(dut.dram_port.cmd.valid):
+            m.d.comb += Assert(~dram_done.cmd)
+        with m.If(dut.dram_port.w.valid):
+            m.d.comb += Assert(~dram_done.w)
+        with m.If(dut.dram_port.r.ready):
+            m.d.comb += Assert(~dram_done.r)
+
+        # * A command may either be a read or a write, but not both.
+
+        with m.If(dram_done.cmd):
+            with m.If(dram_cmd.we):
+                m.d.comb += Assert(~dram_done.r)
+            with m.Else():
+                m.d.comb += Assert(~dram_done.w)
+
+        m.d.comb += [
+            Assert(dram_done.r.implies(~dram_done.w)),
+            Assert(dram_done.w.implies(~dram_done.r)),
+        ]
+
+        # * A read command consists of:
+        #   - a transaction on the `dram.cmd` stream with `dram.cmd.we` de-asserted;
+        #   - a transaction on the `dram.r` stream.
+
+        with m.If(dram_done.cmd & dram_done.r):
+            m.d.comb += Assert(~dram_cmd.we)
+            m.d.sync += [
+                dram_done.cmd.eq(0),
+                dram_done.r  .eq(0),
+            ]
+
+        # * A write command consists of:
+        #   - a transaction on the `dram.cmd` stream with `dram.cmd.we` asserted;
+        #   - a transaction on the `dram.w` stream.
+
+        with m.If(dram_done.cmd & dram_done.w):
+            m.d.comb += Assert(dram_cmd.we)
+            m.d.sync += [
+                dram_done.cmd.eq(0),
+                dram_done.w  .eq(0),
+            ]
+
+        # * A DRAM write at `dram_spec_addr` must be coherent with `dram_spec_data`.
+
+        with m.If(dram_done.cmd & dram_done.w):
+            with m.If(dram_cmd.addr == dram_spec_addr):
+                m.d.comb += Assert(dram_data.w == dram_spec_data)
+
+        # For fairness, assume that any stream transaction completes in at most 3 clock cycles.
+
+        dram_wait = Record([
+            ("cmd", range(2)),
+            ("r",   range(2)),
+            ("w",   range(2)),
+        ])
+
+        with m.If(dut.dram_port.cmd.valid & ~dut.dram_port.cmd.ready):
+            m.d.sync += dram_wait.cmd.eq(dram_wait.cmd + 1)
+        with m.Else():
+            m.d.sync += dram_wait.cmd.eq(0)
+
+        with m.If(dut.dram_port.w.valid & ~dut.dram_port.w.ready):
+            m.d.sync += dram_wait.w.eq(dram_wait.w + 1)
+        with m.Else():
+            m.d.sync += dram_wait.w.eq(0)
+
+        with m.If(dut.dram_port.r.ready & ~dut.dram_port.r.valid):
+            m.d.sync += dram_wait.r.eq(dram_wait.r + 1)
+        with m.Else():
+            m.d.sync += dram_wait.r.eq(0)
+
+        m.d.comb += [
+            Assume(dram_wait.cmd < 2),
+            Assume(dram_wait.r   < 2),
+            Assume(dram_wait.w   < 2),
+        ]
+
+        return m
+
+
+class WritebackCacheTestCase(FormalTestCase):
+    def test_wrong_dram_port(self):
+        with self.assertRaisesRegex(TypeError,
+                r"DRAM port must be an instance of lambdasoc\.cores\.litedram\.NativePort, "
+                r"not 'foo'"):
+            WritebackCache(dram_port="foo", size=8, data_width=16)
+
+    def test_wrong_size(self):
+        dram_port = litedram.NativePort(addr_width=23, data_width=32)
+        with self.assertRaisesRegex(ValueError,
+                r"Cache size must be a positive power of two integer, not 'foo'"):
+            WritebackCache(dram_port, size="foo", data_width=16)
+        with self.assertRaisesRegex(ValueError,
+                r"Cache size must be a positive power of two integer, not -1"):
+            WritebackCache(dram_port, size=-1, data_width=16)
+        with self.assertRaisesRegex(ValueError,
+                r"Cache size must be a positive power of two integer, not 42"):
+            WritebackCache(dram_port, size=42, data_width=16)
+
+    def test_wrong_data_width(self):
+        dram_port = litedram.NativePort(addr_width=23, data_width=32)
+        with self.assertRaisesRegex(ValueError,
+                r"Data width must be a positive power of two integer, not 'foo'"):
+            WritebackCache(dram_port, size=8, data_width="foo")
+        with self.assertRaisesRegex(ValueError,
+                r"Data width must be a positive power of two integer, not -1"):
+            WritebackCache(dram_port, size=8, data_width=-1)
+        with self.assertRaisesRegex(ValueError,
+                r"Data width must be a positive power of two integer, not 42"):
+            WritebackCache(dram_port, size=8, data_width=42)
+
+    def test_wrong_ratio(self):
+        dram_port = litedram.NativePort(addr_width=23, data_width=32)
+        with self.assertRaisesRegex(ValueError,
+                r"DRAM port data width must be a multiple of data width, but 32 is not a multiple "
+                r"of 64"):
+            WritebackCache(dram_port, size=8, data_width=64)
+
+    def check(self, dut):
+        m = Module()
+        m.domains.sync = ClockDomain(reset_less=True)
+        m.submodules.spec = WritebackCacheSpec(dut)
+        self.assertFormal(m, mode="hybrid", depth=10)
+
+    def test_spec_simple(self):
+        dram_port = litedram.NativePort(addr_width=23, data_width=32)
+        dut = WritebackCache(dram_port, size=8, data_width=16, granularity=8)
+        self.check(dut)
+
+
+class SDRAMPeripheralTestCase(unittest.TestCase):
+    def test_wrong_core(self):
+        with self.assertRaisesRegex(TypeError,
+                r"LiteDRAM core must be an instance of lambdasoc\.cores\.litedram\.Core, "
+                r"not 'foo'"):
+            sdram = SDRAMPeripheral(core="foo", cache_size=8)
diff --git a/lambdasoc/test/utils/formal.py b/lambdasoc/test/utils/formal.py
new file mode 100644 (file)
index 0000000..ec8be04
--- /dev/null
@@ -0,0 +1,65 @@
+import os
+import shutil
+import subprocess
+import textwrap
+import traceback
+import unittest
+
+from nmigen.hdl.ast import *
+from nmigen.hdl.ir import *
+from nmigen.back import rtlil
+from .toolchain import require_tool
+
+
+__all__ = ["FormalTestCase"]
+
+
+class FormalTestCase(unittest.TestCase):
+    def assertFormal(self, spec, mode="bmc", depth=1):
+        caller, *_ = traceback.extract_stack(limit=2)
+        spec_root, _ = os.path.splitext(caller.filename)
+        spec_dir = os.path.dirname(spec_root)
+        spec_name = "{}_{}".format(
+            os.path.basename(spec_root).replace("test_", "spec_"),
+            caller.name.replace("test_", "")
+        )
+
+        # The sby -f switch does not appear to be fully functional when sby reads from stdin.
+        if os.path.exists(os.path.join(spec_dir, spec_name)):
+            shutil.rmtree(os.path.join(spec_dir, spec_name))
+
+        if mode == "hybrid":
+            # A mix of BMC and k-induction.
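+            # The Yosys script unsets the `init` attribute on every register except nmigen's
+            # sample registers, leaving the initial state unconstrained for the BMC run.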
+            script = "setattr -unset init w:* a:nmigen.sample_reg %d"
+            mode   = "bmc"
+        else:
+            script = ""
+
+        config = textwrap.dedent("""\
+        [options]
+        mode {mode}
+        depth {depth}
+        wait on
+
+        [engines]
+        smtbmc
+
+        [script]
+        read_ilang top.il
+        prep
+        {script}
+
+        [file top.il]
+        {rtlil}
+        """).format(
+            mode=mode,
+            depth=depth,
+            script=script,
+            rtlil=rtlil.convert(Fragment.get(spec, platform="formal"))
+        )
+        with subprocess.Popen([require_tool("sby"), "-f", "-d", spec_name], cwd=spec_dir,
+                              universal_newlines=True,
+                              stdin=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
+            stdout, stderr = proc.communicate(config)
+            if proc.returncode != 0:
+                self.fail("Formal verification failed:\n" + stdout)
diff --git a/lambdasoc/test/utils/toolchain.py b/lambdasoc/test/utils/toolchain.py
new file mode 100644 (file)
index 0000000..fca2bac
--- /dev/null
@@ -0,0 +1,37 @@
+import os
+import shutil
+
+
+__all__ = ["ToolNotFound", "tool_env_var", "has_tool", "require_tool"]
+
+
+class ToolNotFound(Exception):
+    pass
+
+
+def tool_env_var(name):
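+    # e.g. tool_env_var("yosys-smtbmc") -> "YOSYS_SMTBMC"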
+    return name.upper().replace("-", "_")
+
+
+def _get_tool(name):
+    return os.environ.get(tool_env_var(name), name)
+
+
+def has_tool(name):
+    return shutil.which(_get_tool(name)) is not None
+
+
+def require_tool(name):
+    env_var = tool_env_var(name)
+    path = _get_tool(name)
+    if shutil.which(path) is None:
+        if env_var in os.environ:
+            raise ToolNotFound("Could not find required tool {} in {} as "
+                               "specified via the {} environment variable".
+                               format(name, path, env_var))
+        else:
+            raise ToolNotFound("Could not find required tool {} in PATH. Place "
+                               "it directly in PATH or specify path explicitly "
+                               "via the {} environment variable".
+                               format(name, env_var))
+    return path
index a2eef4aa69880e865f2028fe17f1490d22f411a0..c5ed4d9df6696003f38bd90e01c164e9582a4d3d 100644 (file)
@@ -1,9 +1,10 @@
 from nmigen import *
+from nmigen.asserts import *
 
 from nmigen_soc import wishbone
 
 
-__all__ = ["wb_read", "wb_write"]
+__all__ = ["wb_read", "wb_write", "WishboneSubordinateSpec"]
 
 
 def wb_read(bus, addr, sel, timeout=32):
@@ -41,3 +42,70 @@ def wb_write(bus, addr, data, sel, timeout=32):
     yield bus.cyc.eq(0)
     yield bus.stb.eq(0)
     yield bus.we.eq(0)
+
+
+class WishboneSubordinateSpec(Elaboratable):
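+    """Formal specification of a well-behaved Wishbone subordinate.
+
+    Assumes that the initiator holds its inputs stable until ``ack``, sequences burst
+    addresses according to ``cti``/``bte``, and ends the cycle after a non-burst (or
+    end-of-burst) acknowledgement; asserts that ``ack`` is never raised outside a cycle.
+    """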
+    def __init__(self, bus):
+        if not isinstance(bus, wishbone.Interface):
+            raise TypeError("Bus must be an instance of wishbone.Interface, not {!r}"
+                            .format(bus))
+        self.bus = bus
+
+    def elaborate(self, platform):
+        m = Module()
+
+        with m.If(Initial()):
+            m.d.comb += [
+                Assume(~self.bus.cyc),
+                Assume(~self.bus.stb),
+                Assert(~self.bus.ack),
+            ]
+
+        with m.If(~self.bus.cyc & ~Past(self.bus.cyc)):
+            m.d.comb += Assert(~self.bus.ack)
+
+        with m.If(Past(self.bus.cyc) & Past(self.bus.stb)):
+            # Assume that input signals are held until the transaction is acknowledged.
+            with m.If(~Past(self.bus.ack)):
+                m.d.comb += [
+                    Assume(self.bus.adr   == Past(self.bus.adr)),
+                    Assume(self.bus.we    == Past(self.bus.we)),
+                    Assume(self.bus.dat_w == Past(self.bus.dat_w)),
+                    Assume(self.bus.sel   == Past(self.bus.sel)),
+                ]
+                if hasattr(self.bus, "cti"):
+                    m.d.comb += Assume(self.bus.cti == Past(self.bus.cti))
+                if hasattr(self.bus, "bte"):
+                    m.d.comb += Assume(self.bus.bte == Past(self.bus.bte))
+
+            if hasattr(self.bus, "cti"):
+                # The previous transaction was acknowledged, and this is an incrementing burst.
+                with m.Elif(Past(self.bus.cti) == wishbone.CycleType.INCR_BURST):
+                    if hasattr(self.bus, "bte"):
+                        with m.Switch(self.bus.bte):
+                            with m.Case(wishbone.BurstTypeExt.LINEAR):
+                                m.d.comb += Assume(self.bus.adr == Past(self.bus.adr) + 1)
+                            with m.Case(wishbone.BurstTypeExt.WRAP_4):
+                                m.d.comb += [
+                                    Assume(self.bus.adr[:2] == Past(self.bus.adr)[:2] + 1),
+                                    Assume(self.bus.adr[2:] == Past(self.bus.adr)[2:]),
+                                ]
+                            with m.Case(wishbone.BurstTypeExt.WRAP_8):
+                                m.d.comb += [
+                                    Assume(self.bus.adr[:3] == Past(self.bus.adr)[:3] + 1),
+                                    Assume(self.bus.adr[3:] == Past(self.bus.adr)[3:]),
+                                ]
+                            with m.Case(wishbone.BurstTypeExt.WRAP_16):
+                                m.d.comb += [
+                                    Assume(self.bus.adr[:4] == Past(self.bus.adr)[:4] + 1),
+                                    Assume(self.bus.adr[4:] == Past(self.bus.adr)[4:]),
+                                ]
+                    else:
+                        m.d.comb += Assume(self.bus.adr == Past(self.bus.adr) + 1)
+
+            # The previous transaction was acknowledged, and this is either not a burst, or the
+            # end of a burst.
+            with m.Else():
+                m.d.comb += Assume(~self.bus.cyc)
+
+        return m