add option to set small cache sizes in icache.py
[soc.git] / src / soc / experiment / icache.py
index f9d7f64c2ee1b33178dad74073f87e6a654b7100..ff45332d8fd9a5460464468fcefbffd9e808306f 100644 (file)
@@ -63,202 +63,216 @@ from nmigen.cli import main, rtlil
 # Also, check out the cxxsim nmigen branch, and latest yosys from git
 from nmutil.sim_tmp_alternative import Simulator, Settle
 
+# from microwatt/utils.vhdl
+def ispow2(n):
+    return n != 0 and (n & (n - 1)) == 0
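+# a couple of worked cases of the power-of-two test (not exhaustive):
+#   ispow2(64) -> True, ispow2(48) -> False, ispow2(0) -> False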
 
 SIM            = 0
-LINE_SIZE      = 64
-# BRAM organisation: We never access more than wishbone_data_bits
-# at a time so to save resources we make the array only that wide,
-# and use consecutive indices for to make a cache "line"
-#
-# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
-ROW_SIZE       = WB_DATA_BITS // 8
-# Number of lines in a set
-NUM_LINES      = 64
-# Number of ways
-NUM_WAYS       = 2
-# L1 ITLB number of entries (direct mapped)
-TLB_SIZE       = 64
-# L1 ITLB log_2(page_size)
-TLB_LG_PGSZ    = 12
-# Number of real address bits that we store
-REAL_ADDR_BITS = 56
 # Non-zero to enable log data collection
 LOG_LENGTH     = 0
 
-ROW_SIZE_BITS  = ROW_SIZE * 8
-# ROW_PER_LINE is the number of row (wishbone) transactions in a line
-ROW_PER_LINE   = LINE_SIZE // ROW_SIZE
-# BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
-BRAM_ROWS      = NUM_LINES * ROW_PER_LINE
-# INSN_PER_ROW is the number of 32bit instructions per BRAM row
-INSN_PER_ROW   = ROW_SIZE_BITS // 32
-
-# Bit fields counts in the address
-#
-# INSN_BITS is the number of bits to select an instruction in a row
-INSN_BITS      = log2_int(INSN_PER_ROW)
-# ROW_BITS is the number of bits to select a row
-ROW_BITS       = log2_int(BRAM_ROWS)
-# ROW_LINE_BITS is the number of bits to select a row within a line
-ROW_LINE_BITS  = log2_int(ROW_PER_LINE)
-# LINE_OFF_BITS is the number of bits for the offset in a cache line
-LINE_OFF_BITS  = log2_int(LINE_SIZE)
-# ROW_OFF_BITS is the number of bits for the offset in a row
-ROW_OFF_BITS   = log2_int(ROW_SIZE)
-# INDEX_BITS is the number of bits to select a cache line
-INDEX_BITS     = log2_int(NUM_LINES)
-# SET_SIZE_BITS is the log base 2 of the set size
-SET_SIZE_BITS  = LINE_OFF_BITS + INDEX_BITS
-# TAG_BITS is the number of bits of the tag part of the address
-TAG_BITS       = REAL_ADDR_BITS - SET_SIZE_BITS
-# TAG_WIDTH is the width in bits of each way of the tag RAM
-TAG_WIDTH      = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
-
-# WAY_BITS is the number of bits to select a way
-WAY_BITS       = log2_int(NUM_WAYS)
-TAG_RAM_WIDTH  = TAG_BITS * NUM_WAYS
-
-# L1 ITLB
-TLB_BITS        = log2_int(TLB_SIZE)
-TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
-TLB_PTE_BITS    = 64
-
-print("BRAM_ROWS       =", BRAM_ROWS)
-print("INDEX_BITS      =", INDEX_BITS)
-print("INSN_BITS       =", INSN_BITS)
-print("INSN_PER_ROW    =", INSN_PER_ROW)
-print("LINE_SIZE       =", LINE_SIZE)
-print("LINE_OFF_BITS   =", LINE_OFF_BITS)
-print("LOG_LENGTH      =", LOG_LENGTH)
-print("NUM_LINES       =", NUM_LINES)
-print("NUM_WAYS        =", NUM_WAYS)
-print("REAL_ADDR_BITS  =", REAL_ADDR_BITS)
-print("ROW_BITS        =", ROW_BITS)
-print("ROW_OFF_BITS    =", ROW_OFF_BITS)
-print("ROW_LINE_BITS   =", ROW_LINE_BITS)
-print("ROW_PER_LINE    =", ROW_PER_LINE)
-print("ROW_SIZE        =", ROW_SIZE)
-print("ROW_SIZE_BITS   =", ROW_SIZE_BITS)
-print("SET_SIZE_BITS   =", SET_SIZE_BITS)
-print("SIM             =", SIM)
-print("TAG_BITS        =", TAG_BITS)
-print("TAG_RAM_WIDTH   =", TAG_RAM_WIDTH)
-print("TAG_BITS        =", TAG_BITS)
-print("TLB_BITS        =", TLB_BITS)
-print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
-print("TLB_LG_PGSZ     =", TLB_LG_PGSZ)
-print("TLB_PTE_BITS    =", TLB_PTE_BITS)
-print("TLB_SIZE        =", TLB_SIZE)
-print("WAY_BITS        =", WAY_BITS)
-
-# from microwatt/utils.vhdl
-def ispow2(n):
-    return n != 0 and (n & (n - 1)) == 0
-
-assert LINE_SIZE % ROW_SIZE == 0
-assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
-assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
-assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
-assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
-assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
-    "geometry bits don't add up"
-assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
-   "geometry bits don't add up"
-assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
-    "geometry bits don't add up"
-assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
-    "geometry bits don't add up"
-
-# Example of layout for 32 lines of 64 bytes:
-#
-# ..  tag    |index|  line  |
-# ..         |   row   |    |
-# ..         |     |   | |00| zero          (2)
-# ..         |     |   |-|  | INSN_BITS     (1)
-# ..         |     |---|    | ROW_LINE_BITS  (3)
-# ..         |     |--- - --| LINE_OFF_BITS (6)
-# ..         |         |- --| ROW_OFF_BITS  (3)
-# ..         |----- ---|    | ROW_BITS      (8)
-# ..         |-----|        | INDEX_BITS    (5)
-# .. --------|              | TAG_BITS      (53)
-
-# The cache data BRAM organized as described above for each way
-#subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
-#
-def RowPerLineValidArray():
-    return Array(Signal(name="rows_valid_%d" %x) \
-                 for x in range(ROW_PER_LINE))
-
-
-# TODO to be passed to nigmen as ram attributes
-# attribute ram_style : string;
-# attribute ram_style of cache_tags : signal is "distributed";
-
-def TLBRecord(name):
-    tlb_layout = [ ('tag', TLB_EA_TAG_BITS),
-                  ('pte', TLB_PTE_BITS)
-                 ]
-    return Record(tlb_layout, name=name)
-
-def TLBArray():
-    return Array(TLBRecord("tlb%d" % x) for x in range(TLB_SIZE))
-
-# PLRU output interface
-def PLRUOut():
-    return Array(Signal(WAY_BITS, name="plru_out_%d" %x) \
-                 for x in range(NUM_LINES))
-
-# Return the cache line index (tag index) for an address
-def get_index(addr):
-    return addr[LINE_OFF_BITS:SET_SIZE_BITS]
-
-# Return the cache row index (data memory) for an address
-def get_row(addr):
-    return addr[ROW_OFF_BITS:SET_SIZE_BITS]
-
-# Return the index of a row within a line
-def get_row_of_line(row):
-    return row[:ROW_BITS][:ROW_LINE_BITS]
-
-# Returns whether this is the last row of a line
-def is_last_row_addr(addr, last):
-    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
-
-# Returns whether this is the last row of a line
-def is_last_row(row, last):
-    return get_row_of_line(row) == last
-
-# Return the next row in the current cache line. We use a dedicated
-# function in order to limit the size of the generated adder to be
-# only the bits within a cache line (3 bits with default settings)
-def next_row(row):
-    row_v = row[0:ROW_LINE_BITS] + 1
-    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
-
-# Read the instruction word for the given address
-# in the current cache row
-def read_insn_word(addr, data):
-    word = addr[2:INSN_BITS+2]
-    return data.word_select(word, 32)
-
-# Get the tag value from the address
-def get_tag(addr):
-    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
-
-# Read a tag from a tag memory row
-def read_tag(way, tagset):
-    return tagset.word_select(way, TAG_BITS)
-
-# Write a tag to tag memory row
-def write_tag(way, tagset, tag):
-    return read_tag(way, tagset).eq(tag)
-
-# Simple hash for direct-mapped TLB index
-def hash_ea(addr):
-    hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
-           addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS ] ^
-           addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
-    return hsh
+class ICacheConfig:
+    def __init__(self, XLEN          = 64,
+                       LINE_SIZE     = 64,
+                       NUM_LINES     = 64,  # Number of lines in a set
+                       NUM_WAYS      = 2,  # Number of ways
+                       TLB_SIZE      = 64,  # L1 ITLB number of entries
+                       TLB_LG_PGSZ   = 12): # L1 ITLB log_2(page_size)
+        self.XLEN           = XLEN
+        self.LINE_SIZE      = LINE_SIZE
+        self.NUM_LINES      = NUM_LINES
+        self.NUM_WAYS       = NUM_WAYS
+        self.TLB_SIZE       = TLB_SIZE
+        self.TLB_LG_PGSZ    = TLB_LG_PGSZ
+
+        # BRAM organisation: We never access more than wishbone_data_bits
+        # at a time so to save resources we make the array only that wide,
+        # and use consecutive indices to make a cache "line"
+        #
+        # self.ROW_SIZE is the width in bytes of the BRAM
+        # (based on WB, so 64 bits)
+        self.ROW_SIZE       = WB_DATA_BITS // 8
+        # Number of real address bits that we store
+        self.REAL_ADDR_BITS = XLEN-8 # 56 for XLEN=64
+
+        self.ROW_SIZE_BITS  = self.ROW_SIZE * 8
+        # ROW_PER_LINE is the number of row (wishbone) transactions in a line
+        self.ROW_PER_LINE   = self.LINE_SIZE // self.ROW_SIZE
+        # BRAM_ROWS is the number of rows in BRAM
+        # needed to represent the full icache
+        self.BRAM_ROWS      = self.NUM_LINES * self.ROW_PER_LINE
+        # INSN_PER_ROW is the number of 32-bit instructions per BRAM row
+        self.INSN_PER_ROW   = self.ROW_SIZE_BITS // 32
+
+        # Bit fields counts in the address
+        #
+        # INSN_BITS is the number of bits to select an instruction in a row
+        self.INSN_BITS      = log2_int(self.INSN_PER_ROW)
+        # ROW_BITS is the number of bits to select a row
+        self.ROW_BITS       = log2_int(self.BRAM_ROWS)
+        # ROW_LINE_BITS is the number of bits to select a row within a line
+        self.ROW_LINE_BITS  = log2_int(self.ROW_PER_LINE)
+        # LINE_OFF_BITS is the number of bits for the offset in a cache line
+        self.LINE_OFF_BITS  = log2_int(self.LINE_SIZE)
+        # ROW_OFF_BITS is the number of bits for the offset in a row
+        self.ROW_OFF_BITS   = log2_int(self.ROW_SIZE)
+        # INDEX_BITS is the number of bits to select a cache line
+        self.INDEX_BITS     = log2_int(self.NUM_LINES)
+        # SET_SIZE_BITS is the log base 2 of the set size
+        self.SET_SIZE_BITS  = self.LINE_OFF_BITS + self.INDEX_BITS
+        # TAG_BITS is the number of bits of the tag part of the address
+        self.TAG_BITS       = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
+        # TAG_WIDTH is the width in bits of each way of the tag RAM
+        self.TAG_WIDTH      = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
+
+        # WAY_BITS is the number of bits to select a way
+        self.WAY_BITS       = log2_int(self.NUM_WAYS)
+        self.TAG_RAM_WIDTH  = self.TAG_BITS * self.NUM_WAYS
+
+        # L1 ITLB
+        self.TL_BITS        = log2_int(self.TLB_SIZE)
+        self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
+        self.TLB_PTE_BITS    = XLEN
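+
+        # worked example with the default geometry above, assuming a
+        # 64-bit wishbone bus (WB_DATA_BITS == 64, so ROW_SIZE == 8):
+        #   ROW_PER_LINE  = 64//8  = 8    BRAM_ROWS     = 64*8 = 512
+        #   INSN_PER_ROW  = 64//32 = 2    INSN_BITS     = 1
+        #   ROW_BITS      = 9             ROW_LINE_BITS = 3
+        #   LINE_OFF_BITS = 6             ROW_OFF_BITS  = 3
+        #   INDEX_BITS    = 6             SET_SIZE_BITS = 12
+        #   TAG_BITS      = 56-12 = 44    TL_BITS       = 6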
+
+        print("self.XLEN            =", self.XLEN)
+        print("self.BRAM_ROWS       =", self.BRAM_ROWS)
+        print("self.INDEX_BITS      =", self.INDEX_BITS)
+        print("self.INSN_BITS       =", self.INSN_BITS)
+        print("self.INSN_PER_ROW    =", self.INSN_PER_ROW)
+        print("self.LINE_SIZE       =", self.LINE_SIZE)
+        print("self.LINE_OFF_BITS   =", self.LINE_OFF_BITS)
+        print("LOG_LENGTH      =", LOG_LENGTH)
+        print("self.NUM_LINES       =", self.NUM_LINES)
+        print("self.NUM_WAYS        =", self.NUM_WAYS)
+        print("self.REAL_ADDR_BITS  =", self.REAL_ADDR_BITS)
+        print("self.ROW_BITS        =", self.ROW_BITS)
+        print("self.ROW_OFF_BITS    =", self.ROW_OFF_BITS)
+        print("self.ROW_LINE_BITS   =", self.ROW_LINE_BITS)
+        print("self.ROW_PER_LINE    =", self.ROW_PER_LINE)
+        print("self.ROW_SIZE        =", self.ROW_SIZE)
+        print("self.ROW_SIZE_BITS   =", self.ROW_SIZE_BITS)
+        print("self.SET_SIZE_BITS   =", self.SET_SIZE_BITS)
+        print("SIM             =", SIM)
+        print("self.TAG_BITS        =", self.TAG_BITS)
+        print("self.TAG_RAM_WIDTH   =", self.TAG_RAM_WIDTH)
+        print("self.TAG_BITS        =", self.TAG_BITS)
+        print("self.TL_BITS        =", self.TL_BITS)
+        print("self.TLB_EA_TAG_BITS =", self.TLB_EA_TAG_BITS)
+        print("self.TLB_LG_PGSZ     =", self.TLB_LG_PGSZ)
+        print("self.TLB_PTE_BITS    =", self.TLB_PTE_BITS)
+        print("self.TLB_SIZE        =", self.TLB_SIZE)
+        print("self.WAY_BITS        =", self.WAY_BITS)
+        print()
+
+        assert self.LINE_SIZE % self.ROW_SIZE == 0
+        assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
+        assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
+        assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
+        assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
+        assert (self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS)), \
+            "geometry bits don't add up"
+        assert (self.LINE_OFF_BITS ==
+            (self.ROW_OFF_BITS + self.ROW_LINE_BITS)), \
+           "geometry bits don't add up"
+        assert (self.REAL_ADDR_BITS ==
+            (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS)), \
+            "geometry bits don't add up"
+        assert (self.REAL_ADDR_BITS ==
+            (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS)), \
+            "geometry bits don't add up"
+
+        # Example of layout for 32 lines of 64 bytes:
+        #
+        # ..  tag    |index|  line  |
+        # ..         |   row   |    |
+        # ..         |     |   | |00| zero          (2)
+        # ..         |     |   |-|  | INSN_BITS     (1)
+        # ..         |     |---|    | ROW_LINE_BITS (3)
+        # ..         |     |--- - --| LINE_OFF_BITS (6)
+        # ..         |         |- --| ROW_OFF_BITS  (3)
+        # ..         |----- ---|    | ROW_BITS      (8)
+        # ..         |-----|        | INDEX_BITS    (5)
+        # .. --------|              | TAG_BITS      (53)
+
+    # The cache data BRAM organized as described above for each way
+    #subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
+    #
+    def RowPerLineValidArray(self):
+        return Array(Signal(name="rows_valid_%d" %x) \
+                     for x in range(self.ROW_PER_LINE))
+
+
+    # TODO to be passed to nmigen as ram attributes
+    # attribute ram_style : string;
+    # attribute ram_style of cache_tags : signal is "distributed";
+
+    def TLBRecord(self, name):
+        tlb_layout = [ ('tag', self.TLB_EA_TAG_BITS),
+                      ('pte', self.TLB_PTE_BITS)
+                     ]
+        return Record(tlb_layout, name=name)
+
+    def TLBArray(self):
+        return Array(self.TLBRecord("tlb%d" % x) for x in range(self.TLB_SIZE))
+
+    # PLRU output interface
+    def PLRUOut(self):
+        return Array(Signal(self.WAY_BITS, name="plru_out_%d" %x) \
+                     for x in range(self.NUM_LINES))
+
+    # Return the cache line index (tag index) for an address
+    def get_index(self, addr):
+        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
+
+    # Return the cache row index (data memory) for an address
+    def get_row(self, addr):
+        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
+
+    # Return the index of a row within a line
+    def get_row_of_line(self, row):
+        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
+
+    # Returns whether this is the last row of a line
+    def is_last_row_addr(self, addr, last):
+        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
+
+    # Returns whether this is the last row of a line
+    def is_last_row(self, row, last):
+        return self.get_row_of_line(row) == last
+
+    # Return the next row in the current cache line. We use a dedicated
+    # function in order to limit the size of the generated adder to be
+    # only the bits within a cache line (3 bits with default settings)
+    def next_row(self, row):
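+        # e.g. with ROW_LINE_BITS == 3, row 0b101_111 becomes 0b101_000:
+        # only the in-line index wraps, the line-select bits are kept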
+        row_v = row[0:self.ROW_LINE_BITS] + 1
+        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
+
+    # Read the instruction word for the given address
+    # in the current cache row
+    def read_insn_word(self, addr, data):
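+        # e.g. with INSN_PER_ROW == 2 (64-bit rows), address bit 2 alone
+        # selects which 32-bit word of the row holds the instruction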
+        word = addr[2:self.INSN_BITS+2]
+        return data.word_select(word, 32)
+
+    # Get the tag value from the address
+    def get_tag(self, addr):
+        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
+
+    # Read a tag from a tag memory row
+    def read_tag(self, way, tagset):
+        return tagset.word_select(way, self.TAG_BITS)
+
+    # Write a tag to tag memory row
+    def write_tag(self, way, tagset, tag):
+        return self.read_tag(way, tagset).eq(tag)
+
+    # Simple hash for direct-mapped TLB index
+    def hash_ea(self, addr):
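+        # XOR-fold three TL_BITS-wide slices of the address above the
+        # page offset; e.g. TLB_LG_PGSZ == 12, TL_BITS == 6 gives
+        # index = EA[12:18] ^ EA[18:24] ^ EA[24:30]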
+        hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
+               addr[self.TLB_LG_PGSZ + self.TL_BITS:
+                    self.TLB_LG_PGSZ + 2 * self.TL_BITS ] ^
+               addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
+                    self.TLB_LG_PGSZ + 3 * self.TL_BITS])
+        return hsh
 
 
 # Cache reload state machine
@@ -270,10 +284,10 @@ class State(Enum):
 
 
 class RegInternal(RecordObject):
-    def __init__(self):
+    def __init__(self, cfg):
         super().__init__()
         # Cache hit state (Latches for 1 cycle BRAM access)
-        self.hit_way      = Signal(WAY_BITS)
+        self.hit_way      = Signal(cfg.WAY_BITS)
         self.hit_nia      = Signal(64)
         self.hit_smark    = Signal()
         self.hit_valid    = Signal()
@@ -282,19 +296,19 @@ class RegInternal(RecordObject):
         self.state        = Signal(State, reset=State.IDLE)
         self.wb           = WBMasterOut("wb")
         self.req_adr      = Signal(64)
-        self.store_way    = Signal(WAY_BITS)
-        self.store_index  = Signal(INDEX_BITS)
-        self.store_row    = Signal(ROW_BITS)
-        self.store_tag    = Signal(TAG_BITS)
+        self.store_way    = Signal(cfg.WAY_BITS)
+        self.store_index  = Signal(cfg.INDEX_BITS)
+        self.store_row    = Signal(cfg.ROW_BITS)
+        self.store_tag    = Signal(cfg.TAG_BITS)
         self.store_valid  = Signal()
-        self.end_row_ix   = Signal(ROW_LINE_BITS)
-        self.rows_valid   = RowPerLineValidArray()
+        self.end_row_ix   = Signal(cfg.ROW_LINE_BITS)
+        self.rows_valid   = cfg.RowPerLineValidArray()
 
         # TLB miss state
         self.fetch_failed = Signal()
 
 
-class ICache(FetchUnitInterface, Elaboratable):
+class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
     """64 bit direct mapped icache. All instructions are 4B aligned."""
     def __init__(self, pspec):
         FetchUnitInterface.__init__(self, pspec)
@@ -313,7 +327,7 @@ class ICache(FetchUnitInterface, Elaboratable):
                             data_width=64,
                             granularity=8,
                             features={'stall'},
-                            alignment=0,
+                            #alignment=0,
                             name="icache_wb")
 
         self.log_out        = Signal(54)
@@ -321,6 +335,34 @@ class ICache(FetchUnitInterface, Elaboratable):
         # use FetchUnitInterface, helps keep some unit tests running
         self.use_fetch_iface = False
 
+        # test whether a small cache is to be enabled
+        self.small_cache = (hasattr(pspec, "small_cache") and
+                                 (pspec.small_cache == True))
+        # test whether microwatt compatibility is to be enabled
+        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
+                                 (pspec.microwatt_compat == True))
+
+        XLEN = pspec.XLEN
+        LINE_SIZE = 64
+        TLB_SIZE = 16
+        NUM_LINES = 16
+        NUM_WAYS = 2
+        if self.small_cache:
+            # reduce way sizes and num lines to ridiculously small
+            NUM_LINES = 2
+            NUM_WAYS = 1
+            TLB_SIZE = 2
+        if self.microwatt_compat:
+            # reduce way sizes
+            NUM_WAYS = 1
+
+        ICacheConfig.__init__(self, LINE_SIZE=LINE_SIZE,
+                                    XLEN=XLEN,
+                                    NUM_LINES=NUM_LINES,
+                                    NUM_WAYS=NUM_WAYS,
+                                    TLB_SIZE=TLB_SIZE)
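+
+        # usage sketch (illustrative only; TestMemPspec, as used in the
+        # tests at the bottom of this file, carries these attributes):
+        #   pspec = TestMemPspec(addr_wid=32, mask_wid=8, reg_wid=64,
+        #                        XLEN=32, small_cache=True)
+        #   icache = ICache(pspec)  # 2 lines, 1 way, 2-entry TLB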
+
     def use_fetch_interface(self):
         self.use_fetch_iface = True
 
@@ -337,25 +379,26 @@ class ICache(FetchUnitInterface, Elaboratable):
         do_read  = Signal()
         comb += do_read.eq(~(stall_in | use_previous))
 
-        rd_addr  = Signal(ROW_BITS)
-        wr_addr  = Signal(ROW_BITS)
+        rd_addr  = Signal(self.ROW_BITS)
+        wr_addr  = Signal(self.ROW_BITS)
         comb += rd_addr.eq(req_row)
         comb += wr_addr.eq(r.store_row)
 
         # binary-to-unary converters: replace-way enabled by bus.ack,
         # hit-way left permanently enabled
-        m.submodules.replace_way_e = re = Decoder(NUM_WAYS)
-        m.submodules.hit_way_e = he = Decoder(NUM_WAYS)
+        m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
+        m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
         comb += re.i.eq(replace_way)
         comb += re.n.eq(~bus.ack)
         comb += he.i.eq(r.hit_way)
 
-        for i in range(NUM_WAYS):
+        for i in range(self.NUM_WAYS):
             do_write = Signal(name="do_wr_%d" % i)
-            d_out    = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
-            wr_sel   = Signal(ROW_SIZE, name="wr_sel_%d" % i)
+            d_out    = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
+            wr_sel   = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)
 
-            way = CacheRam(ROW_BITS, ROW_SIZE_BITS, TRACE=True, ram_num=i)
+            way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
+                           TRACE=True, ram_num=i)
             m.submodules["cacheram_%d" % i] =  way
 
             comb += way.rd_en.eq(do_read)
@@ -377,20 +420,21 @@ class ICache(FetchUnitInterface, Elaboratable):
                     sync += Display("cache read adr: %x data: %x",
                                      req_row, d_out)
 
-            comb += wr_sel.eq(Repl(do_write, ROW_SIZE))
+            comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))
 
     # Generate PLRUs
     def maybe_plrus(self, m, r, plru_victim):
         comb = m.d.comb
 
-        if NUM_WAYS == 0:
+        if self.NUM_WAYS == 0:
             return
 
 
-        m.submodules.plrus = plru = PLRUs(NUM_LINES, WAY_BITS)
+        m.submodules.plrus = plru = PLRUs("itag", self.NUM_LINES,
+                                                  self.WAY_BITS)
         comb += plru.way.eq(r.hit_way)
         comb += plru.valid.eq(r.hit_valid)
-        comb += plru.index.eq(get_index(r.hit_nia))
+        comb += plru.index.eq(self.get_index(r.hit_nia))
         comb += plru.isel.eq(r.store_index) # select victim
         comb += plru_victim.eq(plru.o_index) # selected victim
 
@@ -405,24 +449,24 @@ class ICache(FetchUnitInterface, Elaboratable):
 
         # use an *asynchronous* Memory read port here (combinatorial)
         m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
-        tlb = TLBRecord("tlb_rdport")
+        tlb = self.TLBRecord("tlb_rdport")
         pte, ttag = tlb.pte, tlb.tag
 
-        comb += tlb_req_index.eq(hash_ea(i_in.nia))
+        comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
         comb += rd_tlb.addr.eq(tlb_req_index)
         comb += tlb.eq(rd_tlb.data)
 
         with m.If(i_in.virt_mode):
-            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
-                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
+            comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
+                                     pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
 
-            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
+            with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
                 comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))
 
             comb += eaa_priv.eq(pte[3])
 
         with m.Else():
-            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
+            comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
             comb += ra_valid.eq(1)
             comb += eaa_priv.eq(1)
 
@@ -437,9 +481,9 @@ class ICache(FetchUnitInterface, Elaboratable):
 
         m_in = self.m_in
 
-        wr_index = Signal(TLB_BITS)
-        wr_unary = Signal(TLB_SIZE)
-        comb += wr_index.eq(hash_ea(m_in.addr))
+        wr_index = Signal(self.TL_BITS)
+        wr_unary = Signal(self.TLB_SIZE)
+        comb += wr_index.eq(self.hash_ea(m_in.addr))
         comb += wr_unary.eq(1<<wr_index)
 
         m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
@@ -455,8 +499,8 @@ class ICache(FetchUnitInterface, Elaboratable):
             sync += itlb_valid.r.eq(wr_unary)
 
         with m.Elif(m_in.tlbld):
-            tlb = TLBRecord("tlb_wrport")
-            comb += tlb.tag.eq(m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
+            tlb = self.TLBRecord("tlb_wrport")
+            comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
             comb += tlb.pte.eq(m_in.pte)
             comb += wr_tlb.en.eq(1)
             comb += wr_tlb.addr.eq(wr_index)
@@ -477,47 +521,49 @@ class ICache(FetchUnitInterface, Elaboratable):
         flush_in, stall_out = self.flush_in, self.stall_out
 
         is_hit  = Signal()
-        hit_way = Signal(WAY_BITS)
+        hit_way = Signal(self.WAY_BITS)
 
         # i_in.sequential means that i_in.nia this cycle is 4 more than
         # last cycle.  If we read more than 32 bits at a time, had a
         # cache hit last cycle, and we don't want the first 32-bit chunk
         # then we can keep the data we read last cycle and just use that.
-        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
+        with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
             comb += use_previous.eq(i_in.sequential & r.hit_valid)
 
         # Extract line, row and tag from request
-        comb += req_index.eq(get_index(i_in.nia))
-        comb += req_row.eq(get_row(i_in.nia))
-        comb += req_tag.eq(get_tag(real_addr))
+        comb += req_index.eq(self.get_index(i_in.nia))
+        comb += req_row.eq(self.get_row(i_in.nia))
+        comb += req_tag.eq(self.get_tag(real_addr))
 
         # Calculate address of beginning of cache row, will be
         # used for cache miss processing if needed
         comb += req_laddr.eq(Cat(
-                 Const(0, ROW_OFF_BITS),
-                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS],
+                 Const(0, self.ROW_OFF_BITS),
+                 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS],
                 ))
 
         # Test if pending request is a hit on any way
         hitcond = Signal()
-        comb += hitcond.eq((r.state == State.WAIT_ACK)
-                 & (req_index == r.store_index)
-                 & r.rows_valid[req_row % ROW_PER_LINE]
+        rowvalid = Signal()
+        comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
+        comb += hitcond.eq((r.state == State.WAIT_ACK) &
+                            (req_index == r.store_index) &
+                             rowvalid
                 )
         # i_in.req asserts Decoder active
-        cvb = Signal(NUM_WAYS)
-        ctag = Signal(TAG_RAM_WIDTH)
+        cvb = Signal(self.NUM_WAYS)
+        ctag = Signal(self.TAG_RAM_WIDTH)
         comb += rd_tag.addr.eq(req_index)
         comb += ctag.eq(rd_tag.data)
-        comb += cvb.eq(cache_valids.q.word_select(req_index, NUM_WAYS))
-        m.submodules.store_way_e = se = Decoder(NUM_WAYS)
+        comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
+        m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
         comb += se.i.eq(r.store_way)
         comb += se.n.eq(~i_in.req)
-        for i in range(NUM_WAYS):
-            tagi = Signal(TAG_BITS, name="tag_i%d" % i)
+        for i in range(self.NUM_WAYS):
+            tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
             hit_test = Signal(name="hit_test%d" % i)
             is_tag_hit = Signal(name="is_tag_hit_%d" % i)
-            comb += tagi.eq(read_tag(i, ctag))
+            comb += tagi.eq(self.read_tag(i, ctag))
             comb += hit_test.eq(se.o[i])
             comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                   (tagi == req_tag))
@@ -547,7 +593,7 @@ class ICache(FetchUnitInterface, Elaboratable):
         # be output an entire row which I prefer not to do just yet
         # as it would force fetch2 to know about some of the cache
         # geometry information.
-        comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
+        comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
         comb += i_out.valid.eq(r.hit_valid)
         comb += i_out.nia.eq(r.hit_nia)
         comb += i_out.stop_mark.eq(r.hit_smark)
@@ -606,7 +652,7 @@ class ICache(FetchUnitInterface, Elaboratable):
         i_in = self.i_in
 
         # Reset per-row valid flags, only used in WAIT_ACK
-        for i in range(ROW_PER_LINE):
+        for i in range(self.ROW_PER_LINE):
             sync += r.rows_valid[i].eq(0)
 
         # We need to read a cache line
@@ -618,13 +664,13 @@ class ICache(FetchUnitInterface, Elaboratable):
                      replace_way, req_tag, real_addr)
 
             # Keep track of our index and way for subsequent stores
-            st_row = Signal(ROW_BITS)
-            comb += st_row.eq(get_row(req_laddr))
+            st_row = Signal(self.ROW_BITS)
+            comb += st_row.eq(self.get_row(req_laddr))
             sync += r.store_index.eq(req_index)
             sync += r.store_row.eq(st_row)
             sync += r.store_tag.eq(req_tag)
             sync += r.store_valid.eq(1)
-            sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)
+            sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)
 
             # Prep for first wishbone read.  We calculate the address
             # of the start of the cache line and start the WB cycle.
@@ -641,19 +687,19 @@ class ICache(FetchUnitInterface, Elaboratable):
         comb = m.d.comb
         sync = m.d.sync
         m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
-                                                    granularity=TAG_BITS)
+                                                    granularity=self.TAG_BITS)
 
         # Get victim way from plru
         sync += r.store_way.eq(replace_way)
 
         # Force misses on that way while reloading that line
-        idx = req_index*NUM_WAYS + replace_way # 2D index, 1st dim: NUM_WAYS
+        # 2D index, 1st dim: self.NUM_WAYS
+        idx = req_index*self.NUM_WAYS + replace_way
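+        # e.g. NUM_WAYS == 2: (line 3, way 1) -> valid-latch bit 3*2+1 == 7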
         comb += cache_valids.r.eq(1<<idx)
 
         # use write-port "granularity" to select the tag to write to
         # TODO: the Memory should be multiplied-up (by NUM_TAGS)
-        tagset = Signal(TAG_RAM_WIDTH)
-        comb += tagset.eq(r.store_tag << (replace_way*TAG_BITS))
+        tagset = Signal(self.TAG_RAM_WIDTH)
+        comb += tagset.eq(r.store_tag << (replace_way*self.TAG_BITS))
         comb += wr_tag.en.eq(1<<replace_way)
         comb += wr_tag.addr.eq(r.store_index)
         comb += wr_tag.data.eq(tagset)
@@ -661,62 +707,52 @@ class ICache(FetchUnitInterface, Elaboratable):
         sync += r.state.eq(State.WAIT_ACK)
 
     def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
-                             cache_valids, stbs_done):
+                             cache_valids):
         comb = m.d.comb
         sync = m.d.sync
 
         bus = self.bus
 
-        # Requests are all sent if stb is 0
-        stbs_zero = Signal()
-        comb += stbs_zero.eq(r.wb.stb == 0)
-        comb += stbs_done.eq(stbs_zero)
-
         # If we are still sending requests, was one accepted?
-        with m.If(~bus.stall & ~stbs_zero):
-            # That was the last word? We are done sending.
-            # Clear stb and set stbs_done so we can handle
-            # an eventual last ack on the same cycle.
-            with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
+        with m.If(~bus.stall & r.wb.stb):
+            # That was the last word? We are done sending.  Clear stb
+            with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
                 sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
-                         "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
-                         "stbs_done:%x", r.wb.adr, r.end_row_ix,
-                         r.wb.stb, stbs_zero, stbs_done)
+                         "r.end_row_ix:%x r.wb.stb:%x",
+                         r.wb.adr, r.end_row_ix, r.wb.stb)
                 sync += r.wb.stb.eq(0)
-                comb += stbs_done.eq(1)
 
             # Calculate the next row address
-            rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
-            comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
-            sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
+            rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
+            comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
+                                         self.LINE_OFF_BITS] + 1)
+            sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
             sync += Display("RARANGE r.req_adr:%x rarange:%x "
-                            "stbs_zero:%x stbs_done:%x",
-                            r.req_adr, rarange, stbs_zero, stbs_done)
+                            "r.wb.stb:%x",
+                            r.req_adr, rarange, r.wb.stb)
 
         # Incoming acks processing
         with m.If(bus.ack):
-            sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
-                            "stbs_done:%x",
-                            bus.dat_r, stbs_zero, stbs_done)
+            sync += Display("WB_IN_ACK data:%x", bus.dat_r)
 
-            sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)
+            sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)
 
             # Check for completion
-            with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
+            with m.If(self.is_last_row(r.store_row, r.end_row_ix)):
                 # Complete wishbone cycle
                 sync += r.wb.cyc.eq(0)
                 # be nice, clear addr
                 sync += r.req_adr.eq(0)
 
                 # Cache line is now valid
-                idx = r.store_index*NUM_WAYS + replace_way # 2D index again
+                # 2D index again, 1st dim: self.NUM_WAYS
+                idx = r.store_index*self.NUM_WAYS + replace_way
                 valid = r.store_valid & ~inval_in
                 comb += cache_valids.s.eq(1<<idx)
                 sync += r.state.eq(State.IDLE)
 
             # move on to next request in row
             # Increment store row counter
-            sync += r.store_row.eq(next_row(r.store_row))
+            sync += r.store_row.eq(self.next_row(r.store_row))
 
     # Cache miss/reload synchronous machine
     def icache_miss(self, m, r, req_is_miss,
@@ -729,8 +765,6 @@ class ICache(FetchUnitInterface, Elaboratable):
         stall_in, flush_in = self.stall_in, self.flush_in
         inval_in           = self.inval_in
 
-        stbs_done = Signal()
-
         comb += r.wb.sel.eq(-1)
         comb += r.wb.adr.eq(r.req_adr[3:])
 
@@ -754,7 +788,7 @@ class ICache(FetchUnitInterface, Elaboratable):
                                              cache_valids)
 
                 self.icache_miss_wait_ack(m, r, replace_way, inval_in,
-                                          cache_valids, stbs_done)
+                                          cache_valids)
 
         # TLB miss and protection fault processing
         with m.If(flush_in | m_in.tlbld):
@@ -774,7 +808,7 @@ class ICache(FetchUnitInterface, Elaboratable):
         # Output data to logger
         for i in range(LOG_LENGTH):
             log_data = Signal(54)
-            lway     = Signal(WAY_BITS)
+            lway     = Signal(self.WAY_BITS)
             wstate   = Signal()
 
             sync += lway.eq(req_hit_way)
@@ -798,12 +832,13 @@ class ICache(FetchUnitInterface, Elaboratable):
 
         # Cache-Ways "valid" indicators.  this is a 2D Signal, by the
         # number of ways and the number of lines.
-        vec = SRLatch(sync=True, llen=NUM_WAYS*NUM_LINES, name="cachevalids")
+        vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
+                      name="cachevalids")
         m.submodules.cache_valids = cache_valids = vec
 
         # TLB Array
-        itlb            = TLBArray()
-        vec = SRLatch(sync=False, llen=TLB_SIZE, name="tlbvalids")
+        itlb            = self.TLBArray()
+        vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
         m.submodules.itlb_valids = itlb_valid = vec
 
         # TODO to be passed to nmigen as ram attributes
@@ -813,31 +848,37 @@ class ICache(FetchUnitInterface, Elaboratable):
         # Privilege bit from PTE EAA field
         eaa_priv         = Signal()
 
-        r                = RegInternal()
+        r                = RegInternal(self)
 
         # Async signal on incoming request
-        req_index        = Signal(INDEX_BITS)
-        req_row          = Signal(ROW_BITS)
-        req_hit_way      = Signal(WAY_BITS)
-        req_tag          = Signal(TAG_BITS)
+        req_index        = Signal(self.INDEX_BITS)
+        req_row          = Signal(self.ROW_BITS)
+        req_hit_way      = Signal(self.WAY_BITS)
+        req_tag          = Signal(self.TAG_BITS)
         req_is_hit       = Signal()
         req_is_miss      = Signal()
         req_laddr        = Signal(64)
 
-        tlb_req_index    = Signal(TLB_BITS)
-        real_addr        = Signal(REAL_ADDR_BITS)
+        tlb_req_index    = Signal(self.TL_BITS)
+        real_addr        = Signal(self.REAL_ADDR_BITS)
         ra_valid         = Signal()
         priv_fault       = Signal()
         access_ok        = Signal()
         use_previous     = Signal()
 
-        cache_out_row    = Signal(ROW_SIZE_BITS)
+        cache_out_row    = Signal(self.ROW_SIZE_BITS)
 
-        plru_victim      = Signal(WAY_BITS)
-        replace_way      = Signal(WAY_BITS)
+        plru_victim      = Signal(self.WAY_BITS)
+        replace_way      = Signal(self.WAY_BITS)
 
-        self.tlbmem = Memory(depth=TLB_SIZE, width=TLB_EA_TAG_BITS+TLB_PTE_BITS)
-        self.tagmem = Memory(depth=NUM_LINES, width=TAG_RAM_WIDTH)
+        self.tlbmem = Memory(depth=self.TLB_SIZE,
+                             width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
+                             #attrs={'syn_ramstyle': "block_ram"}
+                            )
+        self.tagmem = Memory(depth=self.NUM_LINES,
+                             width=self.TAG_RAM_WIDTH,
+                             #attrs={'syn_ramstyle': "block_ram"}
+                            )
 
         # call sub-functions putting everything together,
         # using shared signals established above
@@ -989,6 +1030,7 @@ def test_icache(mem):
     pspec = TestMemPspec(addr_wid=32,
                          mask_wid=8,
                          reg_wid=64,
+                         XLEN=32,
                          )
     dut    = ICache(pspec)
 
@@ -1023,6 +1065,7 @@ if __name__ == '__main__':
     from soc.config.test.test_loadstore import TestMemPspec
     pspec = TestMemPspec(addr_wid=64,
                          mask_wid=8,
+                         XLEN=32,
                          reg_wid=64,
                          )
     dut = ICache(pspec)