convert addr match into latched (SRLatch) version, activate on req_rel,
[soc.git] / src / scoreboard / addr_match.py
1 """ Load / Store partial address matcher
2
3 Loads and Stores do not need a full match (CAM), they need "good enough"
4 avoidance. Around 11 bits on a 64-bit address is "good enough".
5
6 The simplest way to use this module is to ignore not only the top bits,
7 but also the bottom bits as well: in this case (this RV64 processor),
enough to cover a DWORD (64-bit). That means ignore the bottom 4 bits,
9 due to the possibility of 64-bit LD/ST being misaligned.
10
11 To reiterate: the use of this module is an *optimisation*. All it has
12 to do is cover the cases that are *definitely* matches (by checking 11
13 bits or so), and if a few opportunities for parallel LD/STs are missed
14 because the top (or bottom) bits weren't checked, so what: all that
15 happens is: the mis-matched addresses are LD/STd on single-cycles. Big Deal.
16
17 However, if we wanted to enhance this algorithm (without using a CAM and
18 without using expensive comparators) probably the best way to do so would
19 be to turn the last 16 bits into a byte-level bitmap. LD/ST on a byte
20 would have 1 of the 16 bits set. LD/ST on a DWORD would have 8 of the 16
21 bits set (offset if the LD/ST was misaligned). TODO.
22
23 Notes:
24
25 > I have used bits <11:6> as they are not translated (4KB pages)
26 > and larger than a cache line (64 bytes).
27 > I have used bits <11:4> when the L1 cache was QuadW sized and
28 > the L2 cache was Line sized.
29 """
30
31 from nmigen.compat.sim import run_simulation
32 from nmigen.cli import verilog, rtlil
33 from nmigen import Module, Signal, Const, Array, Cat, Elaboratable
34
35 from nmutil.latch import latchregister, SRLatch
36
37
class PartialAddrMatch(Elaboratable):
    """A partial address matcher.

    Each incoming address may be latched into an internal register
    (SRLatch "set" via addr_en_i) and later deactivated (SRLatch "reset"
    via addr_rs_i).  Every latched address is compared against all of the
    others; the comparison is deliberately *partial* (only ``bitwid``
    bits) — see the module docstring for why that is good enough.

    Signals:

    * addrs_i[i]      -- address input for entry i
    * addr_en_i       -- bit i set: capture addrs_i[i] into entry i
    * addr_rs_i       -- bit i set: deactivate entry i
    * addr_match_o[i] -- bit j is 1 when entry i does *not* clash with
                         an active entry j (bit i is always 1: an entry
                         never matches against itself)
    """
    def __init__(self, n_adr, bitwid):
        self.n_adr = n_adr
        self.bitwid = bitwid
        # inputs
        self.addrs_i = Array(Signal(bitwid, name="addr") for i in range(n_adr))
        self.addr_we_i = Signal(n_adr)  # write-enable for incoming address
        self.addr_en_i = Signal(n_adr)  # address latched in
        self.addr_rs_i = Signal(n_adr)  # address deactivated

        # output
        self.addr_match_o = Array(Signal(n_adr, name="match_o") \
                            for i in range(n_adr))

    def elaborate(self, platform):
        m = Module()
        return self._elaborate(m, platform)

    def _elaborate(self, m, platform):
        comb = m.d.comb

        # per-entry "active" flags: set on addr_en_i, cleared on addr_rs_i
        m.submodules.l = l = SRLatch(llen=self.n_adr, sync=False)
        # NOTE: name must be passed by keyword (nmigen Signal's name
        # parameter is keyword-only; positional "a_r" raises TypeError)
        addrs_r = Array(Signal(self.bitwid, name="a_r") \
                        for i in range(self.n_adr))

        # latch set/reset
        comb += l.s.eq(self.addr_en_i)
        comb += l.r.eq(self.addr_rs_i)

        # copy in addresses (and "enable" signals): each address register
        # is transparently updated while its latch enable q[i] is raised
        for i in range(self.n_adr):
            latchregister(m, self.addrs_i[i], addrs_r[i], l.q[i])

        # is there a clash, yes/no: build a per-entry "no match" bitmap,
        # masked by the active (q) flags so inactive entries never clash
        for i in range(self.n_adr):
            nomatch = []
            for j in range(self.n_adr):
                if i == j:
                    nomatch.append(Const(1))  # don't match against self!
                else:
                    nomatch.append(addrs_r[i] != addrs_r[j])
            comb += self.addr_match_o[i].eq(Cat(*nomatch) & l.q)

        return m

    def __iter__(self):
        yield from self.addrs_i
        yield self.addr_we_i
        yield self.addr_en_i
        yield self.addr_rs_i  # was missing: reset input is a port too
        yield from self.addr_match_o

    def ports(self):
        return list(self)
93
94
def part_addr_sim(dut):
    """Stimulus for PartialAddrMatch.

    The original drove signals that do not exist on this DUT (dest_i,
    issue_i, go_rd_i/go_wr_i — copy-pasted from a dependency-cell
    testbench).  Drive the matcher's real interface instead: latch three
    addresses (two of them identical, so they clash), then release them.
    """
    # present three addresses; entries 0 and 2 are equal (a clash)
    yield dut.addrs_i[0].eq(0x10)
    yield dut.addrs_i[1].eq(0x12)
    yield dut.addrs_i[2].eq(0x10)
    # latch all three entries in
    yield dut.addr_en_i.eq(0b111)
    yield
    yield dut.addr_en_i.eq(0)
    yield
    # deactivate entry 1 only
    yield dut.addr_rs_i.eq(0b010)
    yield
    yield dut.addr_rs_i.eq(0)
    yield
    # deactivate the remaining entries
    yield dut.addr_rs_i.eq(0b101)
    yield
    yield dut.addr_rs_i.eq(0)
    yield
114
def test_part_addr():
    """Convert the matcher to ilang, then run the stimulus with VCD output."""
    dut = PartialAddrMatch(3, 10)

    il_text = rtlil.convert(dut, ports=dut.ports())
    with open("test_part_addr.il", "w") as f:
        f.write(il_text)

    run_simulation(dut, part_addr_sim(dut), vcd_name='test_part_addr.vcd')


if __name__ == '__main__':
    test_part_addr()