# Copyright 2018 ETH Zurich and University of Bologna.
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Author: David Schaffenrath, TU Graz
# Author: Florian Zaruba, ETH Zurich
#
# Description: Hardware Page-Table Walker (PTW), ported to nmigen from the
# original SystemVerilog (which carried "verilator lint_off WIDTH" here;
# that pragma has no meaning in Python and is kept only as this note).
from nmigen import Const, Signal, Cat, Module
from nmigen.hdl.ast import ArrayProxy
from nmigen.cli import verilog, rtlil
# L1 D-cache geometry: 32 KiB total capacity.
CONFIG_L1D_SIZE = 32*1024
# Index bits cover one way's worth of the cache (capacity / associativity).
# NOTE(review): DCACHE_SET_ASSOC and log2 are defined/imported in lines not
# visible in this chunk — confirm they are in scope before this point.
DCACHE_INDEX_WIDTH = int(log2(CONFIG_L1D_SIZE / DCACHE_SET_ASSOC))
# SV39 physical addresses are 56 bits; the tag takes whatever the index
# (and page offset within it) leaves over.
DCACHE_TAG_WIDTH = 56 - DCACHE_INDEX_WIDTH
34 self
.address_index
= Signal(DCACHE_INDEX_WIDTH
)
35 self
.address_tag
= Signal(DCACHE_TAG_WIDTH
)
36 self
.data_wdata
= Signal(64)
37 self
.data_req
= Signal()
38 self
.data_we
= Signal()
39 self
.data_be
= Signal(8)
40 self
.data_size
= Signal(2)
41 self
.kill_req
= Signal()
42 self
.tag_valid
= Signal()
45 return [self
.address_index
, self
.address_tag
,
46 self
.data_wdata
, self
.data_req
,
47 self
.data_we
, self
.data_be
, self
.data_size
,
48 self
.kill_req
, self
.tag_valid
,
53 self
.data_gnt
= Signal()
54 self
.data_rvalid
= Signal()
55 self
.data_rdata
= Signal(64)
58 return [ self
.data_gnt
, self
.data_rvalid
, self
.data_rdata
]
63 class PTE
: #(RecordObject):
65 self
.reserved
= Signal(10)
78 return Cat(*self
.ports())
81 if isinstance(x
, ArrayProxy
):
83 for o
in self
.ports():
84 i
= getattr(x
, o
.name
)
89 return self
.flatten().eq(x
)
92 return [self
.reserved
, self
.ppn
, self
.rsw
, self
.d
, self
.a
, self
.g
,
93 self
.u
, self
.x
, self
.w
, self
.r
, self
.v
]
98 self
.valid
= Signal() # valid flag
100 self
.is_1G
= Signal()
101 self
.vpn
= Signal(27)
102 self
.asid
= Signal(ASID_WIDTH
)
106 return Cat(*self
.ports())
109 return self
.flatten().eq(x
.flatten())
112 return [self
.valid
, self
.is_2M
, self
.is_1G
, self
.vpn
, self
.asid
] + \
# SV39 defines three levels of page tables
LVL1 = Const(0, 2)  # defined to 0 so that ptw_lvl default-resets to LVL1
123 self
.flush_i
= Signal() # flush everything, we need to do this because
124 # actually everything we do is speculative at this stage
125 # e.g.: there could be a CSR instruction that changes everything
126 self
.ptw_active_o
= Signal(reset
=1) # active if not IDLE
127 self
.walking_instr_o
= Signal() # set when walking for TLB
128 self
.ptw_error_o
= Signal() # set when an error occurred
129 self
.enable_translation_i
= Signal() # CSRs indicate to enable SV39
130 self
.en_ld_st_translation_i
= Signal() # enable VM translation for ld/st
132 self
.lsu_is_store_i
= Signal() # translation triggered by store
133 # PTW memory interface
134 self
.req_port_i
= DCacheReqO()
135 self
.req_port_o
= DCacheReqI()
137 # to TLBs, update logic
138 self
.itlb_update_o
= TLBUpdate()
139 self
.dtlb_update_o
= TLBUpdate()
141 self
.update_vaddr_o
= Signal(39)
143 self
.asid_i
= Signal(ASID_WIDTH
)
146 self
.itlb_access_i
= Signal()
147 self
.itlb_hit_i
= Signal()
148 self
.itlb_vaddr_i
= Signal(64)
150 self
.dtlb_access_i
= Signal()
151 self
.dtlb_hit_i
= Signal()
152 self
.dtlb_vaddr_i
= Signal(64)
154 self
.satp_ppn_i
= Signal(44) # ppn from satp
155 self
.mxr_i
= Signal()
156 # Performance counters
157 self
.itlb_miss_o
= Signal()
158 self
.dtlb_miss_o
= Signal()
161 return [self
.ptw_active_o
, self
.walking_instr_o
, self
.ptw_error_o
,
164 self
.enable_translation_i
, self
.en_ld_st_translation_i
,
165 self
.lsu_is_store_i
, self
.req_port_i
, self
.req_port_o
,
168 self
.itlb_access_i
, self
.itlb_hit_i
, self
.itlb_vaddr_i
,
169 self
.dtlb_access_i
, self
.dtlb_hit_i
, self
.dtlb_vaddr_i
,
170 self
.satp_ppn_i
, self
.mxr_i
,
171 self
.itlb_miss_o
, self
.dtlb_miss_o
172 ] + self
.itlb_update_o
.ports() + self
.dtlb_update_o
.ports()
174 def elaborate(self
, platform
):
178 data_rvalid
= Signal()
179 data_rdata
= Signal(64)
182 m
.d
.comb
+= pte
.flatten().eq(data_rdata
)
184 # SV39 defines three levels of page tables
185 ptw_lvl
= Signal(2) # default=0=LVL1
189 m
.d
.comb
+= [ptw_lvl1
.eq(ptw_lvl
== LVL1
),
190 ptw_lvl2
.eq(ptw_lvl
== LVL2
),
191 ptw_lvl3
.eq(ptw_lvl
== LVL3
)]
193 # is this an instruction page table walk?
194 is_instr_ptw
= Signal()
195 global_mapping
= Signal()
199 tlb_update_asid
= Signal(ASID_WIDTH
)
200 # register VPN we need to walk, SV39 defines a 39 bit virtual addr
202 # 4 byte aligned physical pointer
203 ptw_pptr
= Signal(56)
205 end
= DCACHE_INDEX_WIDTH
+ DCACHE_TAG_WIDTH
208 self
.update_vaddr_o
.eq(vaddr
),
210 self
.walking_instr_o
.eq(is_instr_ptw
),
211 # directly output the correct physical address
212 self
.req_port_o
.address_index
.eq(ptw_pptr
[0:DCACHE_INDEX_WIDTH
]),
213 self
.req_port_o
.address_tag
.eq(ptw_pptr
[DCACHE_INDEX_WIDTH
:end
]),
214 # we are never going to kill this request
215 self
.req_port_o
.kill_req
.eq(0), # XXX assign comb?
216 # we are never going to write with the HPTW
217 self
.req_port_o
.data_wdata
.eq(Const(0, 64)), # XXX assign comb?
221 self
.itlb_update_o
.vpn
.eq(vaddr
[12:39]),
222 self
.dtlb_update_o
.vpn
.eq(vaddr
[12:39]),
223 # update the correct page table level
224 self
.itlb_update_o
.is_2M
.eq(ptw_lvl2
),
225 self
.itlb_update_o
.is_1G
.eq(ptw_lvl1
),
226 self
.dtlb_update_o
.is_2M
.eq(ptw_lvl2
),
227 self
.dtlb_update_o
.is_1G
.eq(ptw_lvl1
),
228 # output the correct ASID
229 self
.itlb_update_o
.asid
.eq(tlb_update_asid
),
230 self
.dtlb_update_o
.asid
.eq(tlb_update_asid
),
231 # set the global mapping bit
232 self
.itlb_update_o
.content
.eq(pte
),
233 self
.itlb_update_o
.content
.g
.eq(global_mapping
),
234 self
.dtlb_update_o
.content
.eq(pte
),
235 self
.dtlb_update_o
.content
.g
.eq(global_mapping
),
237 self
.req_port_o
.tag_valid
.eq(tag_valid
),
243 # A virtual address va is translated into a physical address pa as
245 # 1. Let a be sptbr.ppn × PAGESIZE, and let i = LEVELS-1. (For Sv39,
246 # PAGESIZE=2^12 and LEVELS=3.)
247 # 2. Let pte be the value of the PTE at address a+va.vpn[i]×PTESIZE.
248 # (For Sv32, PTESIZE=4.)
249 # 3. If pte.v = 0, or if pte.r = 0 and pte.w = 1, stop and raise an
251 # 4. Otherwise, the PTE is valid. If pte.r = 1 or pte.x = 1, go to
252 # step 5. Otherwise, this PTE is a pointer to the next level of
254 # Let i=i-1. If i < 0, stop and raise an access exception.
255 # Otherwise, let a = pte.ppn × PAGESIZE and go to step 2.
256 # 5. A leaf PTE has been found. Determine if the requested memory
257 # access is allowed by the pte.r, pte.w, and pte.x bits. If not,
258 # stop and raise an access exception. Otherwise, the translation is
259 # successful. Set pte.a to 1, and, if the memory access is a
260 # store, set pte.d to 1.
261 # The translated physical address is given as follows:
262 # - pa.pgoff = va.pgoff.
263 # - If i > 0, then this is a superpage translation and
264 # pa.ppn[i-1:0] = va.vpn[i-1:0].
265 # - pa.ppn[LEVELS-1:i] = pte.ppn[LEVELS-1:i].
266 # 6. If i > 0 and pa.ppn[i − 1 : 0] != 0, this is a misaligned
267 # superpage stop and raise a page-fault exception.
269 m
.d
.sync
+= tag_valid
.eq(0)
271 # default assignments
273 # PTW memory interface
274 self
.req_port_o
.data_req
.eq(0),
275 self
.req_port_o
.data_be
.eq(Const(0xFF, 8)),
276 self
.req_port_o
.data_size
.eq(Const(0b11, 2)),
277 self
.req_port_o
.data_we
.eq(0),
278 self
.ptw_error_o
.eq(0),
279 self
.itlb_update_o
.valid
.eq(0),
280 self
.dtlb_update_o
.valid
.eq(0),
282 self
.itlb_miss_o
.eq(0),
283 self
.dtlb_miss_o
.eq(0),
287 pte_rx
= Signal(reset_less
=True)
288 pte_exe
= Signal(reset_less
=True)
289 pte_inv
= Signal(reset_less
=True)
290 a
= Signal(reset_less
=True)
291 st_wd
= Signal(reset_less
=True)
292 m
.d
.comb
+= [pte_rx
.eq(pte
.r | pte
.x
),
293 pte_exe
.eq(~pte
.x | ~pte
.a
),
294 pte_inv
.eq(~pte
.v |
(~pte
.r
& pte
.w
)),
295 a
.eq(pte
.a
& (pte
.r |
(pte
.x
& self
.mxr_i
))),
296 st_wd
.eq(self
.lsu_is_store_i
& (~pte
.w | ~pte
.d
))]
298 l1err
= Signal(reset_less
=True)
299 l2err
= Signal(reset_less
=True)
300 m
.d
.comb
+= [l2err
.eq((ptw_lvl2
) & pte
.ppn
[0:9] != Const(0, 9)),
301 l1err
.eq((ptw_lvl1
) & pte
.ppn
[0:18] != Const(0, 18)) ]
309 with m
.State("IDLE"):
310 self
.idle(m
, is_instr_ptw
, ptw_lvl
, global_mapping
,
311 ptw_pptr
, vaddr
, tlb_update_asid
)
313 with m
.State("WAIT_GRANT"):
315 m
.d
.comb
+= self
.req_port_o
.data_req
.eq(1)
316 # wait for the WAIT_GRANT
317 with m
.If(self
.req_port_i
.data_gnt
):
318 # send the tag valid signal one cycle later
319 m
.d
.sync
+= tag_valid
.eq(1)
320 # should we have flushed before we got an rvalid,
321 # wait for it until going back to IDLE
322 with m
.If(self
.flush_i
):
323 with m
.If (~data_rvalid
):
324 m
.next
= "WAIT_RVALID"
328 m
.next
= "PTE_LOOKUP"
330 with m
.State("PTE_LOOKUP"):
331 # we wait for the valid signal
332 with m
.If(data_rvalid
):
334 # check if the global mapping bit is set
336 m
.d
.sync
+= global_mapping
.eq(1)
341 # If pte.v = 0, or if pte.r = 0 and pte.w = 1,
342 # stop and raise a page-fault exception.
344 m
.next
= "PROPAGATE_ERROR"
351 # if pte.r = 1 or pte.x = 1 it is a valid PTE
353 # Valid translation found (either 1G, 2M or 4K)
354 with m
.If(is_instr_ptw
):
358 # If page is not executable, we can
359 # directly raise an error. This
360 # doesn't put a useless entry into
361 # the TLB. The same idea applies
362 # to the access flag since we let
363 # the access flag be managed by SW.
367 m
.d
.comb
+= self
.itlb_update_o
.valid
.eq(1)
373 # Check if the access flag has been set,
374 # otherwise throw a page-fault
375 # and let the software handle those bits.
376 # If page is not readable (there are
377 # no write-only pages)
378 # we can directly raise an error. This
379 # doesn't put a useless
380 # entry into the TLB.
382 m
.d
.comb
+= self
.dtlb_update_o
.valid
.eq(1)
384 m
.next
= "PROPAGATE_ERROR"
385 # Request is a store: perform some
387 # If the request was a store and the
388 # page is not write-able, raise an error
389 # the same applies if the dirty flag is not set
391 m
.d
.comb
+= self
.dtlb_update_o
.valid
.eq(0)
392 m
.next
= "PROPAGATE_ERROR"
394 # check if the ppn is correctly aligned: Case (6)
395 with m
.If(l1err | l2err
):
396 m
.next
= "PROPAGATE_ERROR"
397 m
.d
.comb
+= [self
.dtlb_update_o
.valid
.eq(0),
398 self
.itlb_update_o
.valid
.eq(0)]
400 # this is a pointer to the next TLB level
402 # pointer to next level of page table
403 with m
.If (ptw_lvl1
):
404 # we are in the second level now
405 pptr
= Cat(Const(0, 3),
406 self
.dtlb_vaddr_i
[21:30],
408 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
411 # here we received a pointer to the third level
412 pptr
= Cat(Const(0, 3),
413 self
.dtlb_vaddr_i
[12:21],
415 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
418 self
.set_grant_state(m
)
420 with m
.If (ptw_lvl3
):
421 # Should already be the last level
422 # page table => Error
423 m
.d
.sync
+= ptw_lvl
.eq(LVL3
)
424 m
.next
= "PROPAGATE_ERROR"
425 # we've got a data WAIT_GRANT so tell the
426 # cache that the tag is valid
428 # Propagate error to MMU/LSU
429 with m
.State("PROPAGATE_ERROR"):
431 m
.d
.comb
+= self
.ptw_error_o
.eq(1)
433 # wait for the rvalid before going back to IDLE
434 with m
.State("WAIT_RVALID"):
435 with m
.If(data_rvalid
):
438 m
.d
.sync
+= [data_rdata
.eq(self
.req_port_i
.data_rdata
),
439 data_rvalid
.eq(self
.req_port_i
.data_rvalid
)
444 def set_grant_state(self
, m
):
445 # should we have flushed before we got an rvalid,
446 # wait for it until going back to IDLE
447 with m
.If(self
.flush_i
):
448 with m
.If (self
.req_port_i
.data_gnt
):
449 m
.next
= "WAIT_RVALID"
453 m
.next
= "WAIT_GRANT"
455 def idle(self
, m
, is_instr_ptw
, ptw_lvl
, global_mapping
,
456 ptw_pptr
, vaddr
, tlb_update_asid
):
457 # by default we start with the top-most page table
458 m
.d
.sync
+= [is_instr_ptw
.eq(0),
460 global_mapping
.eq(0),
461 self
.ptw_active_o
.eq(0), # deactive (IDLE)
463 # work out itlb/dtlb miss
464 m
.d
.comb
+= self
.itlb_miss_o
.eq(self
.enable_translation_i
& \
465 self
.itlb_access_i
& \
468 m
.d
.comb
+= self
.dtlb_miss_o
.eq(self
.en_ld_st_translation_i
& \
469 self
.dtlb_access_i
& \
471 # we got an ITLB miss?
472 with m
.If(self
.itlb_miss_o
):
473 pptr
= Cat(Const(0, 3), self
.itlb_vaddr_i
[30:39],
475 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
477 vaddr
.eq(self
.itlb_vaddr_i
),
478 tlb_update_asid
.eq(self
.asid_i
),
480 self
.set_grant_state(m
)
482 # we got a DTLB miss?
483 with m
.Elif(self
.dtlb_miss_o
):
484 pptr
= Cat(Const(0, 3), self
.dtlb_vaddr_i
[30:39],
486 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
487 vaddr
.eq(self
.dtlb_vaddr_i
),
488 tlb_update_asid
.eq(self
.asid_i
),
490 self
.set_grant_state(m
)
493 if __name__
== '__main__':
495 vl
= rtlil
.convert(ptw
, ports
=ptw
.ports())
496 with
open("test_ptw.il", "w") as f
: