2 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// L2 cache bank controller for the token-coherence (MOESI_CMP_token) protocol.
// Parameters: the backing CacheMemory, request/response enqueue latencies, and
// a flag enabling the local-sharer filter for incoming transient requests.
29 machine(L2Cache, "Token protocol")
30 : CacheMemory * L2cache,
32 Cycles l2_request_latency = 5,
33 Cycles l2_response_latency = 5,
34 bool filtering_enabled = true
38 // From local bank of L2 cache TO the network
40 // this L2 bank -> a local L1 || mod-directory
41 MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
42 // this L2 bank -> mod-directory
43 MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
44 // this L2 bank -> a local L1
45 MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";
48 // FROM the network to this local bank of L2 cache
50 // a local L1 || mod-directory -> this L2 bank
51 MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
// NOTE: the persistent-request network is the only ordered virtual network;
// persistent activate/deactivate messages must arrive in order.
52 MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
53 // mod-directory -> this L2 bank
54 MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
55 // a local L1 -> this L2 bank
56 MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";
59 state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
// Base MOESI-like states; per-state token-count invariants are asserted in setState().
61 NP, AccessPermission:Invalid, desc="Not Present";
62 I, AccessPermission:Invalid, desc="Idle";
63 S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
64 O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
65 M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";
// Locked states: a persistent (starvation-avoidance) request owns this block,
// so local accesses are Busy until the lock is released.
68 I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
69 S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
// Events driving the L2 bank's state machine. Fixes copy-pasted desc strings
// so the generated documentation distinguishes the *_Last_Token and writeback
// variants from their siblings.
73 enumeration(Event, desc="Cache events") {
// Requests from local L1 caches
76 L1_GETS, desc="local L1 GETS request";
77 L1_GETS_Last_Token, desc="local L1 GETS request when we hold the last token";
78 L1_GETX, desc="local L1 GETX request";
79 L1_INV, desc="L1 no longer has tokens";
// Transient (non-persistent) requests from remote processors
80 Transient_GETX, desc="A GetX from another processor";
81 Transient_GETS, desc="A GetS from another processor";
82 Transient_GETS_Last_Token, desc="A GetS from another processor when we hold the last token";
84 // events initiated by this L2
85 L2_Replacement, desc="L2 Replacement", format="!r";
87 // events of external L2 responses
90 Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
91 Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
92 Writeback_All_Tokens, desc="Received a writeback from L1 that includes all the tokens";
93 Writeback_Owned, desc="Received a writeback from L1 that includes the owner token";
96 Data_Shared, desc="Received a data message, we are now a sharer";
97 Data_Owner, desc="Received a data message, we are now the owner";
98 Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
99 Ack, desc="Received an ack message";
100 Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";
// Persistent (starvation-avoidance) lock/unlock notifications
103 Persistent_GETX, desc="Another processor has priority to read/write";
104 Persistent_GETS, desc="Another processor has priority to read";
105 Persistent_GETS_Last_Token, desc="Another processor has priority to read and we hold the last token";
106 Own_Lock_or_Unlock, desc="This processor now has priority";
// Per-block L2 cache entry: coherence state, dirty bit, token count, and data.
112 structure(Entry, desc="...", interface="AbstractCacheEntry") {
113 State CacheState, desc="cache state";
114 bool Dirty, desc="Is the data dirty (different than memory)?";
115 int Tokens, desc="The number of tokens we're holding for the line";
116 DataBlock DataBlk, desc="data for the block";
// Local-directory filter entry: which local L1s may hold the line, and
// whether one of them likely holds it exclusively.
119 structure(DirEntry, desc="...") {
120 Set Sharers, desc="Set of the internal processors that want the block in shared state";
121 bool exclusive, default="false", desc="if local exclusive is likely";
// External interface of the (perfect) local-directory filter.
124 structure(PerfectCacheMemory, external="yes") {
125 void allocate(Address);
126 void deallocate(Address);
127 DirEntry lookup(Address);
128 bool isTagPresent(Address);
// External interface of the persistent-request (starvation) table.
131 structure(PersistentTable, external="yes") {
132 void persistentRequestLock(Address, MachineID, AccessType);
133 void persistentRequestUnlock(Address, MachineID);
134 MachineID findSmallest(Address);
135 AccessType typeOfSmallest(Address);
136 void markEntries(Address);
137 bool isLocked(Address);
138 int countStarvingForAddress(Address);
139 int countReadStarvingForAddress(Address);
// Controller-local state: starvation table and local L1 sharer filter.
142 PersistentTable persistentTable;
143 PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
145 void set_cache_entry(AbstractCacheEntry b);
146 void unset_cache_entry();
// Look up this bank's cache entry for an address (result may be invalid).
148 Entry getCacheEntry(Address address), return_by_pointer="yes" {
149 Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
// Accessor used by the Ruby system to read/write the block's data.
153 DataBlock getDataBlock(Address addr), return_by_ref="yes" {
154 return getCacheEntry(addr).DataBlk;
// Token count for an entry; an invalid entry holds no tokens.
157 int getTokens(Entry cache_entry) {
158 if (is_valid(cache_entry)) {
159 return cache_entry.Tokens;
// Current state for an address; a block absent from the cache can still be
// locked by a persistent request (I_L).
165 State getState(Entry cache_entry, Address addr) {
166 if (is_valid(cache_entry)) {
167 return cache_entry.CacheState;
168 } else if (persistentTable.isLocked(addr)) {
// Transition the entry to `state`, asserting the token-coherence invariants
// that must hold on entry to each state class.
175 void setState(Entry cache_entry, Address addr, State state) {
177 if (is_valid(cache_entry)) {
178 // Make sure the token count is in range
179 assert(cache_entry.Tokens >= 0);
180 assert(cache_entry.Tokens <= max_tokens());
// Exactly half the tokens is never a legal count (the owner token tips the balance).
181 assert(cache_entry.Tokens != (max_tokens() / 2));
183 // Make sure we have no tokens in L
184 if ((state == State:I_L) ) {
185 assert(cache_entry.Tokens == 0);
188 // in M and E you have all the tokens
189 if (state == State:M ) {
190 assert(cache_entry.Tokens == max_tokens());
193 // in NP you have no tokens
194 if (state == State:NP) {
195 assert(cache_entry.Tokens == 0);
198 // You have at least one token in S-like states
199 if (state == State:S ) {
200 assert(cache_entry.Tokens > 0);
203 // You have at least half the token in O-like states
204 if (state == State:O ) {
205 assert(cache_entry.Tokens > (max_tokens() / 2));
208 cache_entry.CacheState := state;
// Map the block's controller state to a Ruby access permission.
212 AccessPermission getAccessPermission(Address addr) {
213 Entry cache_entry := getCacheEntry(addr);
214 if(is_valid(cache_entry)) {
215 return L2Cache_State_to_permission(cache_entry.CacheState);
218 return AccessPermission:NotPresent;
// Keep the entry's cached permission in sync with a state change.
221 void setAccessPermission(Entry cache_entry, Address addr, State state) {
222 if (is_valid(cache_entry)) {
223 cache_entry.changePermission(L2Cache_State_to_permission(state));
// --- Local-directory (L1 sharer filter) helpers ---
// Drop `id` from the sharer set; free the filter entry when it empties.
227 void removeSharer(Address addr, NodeID id) {
229 if (localDirectory.isTagPresent(addr)) {
230 localDirectory[addr].Sharers.remove(id);
231 if (localDirectory[addr].Sharers.count() == 0) {
232 localDirectory.deallocate(addr);
// True if the filter records any local L1 sharer for addr.
237 bool sharersExist(Address addr) {
238 if (localDirectory.isTagPresent(addr)) {
239 if (localDirectory[addr].Sharers.count() > 0) {
// True if the filter believes a local L1 holds the block exclusively.
251 bool exclusiveExists(Address addr) {
252 if (localDirectory.isTagPresent(addr)) {
253 if (localDirectory[addr].exclusive) {
265 // assumes that caller will check to make sure tag is present
266 Set getSharers(Address addr) {
267 return localDirectory[addr].Sharers;
// Record a GETX requestor: it becomes the sole (exclusive) sharer.
270 void setNewWriter(Address addr, NodeID id) {
271 if (localDirectory.isTagPresent(addr) == false) {
272 localDirectory.allocate(addr);
274 localDirectory[addr].Sharers.clear();
275 localDirectory[addr].Sharers.add(id);
276 localDirectory[addr].exclusive := true;
// Record a GETS requestor as an additional sharer (exclusive bit untouched).
279 void addNewSharer(Address addr, NodeID id) {
280 if (localDirectory.isTagPresent(addr) == false) {
281 localDirectory.allocate(addr);
283 localDirectory[addr].Sharers.add(id);
284 // localDirectory[addr].exclusive := false;
// Clear the exclusive hint if a filter entry exists (no-op otherwise).
287 void clearExclusiveBitIfExists(Address addr) {
288 if (localDirectory.isTagPresent(addr)) {
289 localDirectory[addr].exclusive := false;
// Outgoing ports: requests to the directory, forwarded requests to local L1s,
// and responses (data / acks / tokens).
294 out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
295 out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
296 out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);
302 // Persistent Network
// Update the persistent-request table from activate/deactivate messages, then
// trigger an event based on who now has priority for the block.
303 in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
304 if (persistentNetwork_in.isReady()) {
305 peek(persistentNetwork_in, PersistentMsg) {
306 assert(in_msg.Destination.isElement(machineID));
308 if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
309 persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
310 } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
311 persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
312 } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
313 persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
315 error("Unexpected message");
318 Entry cache_entry := getCacheEntry(in_msg.Addr);
319 // React to the message based on the current state of the table
320 if (persistentTable.isLocked(in_msg.Addr)) {
322 if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
// Distinguish whether we would give up our last token (or the owner
// token's minimal share) when satisfying a persistent read.
323 if (getTokens(cache_entry) == 1 ||
324 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
325 trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
328 trigger(Event:Persistent_GETS, in_msg.Addr, cache_entry);
331 trigger(Event:Persistent_GETX, in_msg.Addr, cache_entry);
// Not locked (or we hold the lock): unlock path.
335 trigger(Event:Own_Lock_or_Unlock, in_msg.Addr, cache_entry);
// Transient requests arriving from the global network (other processors via
// the directory).
343 in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
344 if (requestNetwork_in.isReady()) {
345 peek(requestNetwork_in, RequestMsg) {
346 assert(in_msg.Destination.isElement(machineID));
348 Entry cache_entry := getCacheEntry(in_msg.Addr);
349 if (in_msg.Type == CoherenceRequestType:GETX) {
350 trigger(Event:Transient_GETX, in_msg.Addr, cache_entry);
351 } else if (in_msg.Type == CoherenceRequestType:GETS) {
// A GetS that would take our only token is handled specially.
352 if (getTokens(cache_entry) == 1) {
353 trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
357 trigger(Event:Transient_GETS, in_msg.Addr, cache_entry);
360 error("Unexpected message");
// Requests arriving from this bank's local L1 caches.
366 in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
367 if (L1requestNetwork_in.isReady()) {
368 peek(L1requestNetwork_in, RequestMsg) {
369 assert(in_msg.Destination.isElement(machineID));
370 Entry cache_entry := getCacheEntry(in_msg.Addr);
371 if (in_msg.Type == CoherenceRequestType:GETX) {
372 trigger(Event:L1_GETX, in_msg.Addr, cache_entry);
373 } else if (in_msg.Type == CoherenceRequestType:GETS) {
// Last token, or the owner's minimal share (half + 1), gets special handling.
374 if (getTokens(cache_entry) == 1 ||
375 getTokens(cache_entry) == (max_tokens() / 2) + 1) {
376 trigger(Event:L1_GETS_Last_Token, in_msg.Addr, cache_entry);
379 trigger(Event:L1_GETS, in_msg.Addr, cache_entry);
382 error("Unexpected message");
// Responses (data, acks, tokens, L1 writebacks). The top-level split is on
// whether this message would bring our token count up to max_tokens(): the
// second branch triggers the *_All_Tokens event variants.
390 in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
391 if (responseNetwork_in.isReady()) {
392 peek(responseNetwork_in, ResponseMsg) {
393 assert(in_msg.Destination.isElement(machineID));
394 Entry cache_entry := getCacheEntry(in_msg.Addr);
// Case 1: the message does NOT complete our token set.
396 if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
397 if (in_msg.Type == CoherenceResponseType:ACK) {
// A plain ack never carries the owner token (>= half the tokens).
398 assert(in_msg.Tokens < (max_tokens() / 2));
399 trigger(Event:Ack, in_msg.Addr, cache_entry);
400 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
401 trigger(Event:Data_Owner, in_msg.Addr, cache_entry);
402 } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
403 trigger(Event:Data_Shared, in_msg.Addr, cache_entry);
404 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
405 in_msg.Type == CoherenceResponseType:WB_OWNED ||
406 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
408 if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
410 // either room is available or the block is already present
412 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
413 assert(in_msg.Dirty == false);
414 trigger(Event:Writeback_Tokens, in_msg.Addr, cache_entry);
415 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
416 assert(in_msg.Dirty == false);
417 trigger(Event:Writeback_Shared_Data, in_msg.Addr, cache_entry);
419 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
420 //assert(in_msg.Dirty == false);
421 trigger(Event:Writeback_Owned, in_msg.Addr, cache_entry);
// No room: evict the victim chosen by cacheProbe before accepting the WB.
425 trigger(Event:L2_Replacement,
426 L2cache.cacheProbe(in_msg.Addr),
427 getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
429 } else if (in_msg.Type == CoherenceResponseType:INV) {
430 trigger(Event:L1_INV, in_msg.Addr, cache_entry);
432 error("Unexpected message");
// Case 2: the message completes our token set -> *_All_Tokens events.
435 if (in_msg.Type == CoherenceResponseType:ACK) {
436 assert(in_msg.Tokens < (max_tokens() / 2));
437 trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry);
438 } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
439 in_msg.Type == CoherenceResponseType:DATA_SHARED) {
440 trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry);
441 } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
442 in_msg.Type == CoherenceResponseType:WB_OWNED ||
443 in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
444 if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {
446 // either room is available or the block is already present
448 if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
449 assert(in_msg.Dirty == false);
// A tokens-only WB can complete the set only if we already track the block.
450 assert( (getState(cache_entry, in_msg.Addr) != State:NP)
451 && (getState(cache_entry, in_msg.Addr) != State:I) );
452 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
453 } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
454 assert(in_msg.Dirty == false);
455 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
457 else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
458 trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
// No room: evict the victim chosen by cacheProbe before accepting the WB.
462 trigger(Event:L2_Replacement,
463 L2cache.cacheProbe(in_msg.Addr),
464 getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
466 } else if (in_msg.Type == CoherenceResponseType:INV) {
467 trigger(Event:L1_INV, in_msg.Addr, cache_entry);
469 DPRINTF(RubySlicc, "%s\n", in_msg.Type);
470 error("Unexpected message");
// Forward an L1's request to the directory on the global request network,
// preserving requestor, retry count, access mode and prefetch hints.
480 action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {
482 peek(L1requestNetwork_in, RequestMsg) {
484 // if this is a retry or no local sharers, broadcast normally
485 enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
486 out_msg.Addr := in_msg.Addr;
487 out_msg.Type := in_msg.Type;
488 out_msg.Requestor := in_msg.Requestor;
489 out_msg.RetryNum := in_msg.RetryNum;
492 // If a statically shared L2 cache, then no other L2 caches can
495 //out_msg.Destination.broadcast(MachineType:L2Cache);
496 //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
497 //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));
499 out_msg.Destination.add(map_Address_to_Directory(address));
500 out_msg.MessageSize := MessageSizeType:Request_Control;
501 out_msg.AccessMode := in_msg.AccessMode;
502 out_msg.Prefetch := in_msg.Prefetch;
506 //profile_filter_action(0);
// Re-send an incoming response (tokens and/or data) onward to the directory,
// e.g. when this bank cannot keep them.
511 action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
512 peek(responseNetwork_in, ResponseMsg) {
513 // FIXME, should use a 3rd vnet
514 enqueue(responseNetwork_out, ResponseMsg, 1) {
515 out_msg.Addr := address;
516 out_msg.Type := in_msg.Type;
517 out_msg.Sender := machineID;
518 out_msg.Destination.add(map_Address_to_Directory(address));
519 out_msg.Tokens := in_msg.Tokens;
520 out_msg.MessageSize := in_msg.MessageSize;
521 out_msg.DataBlk := in_msg.DataBlk;
522 out_msg.Dirty := in_msg.Dirty;
// On a clean replacement, return any held tokens to the directory as an ACK
// (no data needed), then drop our local token count to zero.
527 action(c_cleanReplacement, "c", desc="Issue clean writeback") {
528 assert(is_valid(cache_entry));
529 if (cache_entry.Tokens > 0) {
530 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
531 out_msg.Addr := address;
532 out_msg.Type := CoherenceResponseType:ACK;
533 out_msg.Sender := machineID;
534 out_msg.Destination.add(map_Address_to_Directory(address));
535 out_msg.Tokens := cache_entry.Tokens;
536 out_msg.MessageSize := MessageSizeType:Writeback_Control;
538 cache_entry.Tokens := 0;
// On an owned replacement, send tokens plus data if dirty (DATA_OWNER), or
// an owner-ack without data if clean (ACK_OWNER).
542 action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
543 assert(is_valid(cache_entry));
544 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
545 out_msg.Addr := address;
546 out_msg.Sender := machineID;
547 out_msg.Destination.add(map_Address_to_Directory(address));
548 out_msg.Tokens := cache_entry.Tokens;
549 out_msg.DataBlk := cache_entry.DataBlk;
550 out_msg.Dirty := cache_entry.Dirty;
552 if (cache_entry.Dirty) {
553 out_msg.MessageSize := MessageSizeType:Writeback_Data;
554 out_msg.Type := CoherenceResponseType:DATA_OWNER;
556 out_msg.MessageSize := MessageSizeType:Writeback_Control;
557 out_msg.Type := CoherenceResponseType:ACK_OWNER;
560 cache_entry.Tokens := 0;
// Satisfy a transient GetS: share N_tokens if we hold plenty (more than the
// owner's half-share plus N_tokens), otherwise give up a single token.
563 action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
564 peek(requestNetwork_in, RequestMsg) {
565 assert(is_valid(cache_entry));
566 if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
567 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
568 out_msg.Addr := address;
569 out_msg.Type := CoherenceResponseType:DATA_SHARED;
570 out_msg.Sender := machineID;
571 out_msg.Destination.add(in_msg.Requestor);
572 out_msg.Tokens := N_tokens;
573 out_msg.DataBlk := cache_entry.DataBlk;
574 out_msg.Dirty := false;
575 out_msg.MessageSize := MessageSizeType:Response_Data;
577 cache_entry.Tokens := cache_entry.Tokens - N_tokens;
// Fewer tokens to spare: send data with one token only.
580 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
581 out_msg.Addr := address;
582 out_msg.Type := CoherenceResponseType:DATA_SHARED;
583 out_msg.Sender := machineID;
584 out_msg.Destination.add(in_msg.Requestor);
586 out_msg.DataBlk := cache_entry.DataBlk;
587 out_msg.Dirty := false;
588 out_msg.MessageSize := MessageSizeType:Response_Data;
590 cache_entry.Tokens := cache_entry.Tokens - 1;
// Satisfy a transient GetX: hand over the data and every token we hold.
595 action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
596 assert(is_valid(cache_entry));
597 peek(requestNetwork_in, RequestMsg) {
598 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
599 out_msg.Addr := address;
600 out_msg.Type := CoherenceResponseType:DATA_OWNER;
601 out_msg.Sender := machineID;
602 out_msg.Destination.add(in_msg.Requestor);
603 assert(cache_entry.Tokens >= 1);
604 out_msg.Tokens := cache_entry.Tokens;
605 out_msg.DataBlk := cache_entry.DataBlk;
606 out_msg.Dirty := cache_entry.Dirty;
607 out_msg.MessageSize := MessageSizeType:Response_Data;
610 cache_entry.Tokens := 0;
// Send every token we hold (no data) to the current persistent-request winner.
613 action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
614 assert(is_valid(cache_entry));
615 if (cache_entry.Tokens > 0) {
616 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
617 out_msg.Addr := address;
618 out_msg.Type := CoherenceResponseType:ACK;
619 out_msg.Sender := machineID;
620 out_msg.Destination.add(persistentTable.findSmallest(address));
621 assert(cache_entry.Tokens >= 1);
622 out_msg.Tokens := cache_entry.Tokens;
623 out_msg.MessageSize := MessageSizeType:Response_Control;
626 cache_entry.Tokens := 0;
// Send data plus every token we hold to the starving (persistent) requestor.
629 action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
630 assert(is_valid(cache_entry));
631 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
632 out_msg.Addr := address;
633 out_msg.Type := CoherenceResponseType:DATA_OWNER;
634 out_msg.Sender := machineID;
635 out_msg.Destination.add(persistentTable.findSmallest(address));
636 assert(cache_entry.Tokens >= 1);
637 out_msg.Tokens := cache_entry.Tokens;
638 out_msg.DataBlk := cache_entry.DataBlk;
639 out_msg.Dirty := cache_entry.Dirty;
640 out_msg.MessageSize := MessageSizeType:Response_Data;
642 cache_entry.Tokens := 0;
// For a persistent read: ack all tokens except one to the starver, keeping a
// single token locally so we remain a sharer.
645 action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
646 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
647 assert(is_valid(cache_entry));
648 assert(cache_entry.Tokens > 0);
649 if (cache_entry.Tokens > 1) {
650 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
651 out_msg.Addr := address;
652 out_msg.Type := CoherenceResponseType:ACK;
653 out_msg.Sender := machineID;
654 out_msg.Destination.add(persistentTable.findSmallest(address));
655 assert(cache_entry.Tokens >= 1);
656 out_msg.Tokens := cache_entry.Tokens - 1;
657 out_msg.MessageSize := MessageSizeType:Response_Control;
660 cache_entry.Tokens := 1;
// For a persistent read while we are owner: send the data and all but one of
// our tokens (including the owner's share) to the starver, keeping one token.
// desc fixed: "out tokens" was a typo for "all our tokens but one".
663 action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all our tokens but one to starver") {
664 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
665 assert(is_valid(cache_entry));
// Must hold more than the owner's minimal share (half + 1) to have one spare.
666 assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
667 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
668 out_msg.Addr := address;
669 out_msg.Type := CoherenceResponseType:DATA_OWNER;
670 out_msg.Sender := machineID;
671 out_msg.Destination.add(persistentTable.findSmallest(address));
672 out_msg.Tokens := cache_entry.Tokens - 1;
673 out_msg.DataBlk := cache_entry.DataBlk;
674 out_msg.Dirty := cache_entry.Dirty;
675 out_msg.MessageSize := MessageSizeType:Response_Data;
// Retain exactly one token locally.
677 cache_entry.Tokens := 1;
// When we hold exactly the owner's minimal share (half + 1) we cannot keep a
// token back, so send the data and ALL tokens to the starver.
// desc fixed: it was copy-pasted from ff_ ("...but one to starver") although
// this action sends every token and zeroes the local count.
680 action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
681 //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
682 assert(is_valid(cache_entry));
683 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
684 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
685 out_msg.Addr := address;
686 out_msg.Type := CoherenceResponseType:DATA_OWNER;
687 out_msg.Sender := machineID;
688 out_msg.Destination.add(persistentTable.findSmallest(address));
689 out_msg.Tokens := cache_entry.Tokens;
690 out_msg.DataBlk := cache_entry.DataBlk;
691 out_msg.Dirty := cache_entry.Dirty;
692 out_msg.MessageSize := MessageSizeType:Response_Data;
// All tokens sent; none remain here.
694 cache_entry.Tokens := 0;
// Redirect an incoming response unchanged to the persistent-request winner
// instead of keeping it here.
699 action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
700 // assert(persistentTable.isLocked(address));
701 peek(responseNetwork_in, ResponseMsg) {
702 // FIXME, should use a 3rd vnet in some cases
703 enqueue(responseNetwork_out, ResponseMsg, 1) {
704 out_msg.Addr := address;
705 out_msg.Type := in_msg.Type;
706 out_msg.Sender := machineID;
707 out_msg.Destination.add(persistentTable.findSmallest(address));
708 out_msg.Tokens := in_msg.Tokens;
709 out_msg.DataBlk := in_msg.DataBlk;
710 out_msg.Dirty := in_msg.Dirty;
711 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an L1 shared writeback to the starver, converting the WB message
// type to its response equivalent (DATA_SHARED, or ACK for tokens-only).
716 action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
717 //assert(persistentTable.isLocked(address));
718 peek(responseNetwork_in, ResponseMsg) {
719 // FIXME, should use a 3rd vnet in some cases
720 enqueue(responseNetwork_out, ResponseMsg, 1) {
721 out_msg.Addr := address;
722 if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
723 out_msg.Type := CoherenceResponseType:DATA_SHARED;
// Tokens-only writeback: must carry fewer than half the tokens (no owner).
725 assert(in_msg.Tokens < (max_tokens() / 2));
726 out_msg.Type := CoherenceResponseType:ACK;
728 out_msg.Sender := machineID;
729 out_msg.Destination.add(persistentTable.findSmallest(address));
730 out_msg.Tokens := in_msg.Tokens;
731 out_msg.DataBlk := in_msg.DataBlk;
732 out_msg.Dirty := in_msg.Dirty;
733 out_msg.MessageSize := in_msg.MessageSize;
// Redirect an L1 owned writeback to the starver as a DATA_OWNER response.
738 action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
739 // assert(persistentTable.isLocked(address));
740 peek(responseNetwork_in, ResponseMsg) {
741 // FIXME, should use a 3rd vnet in some cases
742 enqueue(responseNetwork_out, ResponseMsg, 1) {
743 out_msg.Addr := address;
744 out_msg.Type := CoherenceResponseType:DATA_OWNER;
745 out_msg.Sender := machineID;
746 out_msg.Destination.add(persistentTable.findSmallest(address));
747 out_msg.Tokens := in_msg.Tokens;
748 out_msg.DataBlk := in_msg.DataBlk;
749 out_msg.Dirty := in_msg.Dirty;
750 out_msg.MessageSize := in_msg.MessageSize;
// The sending L1 gave up the block (writeback/hint): remove it from the
// local-sharer filter.
756 action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
757 peek(responseNetwork_in, ResponseMsg) {
758 removeSharer(in_msg.Addr, machineIDToNodeID(in_msg.Sender));
// Forward an external transient request to local L1s — unless the filter
// shows no local sharers on a first-try request, in which case it is dropped.
762 action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
763 peek(requestNetwork_in, RequestMsg) {
764 if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.Addr) == false) {
765 //profile_filter_action(1);
766 DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
770 enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
771 out_msg.Addr := in_msg.Addr;
772 out_msg.Requestor := in_msg.Requestor;
775 // Currently assuming only one chip so all L1s are local
777 //out_msg.Destination := getLocalL1IDs(machineID);
778 out_msg.Destination.broadcast(MachineType:L1Cache);
779 out_msg.Destination.remove(in_msg.Requestor);
781 out_msg.Type := in_msg.Type;
782 out_msg.isLocal := false;
783 out_msg.MessageSize := MessageSizeType:Broadcast_Control;
784 out_msg.AccessMode := in_msg.AccessMode;
785 out_msg.Prefetch := in_msg.Prefetch;
787 //profile_filter_action(0);
// L2 hit for a local L1 GetS: send clean shared data with a single token.
792 action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
793 peek(L1requestNetwork_in, RequestMsg) {
794 assert(is_valid(cache_entry));
795 assert(cache_entry.Tokens > 0);
796 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
797 out_msg.Addr := address;
798 out_msg.Type := CoherenceResponseType:DATA_SHARED;
799 out_msg.Sender := machineID;
800 out_msg.Destination.add(in_msg.Requestor);
801 out_msg.DataBlk := cache_entry.DataBlk;
802 out_msg.Dirty := false;
803 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
806 cache_entry.Tokens := cache_entry.Tokens - 1;
// L2 hit while holding exactly the owner's share (half + 1): transfer
// ownership (data + all our tokens) to the requesting L1.
810 action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") {
811 peek(L1requestNetwork_in, RequestMsg) {
812 assert(is_valid(cache_entry));
813 assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
814 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
815 out_msg.Addr := address;
816 out_msg.Type := CoherenceResponseType:DATA_OWNER;
817 out_msg.Sender := machineID;
818 out_msg.Destination.add(in_msg.Requestor);
819 out_msg.DataBlk := cache_entry.DataBlk;
820 out_msg.Dirty := cache_entry.Dirty;
821 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
822 out_msg.Tokens := cache_entry.Tokens;
824 cache_entry.Tokens := 0;
// L2 hit for a local L1 GetX: hand over the data and every token we hold.
828 action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") {
829 peek(L1requestNetwork_in, RequestMsg) {
830 assert(is_valid(cache_entry));
831 // assert(cache_entry.Tokens == max_tokens());
832 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
833 out_msg.Addr := address;
834 out_msg.Type := CoherenceResponseType:DATA_OWNER;
835 out_msg.Sender := machineID;
836 out_msg.Destination.add(in_msg.Requestor);
837 out_msg.DataBlk := cache_entry.DataBlk;
838 out_msg.Dirty := cache_entry.Dirty;
839 out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
840 //out_msg.Tokens := max_tokens();
841 out_msg.Tokens := cache_entry.Tokens;
843 cache_entry.Tokens := 0;
// Queue-pop actions: consume the head message of each incoming network.
847 action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
848 persistentNetwork_in.dequeue();
851 action(m_popRequestQueue, "m", desc="Pop request queue.") {
852 requestNetwork_in.dequeue();
855 action(n_popResponseQueue, "n", desc="Pop response queue") {
856 responseNetwork_in.dequeue();
859 action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
860 L1requestNetwork_in.dequeue();
// Add the incoming message's tokens to the entry; also latch the dirty bit
// from owned-data messages (see the inline note about Writeback_All_Tokens).
864 action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
865 peek(responseNetwork_in, ResponseMsg) {
866 assert(is_valid(cache_entry));
867 assert(in_msg.Tokens != 0);
868 cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;
870 // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
871 // may not trigger this action.
872 if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
873 cache_entry.Dirty := true;
// Update the local-sharer filter from a local L1's request: GETX -> sole
// exclusive writer, GETS -> additional sharer.
878 action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
879 peek(L1requestNetwork_in, RequestMsg) {
880 if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
881 if (in_msg.Type == CoherenceRequestType:GETX) {
882 setNewWriter(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
883 } else if (in_msg.Type == CoherenceRequestType:GETS) {
884 addNewSharer(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
// Drop the exclusive hint for this address in the local filter.
890 action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
891 clearExclusiveBitIfExists(address);
// Touch the line in the replacement policy on a local L1 access that hits.
894 action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
895 peek(L1requestNetwork_in, RequestMsg) {
896 if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
897 (is_valid(cache_entry))) {
898 L2cache.setMRU(address);
// Ack all held tokens (no data) to the requestor of a global transient request.
903 action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
904 assert(is_valid(cache_entry));
905 if (cache_entry.Tokens > 0) {
906 peek(requestNetwork_in, RequestMsg) {
907 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
908 out_msg.Addr := address;
909 out_msg.Type := CoherenceResponseType:ACK;
910 out_msg.Sender := machineID;
911 out_msg.Destination.add(in_msg.Requestor);
912 assert(cache_entry.Tokens >= 1);
913 out_msg.Tokens := cache_entry.Tokens;
914 out_msg.MessageSize := MessageSizeType:Response_Control;
918 cache_entry.Tokens := 0;
// Same as t_, but the requestor comes from the local L1 request queue.
921 action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
922 assert(is_valid(cache_entry));
923 if (cache_entry.Tokens > 0) {
924 peek(L1requestNetwork_in, RequestMsg) {
925 enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
926 out_msg.Addr := address;
927 out_msg.Type := CoherenceResponseType:ACK;
928 out_msg.Sender := machineID;
929 out_msg.Destination.add(in_msg.Requestor);
930 assert(cache_entry.Tokens >= 1);
931 out_msg.Tokens := cache_entry.Tokens;
932 out_msg.MessageSize := MessageSizeType:Response_Control;
936 cache_entry.Tokens := 0;
939 action(u_writeDataToCache, "u", desc="Write data to cache") {
940 peek(responseNetwork_in, ResponseMsg) {
941 assert(is_valid(cache_entry));
942 cache_entry.DataBlk := in_msg.DataBlk;
943 if ((cache_entry.Dirty == false) && in_msg.Dirty) {
944 cache_entry.Dirty := in_msg.Dirty;
// Allocate a fresh L2 entry for this address and make it the transition's
// active cache_entry.
949 action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
950 set_cache_entry(L2cache.allocate(address, new Entry));
// Free this block's L2 entry so the frame can be reused while any refetch
// proceeds in parallel.
953 action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
954 L2cache.deallocate(address);
// Statistics: count one demand miss at this L2 bank.
958 action(uu_profileMiss, "\um", desc="Profile the demand miss") {
959 ++L2cache.demand_misses;
// Statistics: count one demand hit at this L2 bank.
962 action(uu_profileHit, "\uh", desc="Profile the demand hit") {
963 ++L2cache.demand_hits;
// Sanity check: a data-bearing response must match the cached copy.
// Token-only messages (ACK, WB_TOKENS) carry no data, so they are exempt.
966 action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
967 peek(responseNetwork_in, ResponseMsg) {
968 if (in_msg.Type != CoherenceResponseType:ACK &&
969 in_msg.Type != CoherenceResponseType:WB_TOKENS) {
970 assert(is_valid(cache_entry));
971 assert(cache_entry.DataBlk == in_msg.DataBlk);
977 //*****************************************************
978 // TRANSITIONS
979 //*****************************************************
// State-independent transitions: an L1 invalidation hint only updates the
// local-sharer filter (no state change), and our own persistent lock/unlock
// message is simply consumed in the non-locked states.
981 transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
983 h_updateFilterFromL1HintOrWB;
987 transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
988 l_popPersistentQueue;
992 // Transitions from NP
// Not present: no entry, no tokens. Transients are only forwarded to any
// local sharers the filter remembers; local requests are broadcast onward.
994 transition(NP, {Transient_GETX, Transient_GETS}) {
995 // forward message to local sharers
997 j_forwardTransientRequestToLocalSharers;
1002 transition(NP, {L1_GETS, L1_GETX}) {
1003 a_broadcastLocalRequest;
1006 o_popL1RequestQueue;
// Stray responses in NP: nothing to record here.
1009 transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
// L1 writebacks while not present: allocate an entry, absorb the incoming
// tokens, and land in the state implied by what the writeback carried
// (shared data -> S, bare tokens -> I, all tokens -> M, owned -> O).
1014 transition(NP, Writeback_Shared_Data, S) {
1015 vv_allocateL2CacheBlock;
1017 q_updateTokensFromResponse;
1018 h_updateFilterFromL1HintOrWB;
1022 transition(NP, Writeback_Tokens, I) {
1023 vv_allocateL2CacheBlock;
1024 q_updateTokensFromResponse;
1025 h_updateFilterFromL1HintOrWB;
1029 transition(NP, Writeback_All_Tokens, M) {
1030 vv_allocateL2CacheBlock;
1032 q_updateTokensFromResponse;
1033 h_updateFilterFromL1HintOrWB;
1037 transition(NP, Writeback_Owned, O) {
1038 vv_allocateL2CacheBlock;
1040 q_updateTokensFromResponse;
1041 h_updateFilterFromL1HintOrWB;
// NOTE(review): the transition header for the next two lines was lost in
// extraction; this is the continuation of a persistent-request transition
// (event list + pop) — recover the header from the full source file.
1047 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1049 l_popPersistentQueue;
1052 // Transitions from Idle
// Idle: entry allocated, possibly holding tokens but no data. Local
// requests are broadcast and take whatever tokens we have collected.
1054 transition(I, {L1_GETS, L1_GETS_Last_Token}) {
1055 a_broadcastLocalRequest;
1056 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1059 o_popL1RequestQueue;
1062 transition(I, L1_GETX) {
1063 a_broadcastLocalRequest;
1064 tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
1067 o_popL1RequestQueue;
1070 transition(I, L2_Replacement) {
1071 c_cleanReplacement; // Only needed in some cases
1072 rr_deallocateL2CacheBlock;
// Transients: surrender collected tokens and forward to local sharers.
1075 transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
1077 t_sendAckWithCollectedTokens;
1078 j_forwardTransientRequestToLocalSharers;
// NOTE(review): the transition header for the next three lines was lost in
// extraction; this is the continuation of a persistent-request transition
// (event list, send tokens to starver, pop) — see the full source file.
1083 {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
1085 e_sendAckWithCollectedTokens;
1086 l_popPersistentQueue;
// Responses/writebacks while Idle accumulate tokens; data promotes the
// state (shared data -> S, owner data -> O, all tokens -> M).
1090 transition(I, Ack) {
1091 q_updateTokensFromResponse;
1095 transition(I, Data_Shared, S) {
1097 q_updateTokensFromResponse;
1101 transition(I, Writeback_Shared_Data, S) {
1103 q_updateTokensFromResponse;
1104 h_updateFilterFromL1HintOrWB;
1108 transition(I, Writeback_Tokens) {
1109 q_updateTokensFromResponse;
1110 h_updateFilterFromL1HintOrWB;
1114 transition(I, Data_Owner, O) {
1116 q_updateTokensFromResponse;
1120 transition(I, Writeback_Owned, O) {
1122 q_updateTokensFromResponse;
1123 h_updateFilterFromL1HintOrWB;
1127 transition(I, Data_All_Tokens, M) {
1129 q_updateTokensFromResponse;
1134 transition(I, Writeback_All_Tokens, M) {
1136 q_updateTokensFromResponse;
1137 h_updateFilterFromL1HintOrWB;
1141 // Transitions from Shared
// Shared: valid data plus at least one token. A GETX (transient or
// persistent) costs us all tokens and drops us to I/I_L; a persistent
// GETS keeps one token (S_L) so we stay readable while locked.
1143 transition(S, L2_Replacement, I) {
1145 rr_deallocateL2CacheBlock;
1148 transition(S, Transient_GETX, I) {
1150 t_sendAckWithCollectedTokens;
1151 j_forwardTransientRequestToLocalSharers;
1155 transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
1156 j_forwardTransientRequestToLocalSharers;
1161 transition(S, Persistent_GETX, I_L) {
1162 e_sendAckWithCollectedTokens;
1163 l_popPersistentQueue;
1167 transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
1168 f_sendAckWithAllButOneTokens;
1169 l_popPersistentQueue;
// Incoming responses: data-bearing ones must match our copy; tokens
// accumulate, and owner/all-token arrivals promote to O/M.
1173 transition(S, Ack) {
1174 q_updateTokensFromResponse;
1178 transition(S, Data_Shared) {
1179 w_assertIncomingDataAndCacheDataMatch;
1180 q_updateTokensFromResponse;
1184 transition(S, Writeback_Tokens) {
1185 q_updateTokensFromResponse;
1186 h_updateFilterFromL1HintOrWB;
1190 transition(S, Writeback_Shared_Data) {
1191 w_assertIncomingDataAndCacheDataMatch;
1192 q_updateTokensFromResponse;
1193 h_updateFilterFromL1HintOrWB;
1198 transition(S, Data_Owner, O) {
1199 w_assertIncomingDataAndCacheDataMatch;
1200 q_updateTokensFromResponse;
1204 transition(S, Writeback_Owned, O) {
1205 w_assertIncomingDataAndCacheDataMatch;
1206 q_updateTokensFromResponse;
1207 h_updateFilterFromL1HintOrWB;
1211 transition(S, Data_All_Tokens, M) {
1212 w_assertIncomingDataAndCacheDataMatch;
1213 q_updateTokensFromResponse;
1217 transition(S, Writeback_All_Tokens, M) {
1218 w_assertIncomingDataAndCacheDataMatch;
1219 q_updateTokensFromResponse;
1220 h_updateFilterFromL1HintOrWB;
// Local requests: GETX hands over our tokens and broadcasts for the rest;
// GETS is served from our data, and giving away the last token drops to I.
1224 transition(S, L1_GETX, I) {
1225 a_broadcastLocalRequest;
1226 tt_sendLocalAckWithCollectedTokens;
1230 o_popL1RequestQueue;
1234 transition(S, L1_GETS) {
1235 k_dataFromL2CacheToL1Requestor;
1239 o_popL1RequestQueue;
1242 transition(S, L1_GETS_Last_Token, I) {
1244 k_dataFromL2CacheToL1Requestor;
1248 o_popL1RequestQueue;
1251 // Transitions from Owned
// Owned: we hold the (possibly dirty) data and the owner token. Replacement
// must write back dirty data; GETX takes data plus all tokens; a persistent
// GETS leaves us one token (S_L) unless it needs the last one.
1253 transition(O, L2_Replacement, I) {
1254 cc_dirtyReplacement;
1255 rr_deallocateL2CacheBlock;
1258 transition(O, Transient_GETX, I) {
1260 dd_sendDataWithAllTokens;
1261 j_forwardTransientRequestToLocalSharers;
1265 transition(O, Persistent_GETX, I_L) {
1266 ee_sendDataWithAllTokens;
1267 l_popPersistentQueue;
1270 transition(O, Persistent_GETS, S_L) {
1271 ff_sendDataWithAllButOneTokens;
1272 l_popPersistentQueue;
1275 transition(O, Persistent_GETS_Last_Token, I_L) {
1276 fa_sendDataWithAllTokens;
1277 l_popPersistentQueue;
1280 transition(O, Transient_GETS) {
1281 // send multiple tokens
1283 d_sendDataWithTokens;
1287 transition(O, Transient_GETS_Last_Token) {
1288 // WAIT FOR IT TO GO PERSISTENT
// Token arrivals: collecting every outstanding token upgrades O -> M.
1293 transition(O, Ack) {
1294 q_updateTokensFromResponse;
1298 transition(O, Ack_All_Tokens, M) {
1299 q_updateTokensFromResponse;
1303 transition(O, Data_Shared) {
1304 w_assertIncomingDataAndCacheDataMatch;
1305 q_updateTokensFromResponse;
1310 transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
1311 w_assertIncomingDataAndCacheDataMatch;
1312 q_updateTokensFromResponse;
1313 h_updateFilterFromL1HintOrWB;
1317 transition(O, Data_All_Tokens, M) {
1318 w_assertIncomingDataAndCacheDataMatch;
1319 q_updateTokensFromResponse;
1323 transition(O, Writeback_All_Tokens, M) {
1324 w_assertIncomingDataAndCacheDataMatch;
1325 q_updateTokensFromResponse;
1326 h_updateFilterFromL1HintOrWB;
// Local requests served from our owned copy; giving away the owner token
// or everything drops us to I.
1330 transition(O, L1_GETS) {
1331 k_dataFromL2CacheToL1Requestor;
1335 o_popL1RequestQueue;
1338 transition(O, L1_GETS_Last_Token, I) {
1339 k_dataOwnerFromL2CacheToL1Requestor;
1343 o_popL1RequestQueue;
1346 transition(O, L1_GETX, I) {
1347 a_broadcastLocalRequest;
1348 k_dataAndAllTokensFromL2CacheToL1Requestor;
1352 o_popL1RequestQueue;
1355 // Transitions from M
// Modified: this bank holds all tokens and the data. Any external request
// takes everything; local GETS keeps ownership here (M -> O), local GETX
// hands the whole block to the L1 (M -> I).
1357 transition(M, L2_Replacement, I) {
1358 cc_dirtyReplacement;
1359 rr_deallocateL2CacheBlock;
1362 // MRM_DEBUG: Give up all tokens even for GETS? ???
1363 transition(M, {Transient_GETX, Transient_GETS}, I) {
1365 dd_sendDataWithAllTokens;
1369 transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
1370 ee_sendDataWithAllTokens;
1371 l_popPersistentQueue;
1375 transition(M, L1_GETS, O) {
1376 k_dataFromL2CacheToL1Requestor;
1380 o_popL1RequestQueue;
1383 transition(M, L1_GETX, I) {
1384 k_dataAndAllTokensFromL2CacheToL1Requestor;
1388 o_popL1RequestQueue;
1392 //Transitions from locked states
// I_L / S_L: a persistent request from another node has locked this block.
// Responses and writebacks are bounced straight to the starving requestor
// rather than kept; our own unlock restores the unlocked state.
1394 transition({I_L, S_L}, Ack) {
1395 gg_bounceResponseToStarver;
1399 transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
1400 gg_bounceResponseToStarver;
1404 transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
1405 gg_bounceWBSharedToStarver;
1406 h_updateFilterFromL1HintOrWB;
1410 transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
1411 gg_bounceWBOwnedToStarver;
1412 h_updateFilterFromL1HintOrWB;
1416 transition(S_L, L2_Replacement, I) {
1418 rr_deallocateL2CacheBlock;
1421 transition(I_L, L2_Replacement, I) {
1422 rr_deallocateL2CacheBlock;
1425 transition(I_L, Own_Lock_or_Unlock, I) {
1426 l_popPersistentQueue;
1429 transition(S_L, Own_Lock_or_Unlock, S) {
1430 l_popPersistentQueue;
// Transients during a lock are ignored; the requestor will go persistent.
1433 transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
1438 transition(I_L, {L1_GETX, L1_GETS}) {
1439 a_broadcastLocalRequest;
1442 o_popL1RequestQueue;
1445 transition(S_L, L1_GETX, I_L) {
1446 a_broadcastLocalRequest;
1447 tt_sendLocalAckWithCollectedTokens;
1451 o_popL1RequestQueue;
// S_L still has readable data and one token, so local GETS can be served.
1454 transition(S_L, L1_GETS) {
1455 k_dataFromL2CacheToL1Requestor;
1459 o_popL1RequestQueue;
1462 transition(S_L, L1_GETS_Last_Token, I_L) {
1463 k_dataFromL2CacheToL1Requestor;
1467 o_popL1RequestQueue;
1470 transition(S_L, Persistent_GETX, I_L) {
1471 e_sendAckWithCollectedTokens;
1472 l_popPersistentQueue;
1475 transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
1476 l_popPersistentQueue;
1479 transition(I_L, {Persistent_GETX, Persistent_GETS}) {
1480 l_popPersistentQueue;