style: eliminate equality tests with true and false
src/mem/protocol/MOESI_CMP_token-L2cache.sm
/*
 * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

machine(L2Cache, "Token protocol")
 : CacheMemory * L2cache,
   int N_tokens,
   Cycles l2_request_latency = 5,
   Cycles l2_response_latency = 5,
   bool filtering_enabled = true
{

  // L2 BANK QUEUES
  // From local bank of L2 cache TO the network

  // this L2 bank -> a local L1 || mod-directory
  MessageBuffer responseFromL2Cache, network="To", virtual_network="4", ordered="false", vnet_type="response";
  // this L2 bank -> mod-directory
  MessageBuffer GlobalRequestFromL2Cache, network="To", virtual_network="2", ordered="false", vnet_type="request";
  // this L2 bank -> a local L1
  MessageBuffer L1RequestFromL2Cache, network="To", virtual_network="1", ordered="false", vnet_type="request";


  // FROM the network to this local bank of L2 cache

  // a local L1 || mod-directory -> this L2 bank
  MessageBuffer responseToL2Cache, network="From", virtual_network="4", ordered="false", vnet_type="response";
  MessageBuffer persistentToL2Cache, network="From", virtual_network="3", ordered="true", vnet_type="persistent";
  // mod-directory -> this L2 bank
  MessageBuffer GlobalRequestToL2Cache, network="From", virtual_network="2", ordered="false", vnet_type="request";
  // a local L1 -> this L2 bank
  MessageBuffer L1RequestToL2Cache, network="From", virtual_network="1", ordered="false", vnet_type="request";

  // STATES
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not Present";
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
    O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
    M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";

    // Locked states: a remote persistent request holds this block
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
  }

  // EVENTS
  enumeration(Event, desc="Cache events") {

    // Requests
    L1_GETS, desc="local L1 GETS request";
    L1_GETS_Last_Token, desc="local L1 GETS request when we hold the last token";
    L1_GETX, desc="local L1 GETX request";
    L1_INV, desc="L1 no longer has tokens";
    Transient_GETX, desc="A GetX from another processor";
    Transient_GETS, desc="A GetS from another processor";
    Transient_GETS_Last_Token, desc="A GetS from another processor, arriving when we hold the last token";

    // events initiated by this L2
    L2_Replacement, desc="L2 Replacement", format="!r";

    // events of external L2 responses

    // Responses
    Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)";
    Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data";
    Writeback_All_Tokens, desc="Received a writeback from L1 that completes the full token set";
    Writeback_Owned, desc="Received an owned writeback from L1 (data and the owner's tokens)";

    Data_Shared, desc="Received a data message, we are now a sharer";
    Data_Owner, desc="Received a data message, we are now the owner";
    Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens";
    Ack, desc="Received an ack message";
    Ack_All_Tokens, desc="Received an ack message, we now have all the tokens";

    // Lock/Unlock
    Persistent_GETX, desc="Another processor has priority to read/write";
    Persistent_GETS, desc="Another processor has priority to read";
    Persistent_GETS_Last_Token, desc="Another processor has priority to read, and we hold the last token";
    Own_Lock_or_Unlock, desc="This processor now has priority";
  }

  // TYPES

  // CacheEntry
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlock DataBlk, desc="data for the block";
  }

  structure(DirEntry, desc="...") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }

  structure(PerfectCacheMemory, external="yes") {
    void allocate(Address);
    void deallocate(Address);
    DirEntry lookup(Address);
    bool isTagPresent(Address);
  }

  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Address, MachineID, AccessType);
    void persistentRequestUnlock(Address, MachineID);
    MachineID findSmallest(Address);
    AccessType typeOfSmallest(Address);
    void markEntries(Address);
    bool isLocked(Address);
    int countStarvingForAddress(Address);
    int countReadStarvingForAddress(Address);
  }
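  // The persistent table implements token coherence's starvation avoidance:
  // a node that keeps losing token races issues a persistent request, which
  // locks the address at every node until the requestor deactivates it.
  // While an address is locked, responses are bounced to the starving node
  // with the smallest ID (findSmallest()), which has priority.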

  PersistentTable persistentTable;
  PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";

  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();

  Entry getCacheEntry(Address address), return_by_pointer="yes" {
    Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address));
    return cache_entry;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getCacheEntry(addr).DataBlk;
  }

  int getTokens(Entry cache_entry) {
    if (is_valid(cache_entry)) {
      return cache_entry.Tokens;
    } else {
      return 0;
    }
  }

  State getState(Entry cache_entry, Address addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr)) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }

  void setState(Entry cache_entry, Address addr, State state) {

    if (is_valid(cache_entry)) {
      // Make sure the token count is in range
      assert(cache_entry.Tokens >= 0);
      assert(cache_entry.Tokens <= max_tokens());
      assert(cache_entry.Tokens != (max_tokens() / 2));
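      // Token counts encode ownership: an owner always holds strictly more
      // than max_tokens() / 2 tokens and a non-owner strictly fewer, so a
      // count of exactly half can never occur. This is also why the "last
      // token" tests elsewhere check for 1 (plain sharer) or
      // (max_tokens() / 2) + 1 (owner down to its final token).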

      // Make sure we have no tokens in L
      if (state == State:I_L) {
        assert(cache_entry.Tokens == 0);
      }

      // in M you have all the tokens
      if (state == State:M) {
        assert(cache_entry.Tokens == max_tokens());
      }

      // in NP you have no tokens
      if (state == State:NP) {
        assert(cache_entry.Tokens == 0);
      }

      // You have at least one token in S-like states
      if (state == State:S) {
        assert(cache_entry.Tokens > 0);
      }

      // You have more than half the tokens in O-like states
      if (state == State:O) {
        assert(cache_entry.Tokens > (max_tokens() / 2));
      }

      cache_entry.CacheState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    Entry cache_entry := getCacheEntry(addr);
    if (is_valid(cache_entry)) {
      return L2Cache_State_to_permission(cache_entry.CacheState);
    }

    return AccessPermission:NotPresent;
  }

  void setAccessPermission(Entry cache_entry, Address addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L2Cache_State_to_permission(state));
    }
  }

  void removeSharer(Address addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].Sharers.remove(id);
      if (localDirectory[addr].Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }

  bool sharersExist(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].Sharers.count() > 0) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  bool exclusiveExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      if (localDirectory[addr].exclusive) {
        return true;
      }
      else {
        return false;
      }
    }
    else {
      return false;
    }
  }

  // assumes that caller will check to make sure tag is present
  Set getSharers(Address addr) {
    return localDirectory[addr].Sharers;
  }

  void setNewWriter(Address addr, NodeID id) {
    if (!localDirectory.isTagPresent(addr)) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.clear();
    localDirectory[addr].Sharers.add(id);
    localDirectory[addr].exclusive := true;
  }

  void addNewSharer(Address addr, NodeID id) {
    if (!localDirectory.isTagPresent(addr)) {
      localDirectory.allocate(addr);
    }
    localDirectory[addr].Sharers.add(id);
    // localDirectory[addr].exclusive := false;
  }

  void clearExclusiveBitIfExists(Address addr) {
    if (localDirectory.isTagPresent(addr)) {
      localDirectory[addr].exclusive := false;
    }
  }

  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);


  // ** IN_PORTS **

  // Persistent Network
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady()) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.Addr, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.Addr, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.Addr)) {

          if (persistentTable.typeOfSmallest(in_msg.Addr) == AccessType:Read) {
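            // A read starver needs the data only when this is our last
            // token: either a lone sharer token, or the owner's final
            // token ((max_tokens() / 2) + 1; see the note in setState).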
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.Addr,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.Addr, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.Addr, cache_entry);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.Addr, cache_entry);
        }
      }
    }
  }


  // Request Network
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.Addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.Addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.Addr,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.Addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }

  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady()) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.Addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.Addr, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.Addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }


  // Response Network
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.Addr);
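        // Does this response hand us the full token set for the block?
        // If not, treat it as a partial response; otherwise trigger the
        // *_All_Tokens variants in the else branch below.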
        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(!in_msg.Dirty);
                trigger(Event:Writeback_Tokens, in_msg.Addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(!in_msg.Dirty);
                trigger(Event:Writeback_Shared_Data, in_msg.Addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(!in_msg.Dirty);
                trigger(Event:Writeback_Owned, in_msg.Addr, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.Addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Addr, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.Addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cache.cacheAvail(in_msg.Addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(!in_msg.Dirty);
                assert((getState(cache_entry, in_msg.Addr) != State:NP)
                       && (getState(cache_entry, in_msg.Addr) != State:I));
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(!in_msg.Dirty);
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.Addr, cache_entry);
              }
            }
            else {
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.Addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.Addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.Addr, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }


  // ACTIONS

  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally
      enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
        out_msg.Addr := in_msg.Addr;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action


  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
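      // Only reachable from NP (see the NP transitions): this bank holds
      // nothing for the block, so tokens and data are forwarded home to
      // the directory instead of being allocated here.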
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }

  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(map_Address_to_Directory(address));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(map_Address_to_Directory(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
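      // If we hold well over the owner's half-share, hand out N_tokens at
      // once (presumably so remote readers can be satisfied without another
      // round trip); otherwise part with a single token.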
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }

  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }

  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }

  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
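    // Keep exactly one token: this bank remains a sharer (S -> S_L) and can
    // still service local L1 reads while the read starver holds the lock.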
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }

  action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and all tokens but one to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > (max_tokens() / 2) + 1);
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens - 1;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 1;
  }

  action(fa_sendDataWithAllTokens, "fa", desc="Send data and all tokens to starver") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
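    // The owner is down to its final token ((max_tokens() / 2) + 1), so the
    // read starver receives the data and everything we hold; the block
    // drops to I_L.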
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.Addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }


  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect shared writeback to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }

  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect owned writeback to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }


  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.Addr, machineIDToNodeID(in_msg.Sender));
    }
  }

  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
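      // With filtering enabled, a first-attempt request (RetryNum == 0) is
      // dropped when the local directory shows no sharers; retries are
      // always forwarded, since the sharer filter is only a hint.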
      if (filtering_enabled && in_msg.RetryNum == 0 && !sharersExist(in_msg.Addr)) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency) {
          out_msg.Addr := in_msg.Addr;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }

  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }

  action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and the owner's tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens == (max_tokens() / 2) + 1);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and all tokens from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      // assert(cache_entry.Tokens == max_tokens());
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.Addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        //out_msg.Tokens := max_tokens();
        out_msg.Tokens := cache_entry.Tokens;
      }
      cache_entry.Tokens := 0;
    }
  }

  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue();
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue();
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue();
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue();
  }


  action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      assert(in_msg.Tokens != 0);
      cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

      // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
      // may not trigger this action.
      if ((in_msg.Type == CoherenceResponseType:DATA_OWNER ||
           in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
        cache_entry.Dirty := true;
      }
    }
  }

  action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
    peek(L1requestNetwork_in, RequestMsg) {
      if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
        if (in_msg.Type == CoherenceRequestType:GETX) {
          setNewWriter(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          addNewSharer(in_msg.Addr, machineIDToNodeID(in_msg.Requestor));
        }
      }
    }
  }

  action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
    clearExclusiveBitIfExists(address);
  }

  action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line") {
    peek(L1requestNetwork_in, RequestMsg) {
      if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
          (is_valid(cache_entry))) {
        L2cache.setMRU(address);
      }
    }
  }

  action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      peek(L1requestNetwork_in, RequestMsg) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.Addr := address;
          out_msg.Type := CoherenceResponseType:ACK;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          assert(cache_entry.Tokens >= 1);
          out_msg.Tokens := cache_entry.Tokens;
          out_msg.MessageSize := MessageSizeType:Response_Control;
        }
      }
    }
    cache_entry.Tokens := 0;
  }

  action(u_writeDataToCache, "u", desc="Write data to cache") {
    peek(responseNetwork_in, ResponseMsg) {
      assert(is_valid(cache_entry));
      cache_entry.DataBlk := in_msg.DataBlk;
      if (!cache_entry.Dirty && in_msg.Dirty) {
        cache_entry.Dirty := in_msg.Dirty;
      }
    }
  }

  action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
    set_cache_entry(L2cache.allocate(address, new Entry));
  }

  action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
    L2cache.deallocate(address);
    unset_cache_entry();
  }

  action(uu_profileMiss, "\um", desc="Profile the demand miss") {
    ++L2cache.demand_misses;
  }

  action(uu_profileHit, "\uh", desc="Profile the demand hit") {
    ++L2cache.demand_hits;
  }

  action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
    peek(responseNetwork_in, ResponseMsg) {
      if (in_msg.Type != CoherenceResponseType:ACK &&
          in_msg.Type != CoherenceResponseType:WB_TOKENS) {
        assert(is_valid(cache_entry));
        assert(cache_entry.DataBlk == in_msg.DataBlk);
      }
    }
  }


  //*****************************************************
  // TRANSITIONS
  //*****************************************************
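  // Reading the tables below: transition(current state(s), event[, next
  // state]) { actions, executed in order }. When no next state is given,
  // the state is unchanged.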

  transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
    l_popPersistentQueue;
  }


  // Transitions from NP

  transition(NP, {Transient_GETX, Transient_GETS}) {
    // forward message to local sharers
    r_clearExclusive;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }


  transition(NP, {L1_GETS, L1_GETX}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
    bb_bounceResponse;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Shared_Data, S) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Tokens, I) {
    vv_allocateL2CacheBlock;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_All_Tokens, M) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(NP, Writeback_Owned, O) {
    vv_allocateL2CacheBlock;
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(NP,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    l_popPersistentQueue;
  }

  // Transitions from Idle

  transition(I, {L1_GETS, L1_GETS_Last_Token}) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L1_GETX) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(I, L2_Replacement) {
    c_cleanReplacement; // Only needed in some cases
    rr_deallocateL2CacheBlock;
  }

  transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(I,
             {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
             I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(I, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Data_Shared, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Shared_Data, S) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_Owner, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(I, Writeback_Owned, O) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(I, Data_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(I, Writeback_All_Tokens, M) {
    u_writeDataToCache;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  // Transitions from Shared

  transition(S, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(S, Transient_GETX, I) {
    r_clearExclusive;
    t_sendAckWithCollectedTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
    j_forwardTransientRequestToLocalSharers;
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(S, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }


  transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
    f_sendAckWithAllButOneTokens;
    l_popPersistentQueue;
  }


  transition(S, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Tokens) {
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Writeback_Shared_Data) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }


  transition(S, Data_Owner, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_Owned, O) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(S, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S, L1_GETX, I) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }


  transition(S, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S, L1_GETS_Last_Token, I) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  // Transitions from Owned

  transition(O, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(O, Transient_GETX, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    j_forwardTransientRequestToLocalSharers;
    m_popRequestQueue;
  }

  transition(O, Persistent_GETX, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS, S_L) {
    ff_sendDataWithAllButOneTokens;
    l_popPersistentQueue;
  }

  transition(O, Persistent_GETS_Last_Token, I_L) {
    fa_sendDataWithAllTokens;
    l_popPersistentQueue;
  }

  transition(O, Transient_GETS) {
    // send multiple tokens
    r_clearExclusive;
    d_sendDataWithTokens;
    m_popRequestQueue;
  }

  transition(O, Transient_GETS_Last_Token) {
    // Don't give up the last token on a transient GETS; wait for the
    // requestor's persistent request to arrive.
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(O, Ack) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Ack_All_Tokens, M) {
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Data_Shared) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }


  transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, Data_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    n_popResponseQueue;
  }

  transition(O, Writeback_All_Tokens, M) {
    w_assertIncomingDataAndCacheDataMatch;
    q_updateTokensFromResponse;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(O, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETS_Last_Token, I) {
    k_dataOwnerFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(O, L1_GETX, I) {
    a_broadcastLocalRequest;
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  // Transitions from M

  transition(M, L2_Replacement, I) {
    cc_dirtyReplacement;
    rr_deallocateL2CacheBlock;
  }

  // MRM_DEBUG: Give up all tokens even for GETS? ???
  transition(M, {Transient_GETX, Transient_GETS}, I) {
    r_clearExclusive;
    dd_sendDataWithAllTokens;
    m_popRequestQueue;
  }

  transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
    ee_sendDataWithAllTokens;
    l_popPersistentQueue;
  }


  transition(M, L1_GETS, O) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(M, L1_GETX, I) {
    k_dataAndAllTokensFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }


  // Transitions from locked states

  transition({I_L, S_L}, Ack) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
    gg_bounceResponseToStarver;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
    gg_bounceWBSharedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
    gg_bounceWBOwnedToStarver;
    h_updateFilterFromL1HintOrWB;
    n_popResponseQueue;
  }

  transition(S_L, L2_Replacement, I) {
    c_cleanReplacement;
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, L2_Replacement, I) {
    rr_deallocateL2CacheBlock;
  }

  transition(I_L, Own_Lock_or_Unlock, I) {
    l_popPersistentQueue;
  }

  transition(S_L, Own_Lock_or_Unlock, S) {
    l_popPersistentQueue;
  }

  transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
    r_clearExclusive;
    m_popRequestQueue;
  }

  transition(I_L, {L1_GETX, L1_GETS}) {
    a_broadcastLocalRequest;
    r_markNewSharer;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETX, I_L) {
    a_broadcastLocalRequest;
    tt_sendLocalAckWithCollectedTokens;
    r_markNewSharer;
    r_setMRU;
    uu_profileMiss;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, L1_GETS_Last_Token, I_L) {
    k_dataFromL2CacheToL1Requestor;
    r_markNewSharer;
    r_setMRU;
    uu_profileHit;
    o_popL1RequestQueue;
  }

  transition(S_L, Persistent_GETX, I_L) {
    e_sendAckWithCollectedTokens;
    l_popPersistentQueue;
  }

  transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
    l_popPersistentQueue;
  }

  transition(I_L, {Persistent_GETX, Persistent_GETS}) {
    l_popPersistentQueue;
  }
}