style: eliminate equality tests with true and false
[gem5.git] / src / cpu / o3 / inst_queue_impl.hh
1 /*
2 * Copyright (c) 2011-2013 ARM Limited
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
14 *
15 * Copyright (c) 2004-2006 The Regents of The University of Michigan
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Kevin Lim
42 * Korey Sewell
43 */
44
45 #ifndef __CPU_O3_INST_QUEUE_IMPL_HH__
46 #define __CPU_O3_INST_QUEUE_IMPL_HH__
47
48 #include <limits>
49 #include <vector>
50
51 #include "cpu/o3/fu_pool.hh"
52 #include "cpu/o3/inst_queue.hh"
53 #include "debug/IQ.hh"
54 #include "enums/OpClass.hh"
55 #include "params/DerivO3CPU.hh"
56 #include "sim/core.hh"
57
58 // clang complains about std::set being overloaded with Packet::set if
59 // we open up the entire namespace std
60 using std::list;
61
62 template <class Impl>
63 InstructionQueue<Impl>::FUCompletion::FUCompletion(DynInstPtr &_inst,
64 int fu_idx, InstructionQueue<Impl> *iq_ptr)
65 : Event(Stat_Event_Pri, AutoDelete),
66 inst(_inst), fuIdx(fu_idx), iqPtr(iq_ptr), freeFU(false)
67 {
68 }
69
70 template <class Impl>
71 void
72 InstructionQueue<Impl>::FUCompletion::process()
73 {
74 iqPtr->processFUCompletion(inst, freeFU ? fuIdx : -1);
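// Drop this event's reference to the instruction; DynInstPtr is reference
// counted, so the inst can be freed once no one else still holds it.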
75 inst = NULL;
76 }
77
78
79 template <class Impl>
80 const char *
81 InstructionQueue<Impl>::FUCompletion::description() const
82 {
83 return "Functional unit completion";
84 }
85
86 template <class Impl>
87 InstructionQueue<Impl>::InstructionQueue(O3CPU *cpu_ptr, IEW *iew_ptr,
88 DerivO3CPUParams *params)
89 : cpu(cpu_ptr),
90 iewStage(iew_ptr),
91 fuPool(params->fuPool),
92 numEntries(params->numIQEntries),
93 totalWidth(params->issueWidth),
94 commitToIEWDelay(params->commitToIEWDelay)
95 {
96 assert(fuPool);
97
98 numThreads = params->numThreads;
99
100 // Set the number of total physical registers
101 numPhysRegs = params->numPhysIntRegs + params->numPhysFloatRegs +
102 params->numPhysCCRegs;
103
104 //Create an entry for each physical register within the
105 //dependency graph.
106 dependGraph.resize(numPhysRegs);
107
108 // Resize the register scoreboard.
109 regScoreboard.resize(numPhysRegs);
110
111 //Initialize Mem Dependence Units
112 for (ThreadID tid = 0; tid < numThreads; tid++) {
113 memDepUnit[tid].init(params, tid);
114 memDepUnit[tid].setIQ(this);
115 }
116
117 resetState();
118
119 std::string policy = params->smtIQPolicy;
120
121 //Convert string to lowercase
122 std::transform(policy.begin(), policy.end(), policy.begin(),
123 (int(*)(int)) tolower);
124
125 //Figure out resource sharing policy
126 if (policy == "dynamic") {
127 iqPolicy = Dynamic;
128
129 //Set Max Entries to Total ROB Capacity
130 for (ThreadID tid = 0; tid < numThreads; tid++) {
131 maxEntries[tid] = numEntries;
132 }
133
134 } else if (policy == "partitioned") {
135 iqPolicy = Partitioned;
136
137 // @todo: make work if part_amt doesn't divide evenly.
138 int part_amt = numEntries / numThreads;
139
140 //Divide ROB up evenly
141 for (ThreadID tid = 0; tid < numThreads; tid++) {
142 maxEntries[tid] = part_amt;
143 }
144
145 DPRINTF(IQ, "IQ sharing policy set to Partitioned: "
146 "%i entries per thread.\n", part_amt);
147 } else if (policy == "threshold") {
148 iqPolicy = Threshold;
149
150 double threshold = (double)params->smtIQThreshold / 100;
151
152 int thresholdIQ = (int)(threshold * numEntries);
153
154 //Divide up by threshold amount
155 for (ThreadID tid = 0; tid < numThreads; tid++) {
156 maxEntries[tid] = thresholdIQ;
157 }
158
159 DPRINTF(IQ, "IQ sharing policy set to Threshold: "
160 "%i entries per thread.\n", thresholdIQ);
161 } else {
162 assert(0 && "Invalid IQ Sharing Policy. Options Are: {Dynamic, "
163 "Partitioned, Threshold}");
164 }
165 }
166
167 template <class Impl>
168 InstructionQueue<Impl>::~InstructionQueue()
169 {
170 dependGraph.reset();
171 #ifdef DEBUG
172 cprintf("Nodes traversed: %i, removed: %i\n",
173 dependGraph.nodesTraversed, dependGraph.nodesRemoved);
174 #endif
175 }
176
177 template <class Impl>
178 std::string
179 InstructionQueue<Impl>::name() const
180 {
181 return cpu->name() + ".iq";
182 }
183
184 template <class Impl>
185 void
186 InstructionQueue<Impl>::regStats()
187 {
188 using namespace Stats;
189 iqInstsAdded
190 .name(name() + ".iqInstsAdded")
191 .desc("Number of instructions added to the IQ (excludes non-spec)")
192 .prereq(iqInstsAdded);
193
194 iqNonSpecInstsAdded
195 .name(name() + ".iqNonSpecInstsAdded")
196 .desc("Number of non-speculative instructions added to the IQ")
197 .prereq(iqNonSpecInstsAdded);
198
199 iqInstsIssued
200 .name(name() + ".iqInstsIssued")
201 .desc("Number of instructions issued")
202 .prereq(iqInstsIssued);
203
204 iqIntInstsIssued
205 .name(name() + ".iqIntInstsIssued")
206 .desc("Number of integer instructions issued")
207 .prereq(iqIntInstsIssued);
208
209 iqFloatInstsIssued
210 .name(name() + ".iqFloatInstsIssued")
211 .desc("Number of float instructions issued")
212 .prereq(iqFloatInstsIssued);
213
214 iqBranchInstsIssued
215 .name(name() + ".iqBranchInstsIssued")
216 .desc("Number of branch instructions issued")
217 .prereq(iqBranchInstsIssued);
218
219 iqMemInstsIssued
220 .name(name() + ".iqMemInstsIssued")
221 .desc("Number of memory instructions issued")
222 .prereq(iqMemInstsIssued);
223
224 iqMiscInstsIssued
225 .name(name() + ".iqMiscInstsIssued")
226 .desc("Number of miscellaneous instructions issued")
227 .prereq(iqMiscInstsIssued);
228
229 iqSquashedInstsIssued
230 .name(name() + ".iqSquashedInstsIssued")
231 .desc("Number of squashed instructions issued")
232 .prereq(iqSquashedInstsIssued);
233
234 iqSquashedInstsExamined
235 .name(name() + ".iqSquashedInstsExamined")
236 .desc("Number of squashed instructions iterated over during squash;"
237 " mainly for profiling")
238 .prereq(iqSquashedInstsExamined);
239
240 iqSquashedOperandsExamined
241 .name(name() + ".iqSquashedOperandsExamined")
242 .desc("Number of squashed operands that are examined and possibly "
243 "removed from graph")
244 .prereq(iqSquashedOperandsExamined);
245
246 iqSquashedNonSpecRemoved
247 .name(name() + ".iqSquashedNonSpecRemoved")
248 .desc("Number of squashed non-spec instructions that were removed")
249 .prereq(iqSquashedNonSpecRemoved);
250 /*
251 queueResDist
252 .init(Num_OpClasses, 0, 99, 2)
253 .name(name() + ".IQ:residence:")
254 .desc("cycles from dispatch to issue")
255 .flags(total | pdf | cdf )
256 ;
257 for (int i = 0; i < Num_OpClasses; ++i) {
258 queueResDist.subname(i, opClassStrings[i]);
259 }
260 */
261 numIssuedDist
262 .init(0,totalWidth,1)
263 .name(name() + ".issued_per_cycle")
264 .desc("Number of insts issued each cycle")
265 .flags(pdf)
266 ;
267 /*
268 dist_unissued
269 .init(Num_OpClasses+2)
270 .name(name() + ".unissued_cause")
271 .desc("Reason ready instruction not issued")
272 .flags(pdf | dist)
273 ;
274 for (int i=0; i < (Num_OpClasses + 2); ++i) {
275 dist_unissued.subname(i, unissued_names[i]);
276 }
277 */
278 statIssuedInstType
279 .init(numThreads,Enums::Num_OpClass)
280 .name(name() + ".FU_type")
281 .desc("Type of FU issued")
282 .flags(total | pdf | dist)
283 ;
284 statIssuedInstType.ysubnames(Enums::OpClassStrings);
285
286 //
287 // How long did instructions for a particular FU type wait prior to issue
288 //
289 /*
290 issueDelayDist
291 .init(Num_OpClasses,0,99,2)
292 .name(name() + ".")
293 .desc("cycles from operands ready to issue")
294 .flags(pdf | cdf)
295 ;
296
297 for (int i=0; i<Num_OpClasses; ++i) {
298 std::stringstream subname;
299 subname << opClassStrings[i] << "_delay";
300 issueDelayDist.subname(i, subname.str());
301 }
302 */
303 issueRate
304 .name(name() + ".rate")
305 .desc("Inst issue rate")
306 .flags(total)
307 ;
308 issueRate = iqInstsIssued / cpu->numCycles;
309
310 statFuBusy
311 .init(Num_OpClasses)
312 .name(name() + ".fu_full")
313 .desc("attempts to use FU when none available")
314 .flags(pdf | dist)
315 ;
316 for (int i=0; i < Num_OpClasses; ++i) {
317 statFuBusy.subname(i, Enums::OpClassStrings[i]);
318 }
319
320 fuBusy
321 .init(numThreads)
322 .name(name() + ".fu_busy_cnt")
323 .desc("FU busy when requested")
324 .flags(total)
325 ;
326
327 fuBusyRate
328 .name(name() + ".fu_busy_rate")
329 .desc("FU busy rate (busy events/executed inst)")
330 .flags(total)
331 ;
332 fuBusyRate = fuBusy / iqInstsIssued;
333
334 for (ThreadID tid = 0; tid < numThreads; tid++) {
335 // Tell mem dependence unit to reg stats as well.
336 memDepUnit[tid].regStats();
337 }
338
339 intInstQueueReads
340 .name(name() + ".int_inst_queue_reads")
341 .desc("Number of integer instruction queue reads")
342 .flags(total);
343
344 intInstQueueWrites
345 .name(name() + ".int_inst_queue_writes")
346 .desc("Number of integer instruction queue writes")
347 .flags(total);
348
349 intInstQueueWakeupAccesses
350 .name(name() + ".int_inst_queue_wakeup_accesses")
351 .desc("Number of integer instruction queue wakeup accesses")
352 .flags(total);
353
354 fpInstQueueReads
355 .name(name() + ".fp_inst_queue_reads")
356 .desc("Number of floating instruction queue reads")
357 .flags(total);
358
359 fpInstQueueWrites
360 .name(name() + ".fp_inst_queue_writes")
361 .desc("Number of floating instruction queue writes")
362 .flags(total);
363
364 fpInstQueueWakeupQccesses
365 .name(name() + ".fp_inst_queue_wakeup_accesses")
366 .desc("Number of floating instruction queue wakeup accesses")
367 .flags(total);
368
369 intAluAccesses
370 .name(name() + ".int_alu_accesses")
371 .desc("Number of integer alu accesses")
372 .flags(total);
373
374 fpAluAccesses
375 .name(name() + ".fp_alu_accesses")
376 .desc("Number of floating point alu accesses")
377 .flags(total);
378
379 }
380
381 template <class Impl>
382 void
383 InstructionQueue<Impl>::resetState()
384 {
385 //Initialize thread IQ counts
386 for (ThreadID tid = 0; tid < numThreads; tid++) {
387 count[tid] = 0;
388 instList[tid].clear();
389 }
390
391 // Initialize the number of free IQ entries.
392 freeEntries = numEntries;
393
394 // Note that in actuality, the registers corresponding to the logical
395 // registers start off as ready. However, this doesn't matter for the
396 // IQ, as each instruction should already have been told in rename
397 // whether its source registers are ready. Thus the scoreboard can be
398 // initialized as all unready.
399 for (int i = 0; i < numPhysRegs; ++i) {
400 regScoreboard[i] = false;
401 }
402
403 for (ThreadID tid = 0; tid < numThreads; ++tid) {
404 squashedSeqNum[tid] = 0;
405 }
406
407 for (int i = 0; i < Num_OpClasses; ++i) {
408 while (!readyInsts[i].empty())
409 readyInsts[i].pop();
410 queueOnList[i] = false;
411 readyIt[i] = listOrder.end();
412 }
413 nonSpecInsts.clear();
414 listOrder.clear();
415 deferredMemInsts.clear();
416 }
417
418 template <class Impl>
419 void
420 InstructionQueue<Impl>::setActiveThreads(list<ThreadID> *at_ptr)
421 {
422 activeThreads = at_ptr;
423 }
424
425 template <class Impl>
426 void
427 InstructionQueue<Impl>::setIssueToExecuteQueue(TimeBuffer<IssueStruct> *i2e_ptr)
428 {
429 issueToExecuteQueue = i2e_ptr;
430 }
431
432 template <class Impl>
433 void
434 InstructionQueue<Impl>::setTimeBuffer(TimeBuffer<TimeStruct> *tb_ptr)
435 {
436 timeBuffer = tb_ptr;
437
438 fromCommit = timeBuffer->getWire(-commitToIEWDelay);
439 }
440
441 template <class Impl>
442 void
443 InstructionQueue<Impl>::drainSanityCheck() const
444 {
445 assert(dependGraph.empty());
446 assert(instsToExecute.empty());
447 for (ThreadID tid = 0; tid < numThreads; ++tid)
448 memDepUnit[tid].drainSanityCheck();
449 }
450
451 template <class Impl>
452 void
453 InstructionQueue<Impl>::takeOverFrom()
454 {
455 resetState();
456 }
457
458 template <class Impl>
459 int
460 InstructionQueue<Impl>::entryAmount(ThreadID num_threads)
461 {
462 if (iqPolicy == Partitioned) {
463 return numEntries / num_threads;
464 } else {
465 return 0;
466 }
467 }
468
469
470 template <class Impl>
471 void
472 InstructionQueue<Impl>::resetEntries()
473 {
474 if (iqPolicy != Dynamic || numThreads > 1) {
475 int active_threads = activeThreads->size();
476
477 list<ThreadID>::iterator threads = activeThreads->begin();
478 list<ThreadID>::iterator end = activeThreads->end();
479
480 while (threads != end) {
481 ThreadID tid = *threads++;
482
483 if (iqPolicy == Partitioned) {
484 maxEntries[tid] = numEntries / active_threads;
485 } else if (iqPolicy == Threshold && active_threads == 1) {
486 maxEntries[tid] = numEntries;
487 }
488 }
489 }
490 }
491
492 template <class Impl>
493 unsigned
494 InstructionQueue<Impl>::numFreeEntries()
495 {
496 return freeEntries;
497 }
498
499 template <class Impl>
500 unsigned
501 InstructionQueue<Impl>::numFreeEntries(ThreadID tid)
502 {
503 return maxEntries[tid] - count[tid];
504 }
505
506 // Might want to do something more complex if it knows how many instructions
507 // will be issued this cycle.
508 template <class Impl>
509 bool
510 InstructionQueue<Impl>::isFull()
511 {
512 return freeEntries == 0;
517 }
518
519 template <class Impl>
520 bool
521 InstructionQueue<Impl>::isFull(ThreadID tid)
522 {
523 return numFreeEntries(tid) == 0;
528 }
529
530 template <class Impl>
531 bool
532 InstructionQueue<Impl>::hasReadyInsts()
533 {
534 if (!listOrder.empty()) {
535 return true;
536 }
537
538 for (int i = 0; i < Num_OpClasses; ++i) {
539 if (!readyInsts[i].empty()) {
540 return true;
541 }
542 }
543
544 return false;
545 }
546
547 template <class Impl>
548 void
549 InstructionQueue<Impl>::insert(DynInstPtr &new_inst)
550 {
551 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
552 // Make sure the instruction is valid
553 assert(new_inst);
554
555 DPRINTF(IQ, "Adding instruction [sn:%lli] PC %s to the IQ.\n",
556 new_inst->seqNum, new_inst->pcState());
557
558 assert(freeEntries != 0);
559
560 instList[new_inst->threadNumber].push_back(new_inst);
561
562 --freeEntries;
563
564 new_inst->setInIQ();
565
566 // Look through its source registers (physical regs), and mark any
567 // dependencies.
568 addToDependents(new_inst);
569
570 // Have this instruction set itself as the producer of its destination
571 // register(s).
572 addToProducers(new_inst);
573
574 if (new_inst->isMemRef()) {
575 memDepUnit[new_inst->threadNumber].insert(new_inst);
576 } else {
577 addIfReady(new_inst);
578 }
579
580 ++iqInstsAdded;
581
582 count[new_inst->threadNumber]++;
583
584 assert(freeEntries == (numEntries - countInsts()));
585 }
586
587 template <class Impl>
588 void
589 InstructionQueue<Impl>::insertNonSpec(DynInstPtr &new_inst)
590 {
591 // @todo: Clean up this code; can do it by setting inst as unable
592 // to issue, then calling normal insert on the inst.
593 new_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
594
595 assert(new_inst);
596
597 nonSpecInsts[new_inst->seqNum] = new_inst;
598
599 DPRINTF(IQ, "Adding non-speculative instruction [sn:%lli] PC %s "
600 "to the IQ.\n",
601 new_inst->seqNum, new_inst->pcState());
602
603 assert(freeEntries != 0);
604
605 instList[new_inst->threadNumber].push_back(new_inst);
606
607 --freeEntries;
608
609 new_inst->setInIQ();
610
611 // Have this instruction set itself as the producer of its destination
612 // register(s).
613 addToProducers(new_inst);
614
615 // If it's a memory instruction, add it to the memory dependency
616 // unit.
617 if (new_inst->isMemRef()) {
618 memDepUnit[new_inst->threadNumber].insertNonSpec(new_inst);
619 }
620
621 ++iqNonSpecInstsAdded;
622
623 count[new_inst->threadNumber]++;
624
625 assert(freeEntries == (numEntries - countInsts()));
626 }
627
628 template <class Impl>
629 void
630 InstructionQueue<Impl>::insertBarrier(DynInstPtr &barr_inst)
631 {
632 memDepUnit[barr_inst->threadNumber].insertBarrier(barr_inst);
633
634 insertNonSpec(barr_inst);
635 }
636
637 template <class Impl>
638 typename Impl::DynInstPtr
639 InstructionQueue<Impl>::getInstToExecute()
640 {
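// Pop the next instruction that scheduleReadyInsts() or an FU completion
// queued up for the execute stage.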
641 assert(!instsToExecute.empty());
642 DynInstPtr inst = instsToExecute.front();
643 instsToExecute.pop_front();
644 if (inst->isFloating()){
645 fpInstQueueReads++;
646 } else {
647 intInstQueueReads++;
648 }
649 return inst;
650 }
651
652 template <class Impl>
653 void
654 InstructionQueue<Impl>::addToOrderList(OpClass op_class)
655 {
656 assert(!readyInsts[op_class].empty());
657
658 ListOrderEntry queue_entry;
659
660 queue_entry.queueType = op_class;
661
662 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
663
664 ListOrderIt list_it = listOrder.begin();
665 ListOrderIt list_end_it = listOrder.end();
666
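// listOrder is kept sorted by the seq num of each op class's oldest ready
// instruction, so walk forward to the first entry younger than this one
// and insert in front of it.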
667 while (list_it != list_end_it) {
668 if ((*list_it).oldestInst > queue_entry.oldestInst) {
669 break;
670 }
671
672 list_it++;
673 }
674
675 readyIt[op_class] = listOrder.insert(list_it, queue_entry);
676 queueOnList[op_class] = true;
677 }
678
679 template <class Impl>
680 void
681 InstructionQueue<Impl>::moveToYoungerInst(ListOrderIt list_order_it)
682 {
683 // Get an iterator to the next item on the list (the original entry is
684 // erased by the caller). Walk forward until the next item is either the
685 // end of the list or younger than the op class's new oldest instruction,
686 // and insert the new entry there.
688 ListOrderEntry queue_entry;
689 OpClass op_class = (*list_order_it).queueType;
690 ListOrderIt next_it = list_order_it;
691
692 ++next_it;
693
694 queue_entry.queueType = op_class;
695 queue_entry.oldestInst = readyInsts[op_class].top()->seqNum;
696
697 while (next_it != listOrder.end() &&
698 (*next_it).oldestInst < queue_entry.oldestInst) {
699 ++next_it;
700 }
701
702 readyIt[op_class] = listOrder.insert(next_it, queue_entry);
703 }
704
705 template <class Impl>
706 void
707 InstructionQueue<Impl>::processFUCompletion(DynInstPtr &inst, int fu_idx)
708 {
709 DPRINTF(IQ, "Processing FU completion [sn:%lli]\n", inst->seqNum);
710 assert(!cpu->switchedOut());
711 // The CPU could have been sleeping until this op completed (*extremely*
712 // long latency op). Wake it if it was. This may be overkill.
713 iewStage->wakeCPU();
714
715 if (fu_idx > -1)
716 fuPool->freeUnitNextCycle(fu_idx);
717
718 // @todo: Ensure that these FU Completions happen at the beginning
719 // of a cycle, otherwise they could add too many instructions to
720 // the queue.
721 issueToExecuteQueue->access(-1)->size++;
722 instsToExecute.push_back(inst);
723 }
724
725 // @todo: Figure out a better way to remove the squashed items from the
726 // lists. Checking the top item of each list to see if it's squashed
727 // wastes time and forces jumps.
728 template <class Impl>
729 void
730 InstructionQueue<Impl>::scheduleReadyInsts()
731 {
732 DPRINTF(IQ, "Attempting to schedule ready instructions from "
733 "the IQ.\n");
734
735 IssueStruct *i2e_info = issueToExecuteQueue->access(0);
736
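// First drain any deferred memory instructions whose translations have
// completed (or that were squashed); each one consumes a slot of this
// cycle's issue bandwidth.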
737 DynInstPtr deferred_mem_inst;
738 int total_deferred_mem_issued = 0;
739 while (total_deferred_mem_issued < totalWidth &&
740 (deferred_mem_inst = getDeferredMemInstToExecute()) != 0) {
741 issueToExecuteQueue->access(0)->size++;
742 instsToExecute.push_back(deferred_mem_inst);
743 total_deferred_mem_issued++;
744 }
745
746 // Have iterator to head of the list
747 // While I haven't exceeded bandwidth or reached the end of the list,
748 // Try to get a FU that can do what this op needs.
749 // If successful, change the oldestInst to the new top of the list, put
750 // the queue in the proper place in the list.
751 // Increment the iterator.
752 // This will avoid trying to schedule a certain op class if there are no
753 // FUs that handle it.
754 ListOrderIt order_it = listOrder.begin();
755 ListOrderIt order_end_it = listOrder.end();
756 int total_issued = 0;
757
758 while (total_issued < (totalWidth - total_deferred_mem_issued) &&
759 iewStage->canIssue() &&
760 order_it != order_end_it) {
761 OpClass op_class = (*order_it).queueType;
762
763 assert(!readyInsts[op_class].empty());
764
765 DynInstPtr issuing_inst = readyInsts[op_class].top();
766
767 issuing_inst->isFloating() ? fpInstQueueReads++ : intInstQueueReads++;
768
769 assert(issuing_inst->seqNum == (*order_it).oldestInst);
770
771 if (issuing_inst->isSquashed()) {
772 readyInsts[op_class].pop();
773
774 if (!readyInsts[op_class].empty()) {
775 moveToYoungerInst(order_it);
776 } else {
777 readyIt[op_class] = listOrder.end();
778 queueOnList[op_class] = false;
779 }
780
781 listOrder.erase(order_it++);
782
783 ++iqSquashedInstsIssued;
784
785 continue;
786 }
787
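// idx == -2 marks an op that does not need an FU; getUnit() returns -1
// below when no FU is available for this op class this cycle.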
788 int idx = -2;
789 Cycles op_latency = Cycles(1);
790 ThreadID tid = issuing_inst->threadNumber;
791
792 if (op_class != No_OpClass) {
793 idx = fuPool->getUnit(op_class);
794 issuing_inst->isFloating() ? fpAluAccesses++ : intAluAccesses++;
795 if (idx > -1) {
796 op_latency = fuPool->getOpLatency(op_class);
797 }
798 }
799
800 // If we have an instruction that doesn't require a FU, or a
801 // valid FU, then schedule for execution.
802 if (idx == -2 || idx != -1) {
803 if (op_latency == Cycles(1)) {
804 i2e_info->size++;
805 instsToExecute.push_back(issuing_inst);
806
807 // Add the FU onto the list of FU's to be freed next
808 // cycle if we used one.
809 if (idx >= 0)
810 fuPool->freeUnitNextCycle(idx);
811 } else {
812 Cycles issue_latency = fuPool->getIssueLatency(op_class);
813 // Generate completion event for the FU
814 FUCompletion *execution = new FUCompletion(issuing_inst,
815 idx, this);
816
817 cpu->schedule(execution,
818 cpu->clockEdge(Cycles(op_latency - 1)));
819
820 // @todo: Enforce that issue_latency == 1 or op_latency
821 if (issue_latency > Cycles(1)) {
822 // If FU isn't pipelined, then it must be freed
823 // upon the execution completing.
824 execution->setFreeFU();
825 } else {
826 // Add the FU onto the list of FU's to be freed next cycle.
827 fuPool->freeUnitNextCycle(idx);
828 }
829 }
830
831 DPRINTF(IQ, "Thread %i: Issuing instruction PC %s "
832 "[sn:%lli]\n",
833 tid, issuing_inst->pcState(),
834 issuing_inst->seqNum);
835
836 readyInsts[op_class].pop();
837
838 if (!readyInsts[op_class].empty()) {
839 moveToYoungerInst(order_it);
840 } else {
841 readyIt[op_class] = listOrder.end();
842 queueOnList[op_class] = false;
843 }
844
845 issuing_inst->setIssued();
846 ++total_issued;
847
848 #if TRACING_ON
849 issuing_inst->issueTick = curTick() - issuing_inst->fetchTick;
850 #endif
851
852 if (!issuing_inst->isMemRef()) {
853 // Memory instructions can not be freed from the IQ until they
854 // complete.
855 ++freeEntries;
856 count[tid]--;
857 issuing_inst->clearInIQ();
858 } else {
859 memDepUnit[tid].issue(issuing_inst);
860 }
861
862 listOrder.erase(order_it++);
863 statIssuedInstType[tid][op_class]++;
864 iewStage->incrWb(issuing_inst->seqNum);
865 } else {
866 statFuBusy[op_class]++;
867 fuBusy[tid]++;
868 ++order_it;
869 }
870 }
871
872 numIssuedDist.sample(total_issued);
873 iqInstsIssued += total_issued;
874
875 // If we issued any instructions, tell the CPU we had activity.
876 // @todo If the way deferred memory instructions are handled due to
877 // translation changes, then the deferredMemInsts condition should be
878 // removed from the code below.
879 if (total_issued || total_deferred_mem_issued || deferredMemInsts.size()) {
880 cpu->activityThisCycle();
881 } else {
882 DPRINTF(IQ, "Not able to schedule any instructions.\n");
883 }
884 }
885
886 template <class Impl>
887 void
888 InstructionQueue<Impl>::scheduleNonSpec(const InstSeqNum &inst)
889 {
890 DPRINTF(IQ, "Marking nonspeculative instruction [sn:%lli] as ready "
891 "to execute.\n", inst);
892
893 NonSpecMapIt inst_it = nonSpecInsts.find(inst);
894
895 assert(inst_it != nonSpecInsts.end());
896
897 ThreadID tid = (*inst_it).second->threadNumber;
898
899 (*inst_it).second->setAtCommit();
900
901 (*inst_it).second->setCanIssue();
902
903 if (!(*inst_it).second->isMemRef()) {
904 addIfReady((*inst_it).second);
905 } else {
906 memDepUnit[tid].nonSpecInstReady((*inst_it).second);
907 }
908
909 (*inst_it).second = NULL;
910
911 nonSpecInsts.erase(inst_it);
912 }
913
914 template <class Impl>
915 void
916 InstructionQueue<Impl>::commit(const InstSeqNum &inst, ThreadID tid)
917 {
918 DPRINTF(IQ, "[tid:%i]: Committing instructions older than [sn:%i]\n",
919 tid,inst);
920
921 ListIt iq_it = instList[tid].begin();
922
923 while (iq_it != instList[tid].end() &&
924 (*iq_it)->seqNum <= inst) {
925 ++iq_it;
926 instList[tid].pop_front();
927 }
928
929 assert(freeEntries == (numEntries - countInsts()));
930 }
931
932 template <class Impl>
933 int
934 InstructionQueue<Impl>::wakeDependents(DynInstPtr &completed_inst)
935 {
936 int dependents = 0;
937
938 // The instruction queue here takes care of both floating and int ops
939 if (completed_inst->isFloating()) {
940 fpInstQueueWakeupQccesses++;
941 } else {
942 intInstQueueWakeupAccesses++;
943 }
944
945 DPRINTF(IQ, "Waking dependents of completed instruction.\n");
946
947 assert(!completed_inst->isSquashed());
948
949 // Tell the memory dependence unit to wake any dependents on this
950 // instruction if it is a memory instruction. Also complete the memory
951 // instruction at this point since we know it executed without issues.
952 // @todo: Might want to rename "completeMemInst" to something that
953 // indicates that it won't need to be replayed, and call this
954 // earlier. Might not be a big deal.
955 if (completed_inst->isMemRef()) {
956 memDepUnit[completed_inst->threadNumber].wakeDependents(completed_inst);
957 completeMemInst(completed_inst);
958 } else if (completed_inst->isMemBarrier() ||
959 completed_inst->isWriteBarrier()) {
960 memDepUnit[completed_inst->threadNumber].completeBarrier(completed_inst);
961 }
962
963 for (int dest_reg_idx = 0;
964 dest_reg_idx < completed_inst->numDestRegs();
965 dest_reg_idx++)
966 {
967 PhysRegIndex dest_reg =
968 completed_inst->renamedDestRegIdx(dest_reg_idx);
969
970 // Special case of uniq or control registers. They are not
971 // handled by the IQ and thus have no dependency graph entry.
972 // @todo Figure out a cleaner way to handle this.
973 if (dest_reg >= numPhysRegs) {
974 DPRINTF(IQ, "dest_reg :%d, numPhysRegs: %d\n", dest_reg,
975 numPhysRegs);
976 continue;
977 }
978
979 DPRINTF(IQ, "Waking any dependents on register %i.\n",
980 (int) dest_reg);
981
982 //Go through the dependency chain, marking the registers as
983 //ready within the waiting instructions.
984 DynInstPtr dep_inst = dependGraph.pop(dest_reg);
985
986 while (dep_inst) {
987 DPRINTF(IQ, "Waking up a dependent instruction, [sn:%lli] "
988 "PC %s.\n", dep_inst->seqNum, dep_inst->pcState());
989
990 // Might want to give more information to the instruction
991 // so that it knows which of its source registers is
992 // ready. However that would mean that the dependency
993 // graph entries would need to hold the src_reg_idx.
994 dep_inst->markSrcRegReady();
995
996 addIfReady(dep_inst);
997
998 dep_inst = dependGraph.pop(dest_reg);
999
1000 ++dependents;
1001 }
1002
1003 // Reset the head node now that all of its dependents have
1004 // been woken up.
1005 assert(dependGraph.empty(dest_reg));
1006 dependGraph.clearInst(dest_reg);
1007
1008 // Mark the scoreboard as having that register ready.
1009 regScoreboard[dest_reg] = true;
1010 }
1011 return dependents;
1012 }
1013
1014 template <class Impl>
1015 void
1016 InstructionQueue<Impl>::addReadyMemInst(DynInstPtr &ready_inst)
1017 {
1018 OpClass op_class = ready_inst->opClass();
1019
1020 readyInsts[op_class].push(ready_inst);
1021
1022 // Will need to reorder the list if either a queue is not on the list,
1023 // or it has an older instruction than last time.
1024 if (!queueOnList[op_class]) {
1025 addToOrderList(op_class);
1026 } else if (readyInsts[op_class].top()->seqNum <
1027 (*readyIt[op_class]).oldestInst) {
1028 listOrder.erase(readyIt[op_class]);
1029 addToOrderList(op_class);
1030 }
1031
1032 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1033 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1034 ready_inst->pcState(), op_class, ready_inst->seqNum);
1035 }
1036
1037 template <class Impl>
1038 void
1039 InstructionQueue<Impl>::rescheduleMemInst(DynInstPtr &resched_inst)
1040 {
1041 DPRINTF(IQ, "Rescheduling mem inst [sn:%lli]\n", resched_inst->seqNum);
1042
1043 // Reset DTB translation state
1044 resched_inst->translationStarted(false);
1045 resched_inst->translationCompleted(false);
1046
1047 resched_inst->clearCanIssue();
1048 memDepUnit[resched_inst->threadNumber].reschedule(resched_inst);
1049 }
1050
1051 template <class Impl>
1052 void
1053 InstructionQueue<Impl>::replayMemInst(DynInstPtr &replay_inst)
1054 {
1055 memDepUnit[replay_inst->threadNumber].replay(replay_inst);
1056 }
1057
1058 template <class Impl>
1059 void
1060 InstructionQueue<Impl>::completeMemInst(DynInstPtr &completed_inst)
1061 {
1062 ThreadID tid = completed_inst->threadNumber;
1063
1064 DPRINTF(IQ, "Completing mem instruction PC: %s [sn:%lli]\n",
1065 completed_inst->pcState(), completed_inst->seqNum);
1066
1067 ++freeEntries;
1068
1069 completed_inst->memOpDone(true);
1070
1071 memDepUnit[tid].completed(completed_inst);
1072 count[tid]--;
1073 }
1074
1075 template <class Impl>
1076 void
1077 InstructionQueue<Impl>::deferMemInst(DynInstPtr &deferred_inst)
1078 {
1079 deferredMemInsts.push_back(deferred_inst);
1080 }
1081
1082 template <class Impl>
1083 typename Impl::DynInstPtr
1084 InstructionQueue<Impl>::getDeferredMemInstToExecute()
1085 {
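// Return the first deferred memory instruction that can proceed: its
// translation has completed, or it has been squashed and only needs to
// drain. Returns NULL if no deferred instruction is ready.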
1086 for (ListIt it = deferredMemInsts.begin(); it != deferredMemInsts.end();
1087 ++it) {
1088 if ((*it)->translationCompleted() || (*it)->isSquashed()) {
1089 DynInstPtr ret = *it;
1090 deferredMemInsts.erase(it);
1091 return ret;
1092 }
1093 }
1094 return NULL;
1095 }
1096
1097 template <class Impl>
1098 void
1099 InstructionQueue<Impl>::violation(DynInstPtr &store,
1100 DynInstPtr &faulting_load)
1101 {
1102 intInstQueueWrites++;
1103 memDepUnit[store->threadNumber].violation(store, faulting_load);
1104 }
1105
1106 template <class Impl>
1107 void
1108 InstructionQueue<Impl>::squash(ThreadID tid)
1109 {
1110 DPRINTF(IQ, "[tid:%i]: Starting to squash instructions in "
1111 "the IQ.\n", tid);
1112
1113 // Read instruction sequence number of last instruction out of the
1114 // time buffer.
1115 squashedSeqNum[tid] = fromCommit->commitInfo[tid].doneSeqNum;
1116
1117 // Call doSquash if there are insts in the IQ
1118 if (count[tid] > 0) {
1119 doSquash(tid);
1120 }
1121
1122 // Also tell the memory dependence unit to squash.
1123 memDepUnit[tid].squash(squashedSeqNum[tid], tid);
1124 }
1125
1126 template <class Impl>
1127 void
1128 InstructionQueue<Impl>::doSquash(ThreadID tid)
1129 {
1130 // Start at the tail.
1131 ListIt squash_it = instList[tid].end();
1132 --squash_it;
1133
1134 DPRINTF(IQ, "[tid:%i]: Squashing until sequence number %i!\n",
1135 tid, squashedSeqNum[tid]);
1136
1137 // Squash any instructions younger than the squashed sequence number
1138 // given.
1139 while (squash_it != instList[tid].end() &&
1140 (*squash_it)->seqNum > squashedSeqNum[tid]) {
1141
1142 DynInstPtr squashed_inst = (*squash_it);
1143 squashed_inst->isFloating() ? fpInstQueueWrites++ : intInstQueueWrites++;
1144
1145 // Only handle the instruction if it actually is in the IQ and
1146 // hasn't already been squashed in the IQ.
1147 if (squashed_inst->threadNumber != tid ||
1148 squashed_inst->isSquashedInIQ()) {
1149 --squash_it;
1150 continue;
1151 }
1152
1153 if (!squashed_inst->isIssued() ||
1154 (squashed_inst->isMemRef() &&
1155 !squashed_inst->memOpDone())) {
1156
1157 DPRINTF(IQ, "[tid:%i]: Instruction [sn:%lli] PC %s squashed.\n",
1158 tid, squashed_inst->seqNum, squashed_inst->pcState());
1159
1160 bool is_acq_rel = squashed_inst->isMemBarrier() &&
1161 (squashed_inst->isLoad() ||
1162 (squashed_inst->isStore() &&
1163 !squashed_inst->isStoreConditional()));
1164
1165 // Remove the instruction from the dependency list.
1166 if (is_acq_rel ||
1167 (!squashed_inst->isNonSpeculative() &&
1168 !squashed_inst->isStoreConditional() &&
1169 !squashed_inst->isMemBarrier() &&
1170 !squashed_inst->isWriteBarrier())) {
1171
1172 for (int src_reg_idx = 0;
1173 src_reg_idx < squashed_inst->numSrcRegs();
1174 src_reg_idx++)
1175 {
1176 PhysRegIndex src_reg =
1177 squashed_inst->renamedSrcRegIdx(src_reg_idx);
1178
1179 // Only remove it from the dependency graph if it
1180 // was placed there in the first place.
1181
1182 // Instead of doing a linked list traversal, we
1183 // can just remove these squashed instructions
1184 // either at issue time, or when the register is
1185 // overwritten. The only downside to this is it
1186 // leaves more room for error.
1187
1188 if (!squashed_inst->isReadySrcRegIdx(src_reg_idx) &&
1189 src_reg < numPhysRegs) {
1190 dependGraph.remove(src_reg, squashed_inst);
1191 }
1192
1193
1194 ++iqSquashedOperandsExamined;
1195 }
1196 } else if (!squashed_inst->isStoreConditional() ||
1197 !squashed_inst->isCompleted()) {
1198 NonSpecMapIt ns_inst_it =
1199 nonSpecInsts.find(squashed_inst->seqNum);
1200
1201 // we remove non-speculative instructions from
1202 // nonSpecInsts already when they are ready, and so we
1203 // cannot always expect to find them
1204 if (ns_inst_it == nonSpecInsts.end()) {
1205 // loads that became ready but stalled on a
1206 // blocked cache are already removed from
1207 // nonSpecInsts, and have not faulted
1208 assert(squashed_inst->getFault() != NoFault ||
1209 squashed_inst->isMemRef());
1210 } else {
1211
1212 (*ns_inst_it).second = NULL;
1213
1214 nonSpecInsts.erase(ns_inst_it);
1215
1216 ++iqSquashedNonSpecRemoved;
1217 }
1218 }
1219
1220 // Might want to also clear out the head of the dependency graph.
1221
1222 // Mark it as squashed within the IQ.
1223 squashed_inst->setSquashedInIQ();
1224
1225 // @todo: Remove this hack where several statuses are set so the
1226 // inst will flow through the rest of the pipeline.
1227 squashed_inst->setIssued();
1228 squashed_inst->setCanCommit();
1229 squashed_inst->clearInIQ();
1230
1231 //Update Thread IQ Count
1232 count[squashed_inst->threadNumber]--;
1233
1234 ++freeEntries;
1235 }
1236
1237 instList[tid].erase(squash_it--);
1238 ++iqSquashedInstsExamined;
1239 }
1240 }
1241
1242 template <class Impl>
1243 bool
1244 InstructionQueue<Impl>::addToDependents(DynInstPtr &new_inst)
1245 {
1246 // Loop through the instruction's source registers, adding
1247 // them to the dependency list if they are not ready.
1248 int8_t total_src_regs = new_inst->numSrcRegs();
1249 bool return_val = false;
1250
1251 for (int src_reg_idx = 0;
1252 src_reg_idx < total_src_regs;
1253 src_reg_idx++)
1254 {
1255 // Only add it to the dependency graph if it's not ready.
1256 if (!new_inst->isReadySrcRegIdx(src_reg_idx)) {
1257 PhysRegIndex src_reg = new_inst->renamedSrcRegIdx(src_reg_idx);
1258
1259 // Check the IQ's scoreboard to make sure the register
1260 // hasn't become ready while the instruction was in flight
1261 // between stages. Only if it really isn't ready should
1262 // it be added to the dependency graph.
1263 if (src_reg >= numPhysRegs) {
1264 continue;
1265 } else if (!regScoreboard[src_reg]) {
1266 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1267 "is being added to the dependency chain.\n",
1268 new_inst->pcState(), src_reg);
1269
1270 dependGraph.insert(src_reg, new_inst);
1271
1272 // Change the return value to indicate that something
1273 // was added to the dependency graph.
1274 return_val = true;
1275 } else {
1276 DPRINTF(IQ, "Instruction PC %s has src reg %i that "
1277 "became ready before it reached the IQ.\n",
1278 new_inst->pcState(), src_reg);
1279 // Mark a register ready within the instruction.
1280 new_inst->markSrcRegReady(src_reg_idx);
1281 }
1282 }
1283 }
1284
1285 return return_val;
1286 }
1287
1288 template <class Impl>
1289 void
1290 InstructionQueue<Impl>::addToProducers(DynInstPtr &new_inst)
1291 {
1292 // Nothing really needs to be marked when an instruction becomes
1293 // the producer of a register's value, but for convenience a ptr
1294 // to the producing instruction will be placed in the head node of
1295 // the dependency links.
1296 int8_t total_dest_regs = new_inst->numDestRegs();
1297
1298 for (int dest_reg_idx = 0;
1299 dest_reg_idx < total_dest_regs;
1300 dest_reg_idx++)
1301 {
1302 PhysRegIndex dest_reg = new_inst->renamedDestRegIdx(dest_reg_idx);
1303
1304 // Instructions that use the misc regs will have a reg number
1305 // higher than the normal physical registers. In this case these
1306 // registers are not renamed, and there is no need to track
1307 // dependencies as these instructions must be executed at commit.
1308 if (dest_reg >= numPhysRegs) {
1309 continue;
1310 }
1311
1312 if (!dependGraph.empty(dest_reg)) {
1313 dependGraph.dump();
1314 panic("Dependency graph %i not empty!", dest_reg);
1315 }
1316
1317 dependGraph.setInst(dest_reg, new_inst);
1318
1319 // Mark the scoreboard to say it's not yet ready.
1320 regScoreboard[dest_reg] = false;
1321 }
1322 }
1323
1324 template <class Impl>
1325 void
1326 InstructionQueue<Impl>::addIfReady(DynInstPtr &inst)
1327 {
1328 // If the instruction now has all of its source registers
1329 // available, then add it to the list of ready instructions.
1330 if (inst->readyToIssue()) {
1331
1332 //Add the instruction to the proper ready list.
1333 if (inst->isMemRef()) {
1334
1335 DPRINTF(IQ, "Checking if memory instruction can issue.\n");
1336
1337 // Message to the mem dependence unit that this instruction has
1338 // its registers ready.
1339 memDepUnit[inst->threadNumber].regsReady(inst);
1340
1341 return;
1342 }
1343
1344 OpClass op_class = inst->opClass();
1345
1346 DPRINTF(IQ, "Instruction is ready to issue, putting it onto "
1347 "the ready list, PC %s opclass:%i [sn:%lli].\n",
1348 inst->pcState(), op_class, inst->seqNum);
1349
1350 readyInsts[op_class].push(inst);
1351
1352 // Will need to reorder the list if either a queue is not on the list,
1353 // or it has an older instruction than last time.
1354 if (!queueOnList[op_class]) {
1355 addToOrderList(op_class);
1356 } else if (readyInsts[op_class].top()->seqNum <
1357 (*readyIt[op_class]).oldestInst) {
1358 listOrder.erase(readyIt[op_class]);
1359 addToOrderList(op_class);
1360 }
1361 }
1362 }
1363
1364 template <class Impl>
1365 int
1366 InstructionQueue<Impl>::countInsts()
1367 {
1368 #if 0
1369 // ksewell: This works but definitely could use a cleaner rewrite
1370 // with a more intuitive way of counting. Right now it's
1371 // just brute force ....
1372 // Change the #if if you want to use this method.
1373 int total_insts = 0;
1374
1375 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1376 ListIt count_it = instList[tid].begin();
1377
1378 while (count_it != instList[tid].end()) {
1379 if (!(*count_it)->isSquashed() && !(*count_it)->isSquashedInIQ()) {
1380 if (!(*count_it)->isIssued()) {
1381 ++total_insts;
1382 } else if ((*count_it)->isMemRef() &&
1383 !(*count_it)->memOpDone()) {
1384 // Loads that have not been marked as executed still count
1385 // towards the total instructions.
1386 ++total_insts;
1387 }
1388 }
1389
1390 ++count_it;
1391 }
1392 }
1393
1394 return total_insts;
1395 #else
1396 return numEntries - freeEntries;
1397 #endif
1398 }
1399
1400 template <class Impl>
1401 void
1402 InstructionQueue<Impl>::dumpLists()
1403 {
1404 for (int i = 0; i < Num_OpClasses; ++i) {
1405 cprintf("Ready list %i size: %i\n", i, readyInsts[i].size());
1406
1407 cprintf("\n");
1408 }
1409
1410 cprintf("Non speculative list size: %i\n", nonSpecInsts.size());
1411
1412 NonSpecMapIt non_spec_it = nonSpecInsts.begin();
1413 NonSpecMapIt non_spec_end_it = nonSpecInsts.end();
1414
1415 cprintf("Non speculative list: ");
1416
1417 while (non_spec_it != non_spec_end_it) {
1418 cprintf("%s [sn:%lli]", (*non_spec_it).second->pcState(),
1419 (*non_spec_it).second->seqNum);
1420 ++non_spec_it;
1421 }
1422
1423 cprintf("\n");
1424
1425 ListOrderIt list_order_it = listOrder.begin();
1426 ListOrderIt list_order_end_it = listOrder.end();
1427 int i = 1;
1428
1429 cprintf("List order: ");
1430
1431 while (list_order_it != list_order_end_it) {
1432 cprintf("%i OpClass:%i [sn:%lli] ", i, (*list_order_it).queueType,
1433 (*list_order_it).oldestInst);
1434
1435 ++list_order_it;
1436 ++i;
1437 }
1438
1439 cprintf("\n");
1440 }
1441
1442
1443 template <class Impl>
1444 void
1445 InstructionQueue<Impl>::dumpInsts()
1446 {
1447 for (ThreadID tid = 0; tid < numThreads; ++tid) {
1448 int num = 0;
1449 int valid_num = 0;
1450 ListIt inst_list_it = instList[tid].begin();
1451
1452 while (inst_list_it != instList[tid].end()) {
1453 cprintf("Instruction:%i\n", num);
1454 if (!(*inst_list_it)->isSquashed()) {
1455 if (!(*inst_list_it)->isIssued()) {
1456 ++valid_num;
1457 cprintf("Count:%i\n", valid_num);
1458 } else if ((*inst_list_it)->isMemRef() &&
1459 !(*inst_list_it)->memOpDone()) {
1460 // Loads that have not been marked as executed
1461 // still count towards the total instructions.
1462 ++valid_num;
1463 cprintf("Count:%i\n", valid_num);
1464 }
1465 }
1466
1467 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1468 "Issued:%i\nSquashed:%i\n",
1469 (*inst_list_it)->pcState(),
1470 (*inst_list_it)->seqNum,
1471 (*inst_list_it)->threadNumber,
1472 (*inst_list_it)->isIssued(),
1473 (*inst_list_it)->isSquashed());
1474
1475 if ((*inst_list_it)->isMemRef()) {
1476 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1477 }
1478
1479 cprintf("\n");
1480
1481 inst_list_it++;
1482 ++num;
1483 }
1484 }
1485
1486 cprintf("Insts to Execute list:\n");
1487
1488 int num = 0;
1489 int valid_num = 0;
1490 ListIt inst_list_it = instsToExecute.begin();
1491
1492 while (inst_list_it != instsToExecute.end())
1493 {
1494 cprintf("Instruction:%i\n",
1495 num);
1496 if (!(*inst_list_it)->isSquashed()) {
1497 if (!(*inst_list_it)->isIssued()) {
1498 ++valid_num;
1499 cprintf("Count:%i\n", valid_num);
1500 } else if ((*inst_list_it)->isMemRef() &&
1501 !(*inst_list_it)->memOpDone()) {
1502 // Loads that have not been marked as executed
1503 // still count towards the total instructions.
1504 ++valid_num;
1505 cprintf("Count:%i\n", valid_num);
1506 }
1507 }
1508
1509 cprintf("PC: %s\n[sn:%lli]\n[tid:%i]\n"
1510 "Issued:%i\nSquashed:%i\n",
1511 (*inst_list_it)->pcState(),
1512 (*inst_list_it)->seqNum,
1513 (*inst_list_it)->threadNumber,
1514 (*inst_list_it)->isIssued(),
1515 (*inst_list_it)->isSquashed());
1516
1517 if ((*inst_list_it)->isMemRef()) {
1518 cprintf("MemOpDone:%i\n", (*inst_list_it)->memOpDone());
1519 }
1520
1521 cprintf("\n");
1522
1523 inst_list_it++;
1524 ++num;
1525 }
1526 }
1527
1528 #endif//__CPU_O3_INST_QUEUE_IMPL_HH__