mem: Pick the next DRAM request based on bank availability
author     Andreas Hansson <andreas.hansson@arm.com>
Fri, 1 Nov 2013 15:56:20 +0000 (11:56 -0400)
committer  Andreas Hansson <andreas.hansson@arm.com>
Fri, 1 Nov 2013 15:56:20 +0000 (11:56 -0400)
This patch changes the FCFS part of FR-FCFS so that requests
targeting the earliest available bank are picked first (as suggested
in the original work on FR-FCFS by Rixner et al.). To accommodate
this, we add functionality to identify a bank through a
one-dimensional identifier (bank id). The member names of DRAMPacket
are also updated to match the style guide.
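
As an illustration of the scheme described above, here is a minimal
standalone sketch of the flat bank-id encoding and the earliest-available
bank mask. The names banksPerRank, freeAt and hasWaiting mirror the patch,
but the helpers themselves are assumptions of this sketch, not part of the
change:

#include <cstdint>
#include <vector>

// Flatten (rank, bank) into a single bank id, mirroring decodeAddr().
static uint16_t flatBankId(uint8_t rank, uint8_t bank, uint32_t banksPerRank)
{
    return static_cast<uint16_t>(banksPerRank * rank + bank);
}

// Return a bitmask with one bit set per bank that becomes free earliest,
// considering only banks that have queued requests (as minBankFreeAt() does).
static uint64_t earliestBankMask(const std::vector<uint64_t>& freeAt,
                                 const std::vector<bool>& hasWaiting)
{
    uint64_t mask = 0;
    uint64_t minFree = UINT64_MAX;
    for (size_t id = 0; id < freeAt.size() && id < 64; ++id) {
        if (!hasWaiting[id])
            continue;
        if (freeAt[id] < minFree) {
            minFree = freeAt[id];
            mask = 0;                  // new minimum found, restart the mask
        }
        if (freeAt[id] == minFree)
            mask |= uint64_t(1) << id; // bank id maps directly to a bit position
    }
    return mask;
}

A scheduler can then prefer any queued request whose bank id has its bit
set in the returned mask, which is the tie-break chooseNextRead() and
chooseNextWrite() apply below when no row hit is found.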

src/mem/simple_dram.cc
src/mem/simple_dram.hh

index 62f825395c7d622539db5355351b86576ebf672b..9cbca6a1ce8a7717a668f32cb0e45cc8b1cc76cd 100644
--- a/src/mem/simple_dram.cc
+++ b/src/mem/simple_dram.cc
  *
  * Authors: Andreas Hansson
  *          Ani Udipi
+ *          Neha Agarwal
  */
 
 #include "base/trace.hh"
+#include "base/bitfield.hh"
 #include "debug/Drain.hh"
 #include "debug/DRAM.hh"
 #include "mem/simple_dram.hh"
@@ -191,7 +193,7 @@ SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRe
     // Ra, Co, Ba and Ch denoting rank, column, bank and channel,
     // respectively
     uint8_t rank;
-    uint16_t bank;
+    uint8_t bank;
     uint16_t row;
 
     // truncate the address to the access granularity
@@ -278,8 +280,9 @@ SimpleDRAM::decodeAddr(PacketPtr pkt, Addr dramPktAddr, unsigned size, bool isRe
     // create the corresponding DRAM packet with the entry time and
     // ready time set to the current tick, the latter will be updated
     // later
-    return new DRAMPacket(pkt, isRead, rank, bank, row, dramPktAddr, size,
-                          banks[rank][bank]);
+    uint16_t bank_id = banksPerRank * rank + bank;
+    return new DRAMPacket(pkt, isRead, rank, bank, row, bank_id, dramPktAddr,
+                          size, banks[rank][bank]);
 }
 
 void
@@ -347,9 +350,8 @@ SimpleDRAM::addToReadQueue(PacketPtr pkt, unsigned int pktCount)
             readQueue.push_back(dram_pkt);
 
             // Update stats
-            uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
-            assert(bank_id < ranksPerChannel * banksPerRank);
-            perBankRdReqs[bank_id]++;
+            assert(dram_pkt->bankId < ranksPerChannel * banksPerRank);
+            perBankRdReqs[dram_pkt->bankId]++;
 
             avgRdQLen = readQueue.size() + respQueue.size();
         }
@@ -540,9 +542,8 @@ SimpleDRAM::addToWriteQueue(PacketPtr pkt, unsigned int pktCount)
             writeQueue.push_back(dram_pkt);
 
             // Update stats
-            uint32_t bank_id = banksPerRank * dram_pkt->rank + dram_pkt->bank;
-            assert(bank_id < ranksPerChannel * banksPerRank);
-            perBankWrReqs[bank_id]++;
+            assert(dram_pkt->bankId < ranksPerChannel * banksPerRank);
+            perBankWrReqs[dram_pkt->bankId]++;
 
             avgWrQLen = writeQueue.size();
         }
@@ -781,18 +782,31 @@ SimpleDRAM::chooseNextWrite()
     if (memSchedPolicy == Enums::fcfs) {
         // Do nothing, since the correct request is already head
     } else if (memSchedPolicy == Enums::frfcfs) {
+        // Only determine bank availability when needed
+        uint64_t earliest_banks = 0;
+
         auto i = writeQueue.begin();
         bool foundRowHit = false;
         while (!foundRowHit && i != writeQueue.end()) {
             DRAMPacket* dram_pkt = *i;
-            const Bank& bank = dram_pkt->bank_ref;
-            if (bank.openRow == dram_pkt->row) { //FR part
+            const Bank& bank = dram_pkt->bankRef;
+            if (bank.openRow == dram_pkt->row) {
                 DPRINTF(DRAM, "Write row buffer hit\n");
                 writeQueue.erase(i);
                 writeQueue.push_front(dram_pkt);
                 foundRowHit = true;
-            } else { //FCFS part
-                ;
+            } else {
+                // No row hit, go for first ready
+                if (earliest_banks == 0)
+                    earliest_banks = minBankFreeAt(writeQueue);
+
+                // Bank is ready or is one of the first available banks
+                if (bank.freeAt <= curTick() ||
+                    bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
+                    writeQueue.erase(i);
+                    writeQueue.push_front(dram_pkt);
+                    break;
+                }
             }
             ++i;
         }
@@ -822,17 +836,30 @@ SimpleDRAM::chooseNextRead()
         // Do nothing, since the request to serve is already the first
         // one in the read queue
     } else if (memSchedPolicy == Enums::frfcfs) {
+        // Only determine bank availability when needed
+        uint64_t earliest_banks = 0;
+
         for (auto i = readQueue.begin(); i != readQueue.end() ; ++i) {
             DRAMPacket* dram_pkt = *i;
-            const Bank& bank = dram_pkt->bank_ref;
+            const Bank& bank = dram_pkt->bankRef;
             // Check if it is a row hit
-            if (bank.openRow == dram_pkt->row) { //FR part
+            if (bank.openRow == dram_pkt->row) {
                 DPRINTF(DRAM, "Row buffer hit\n");
                 readQueue.erase(i);
                 readQueue.push_front(dram_pkt);
                 break;
-            } else { //FCFS part
-                ;
+            } else {
+                // No row hit, go for first ready
+                if (earliest_banks == 0)
+                    earliest_banks = minBankFreeAt(readQueue);
+
+                // Bank is ready or is one of the first available banks
+                if (bank.freeAt <= curTick() ||
+                    bits(earliest_banks, dram_pkt->bankId, dram_pkt->bankId)) {
+                    readQueue.erase(i);
+                    readQueue.push_front(dram_pkt);
+                    break;
+                }
             }
         }
     } else
@@ -887,7 +914,7 @@ SimpleDRAM::estimateLatency(DRAMPacket* dram_pkt, Tick inTime)
     Tick bankLat = 0;
     rowHitFlag = false;
 
-    const Bank& bank = dram_pkt->bank_ref;
+    const Bank& bank = dram_pkt->bankRef;
     if (pageMgmt == Enums::open) { // open-page policy
         if (bank.openRow == dram_pkt->row) {
             // When we have a row-buffer hit,
@@ -1008,7 +1035,7 @@ SimpleDRAM::doDRAMAccess(DRAMPacket* dram_pkt)
     Tick addDelay = (curTick() + accessLat < busBusyUntil) ?
         busBusyUntil - (curTick() + accessLat) : 0;
 
-    Bank& bank = dram_pkt->bank_ref;
+    Bank& bank = dram_pkt->bankRef;
 
     // Update bank state
     if (pageMgmt == Enums::open) {
@@ -1183,6 +1210,38 @@ SimpleDRAM::maxBankFreeAt() const
     return banksFree;
 }
 
+uint64_t
+SimpleDRAM::minBankFreeAt(const deque<DRAMPacket*>& queue) const
+{
+    uint64_t bank_mask = 0;
+    Tick freeAt = MaxTick;
+
+    // determine if we have queued transactions targeting the
+    // bank in question
+    vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
+    for (auto p = queue.begin(); p != queue.end(); ++p) {
+        got_waiting[(*p)->bankId] = true;
+    }
+
+    for (int i = 0; i < ranksPerChannel; i++) {
+        for (int j = 0; j < banksPerRank; j++) {
+            // if we have waiting requests for the bank, and it is
+            // amongst the first available, update the mask
+            if (got_waiting[i * banksPerRank + j] &&
+                banks[i][j].freeAt <= freeAt) {
+                // reset bank mask if new minimum is found
+                if (banks[i][j].freeAt < freeAt)
+                    bank_mask = 0;
+                // set the bit corresponding to the available bank
+                uint8_t bit_index = i * banksPerRank + j;
+                replaceBits(bank_mask, bit_index, bit_index, 1);
+                freeAt = banks[i][j].freeAt;
+            }
+        }
+    }
+    return bank_mask;
+}
+
 void
 SimpleDRAM::processRefreshEvent()
 {
index edba587747ecf025e62b48b9b7605584d4462dd5..19d23efba5228b99b18a1ed545ef411a1b261ae4 100644
--- a/src/mem/simple_dram.hh
+++ b/src/mem/simple_dram.hh
@@ -39,6 +39,7 @@
  *
  * Authors: Andreas Hansson
  *          Ani Udipi
+ *          Neha Agarwal
  */
 
 /**
@@ -202,9 +203,16 @@ class SimpleDRAM : public AbstractMemory
 
         /** Will be populated by address decoder */
         const uint8_t rank;
-        const uint16_t bank;
+        const uint8_t bank;
         const uint16_t row;
 
+        /**
+         * The bank id is numbered across all ranks, e.g. with 2 ranks
+         * of 8 banks each, bankId 0 maps to rank 0, bank 0, and
+         * bankId 8 maps to rank 1, bank 0.
+         */
+        const uint16_t bankId;
+
         /**
          * The starting address of the DRAM packet.
          * This address could be unaligned to burst size boundaries. The
@@ -224,14 +232,15 @@ class SimpleDRAM : public AbstractMemory
          * If not a split packet (common case), this is set to NULL
          */
         BurstHelper* burstHelper;
-        Bank& bank_ref;
+        Bank& bankRef;
 
-        DRAMPacket(PacketPtr _pkt, bool _isRead, uint8_t _rank, uint16_t _bank,
-                   uint16_t _row, Addr _addr, unsigned int _size,
-                   Bank& _bank_ref)
+        DRAMPacket(PacketPtr _pkt, bool is_read, uint8_t _rank, uint8_t _bank,
+                   uint16_t _row, uint16_t bank_id, Addr _addr,
+                   unsigned int _size, Bank& bank_ref)
             : entryTime(curTick()), readyTime(curTick()),
-              pkt(_pkt), isRead(_isRead), rank(_rank), bank(_bank), row(_row),
-              addr(_addr), size(_size), burstHelper(NULL), bank_ref(_bank_ref)
+              pkt(_pkt), isRead(is_read), rank(_rank), bank(_bank), row(_row),
+              bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
+              bankRef(bank_ref)
         { }
 
     };
@@ -394,6 +403,14 @@ class SimpleDRAM : public AbstractMemory
      */
     Tick maxBankFreeAt() const;
 
+    /**
+     * Find the earliest available banks for the enqueued requests.
+     * Assumes a maximum of 64 banks per DIMM.
+     *
+     * @param queue Queued requests to consider
+     * @return Bitmask with a bit set for each earliest available bank
+     */
+    uint64_t minBankFreeAt(const std::deque<DRAMPacket*>& queue) const;
 
     /**
      * Keep track of when row activations happen, in order to enforce