diff --git a/configs/common/Caches.py b/configs/common/Caches.py
index 4e7f97a490..c8f0c79420 100644
--- a/configs/common/Caches.py
+++ b/configs/common/Caches.py
@@ -87,6 +87,8 @@ class L1_DCache(L1Cache):
     enable_wayprediction = False
 
+    write_buffers = 16
+
 class L2Cache(Cache):
     mshrs = 64
     tgts_per_mshr = 20
@@ -106,6 +108,8 @@ class L2Cache(Cache):
     slice_num = 4
 
+    write_buffers = 16
+
 class L3Cache(Cache):
     mshrs = 64
     tgts_per_mshr = 20
@@ -122,6 +126,8 @@ class L3Cache(Cache):
     cache_level = 3
     enable_wayprediction = False
 
+    write_buffers = 16
+
 class IOCache(Cache):
     assoc = 8
     tag_latency = 50
@@ -201,8 +207,8 @@ class L3ToMemBus(CoherentXBar):
     # A handful pipeline stages for each portion of the latency
     # contributions.
     frontend_latency = 0
-    forward_latency = 30
-    response_latency = 30
+    forward_latency = 48
+    response_latency = 48
     snoop_response_latency = 4
 
     # Use a snoop-filter by default
diff --git a/src/cpu/o3/commit.cc b/src/cpu/o3/commit.cc
index e8353736cd..1426e0d252 100644
--- a/src/cpu/o3/commit.cc
+++ b/src/cpu/o3/commit.cc
@@ -741,7 +741,8 @@ Commit::tick()
     if (cpu->curCycle() - lastCommitCycle > 20000) {
         if (maybeStucked) {
-            warn("[sn:%s] %s", rob->head->get()->seqNum, rob->head->get()->staticInst->disassemble(0));
+            if (rob->numInstsInROB)
+                warn("[sn:%s] %s", rob->head->get()->seqNum, rob->head->get()->staticInst->disassemble(0));
             panic("cpu stucked!!\n");
         }
         warn("cpu may be stucked\n");
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index 42e43b2729..7ecac418bb 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -389,7 +389,8 @@ AbstractMemory::access(PacketPtr pkt)
     if (pkt->cmd == MemCmd::CleanEvict || pkt->cmd == MemCmd::WritebackClean) {
         DPRINTF(MemoryAccess, "CleanEvict on 0x%x: not responding\n",
                 pkt->getAddr());
-        return;
+        pkt->makeResponse();
+        return;
     }
 
     assert(pkt->getAddrRange().isSubset(range));
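Note on the abstract_mem.cc hunk above: it only works together with the packet.cc change further down, which gives the writeback and eviction commands a real responseCommand (WritebackResp). Any component that used to sink these packets silently must now turn them around. A minimal sketch of that obligation, assuming gem5's standard Packet and QueuedResponsePort API; sinkEviction itself is a hypothetical helper, not code from this patch:

    // Hypothetical helper showing the pattern the patch relies on.
    void
    sinkEviction(PacketPtr pkt, QueuedResponsePort &port)
    {
        if (pkt->needsResponse()) {
            // flips cmd to its responseCommand, i.e. MemCmd::WritebackResp
            pkt->makeResponse();
            port.schedTimingResp(pkt, curTick() + pkt->headerDelay);
        } else {
            // pre-patch behaviour: evictions carried no NeedsResponse
            delete pkt;
        }
    }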
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index a67ad117ac..f84bdc697d 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -393,8 +393,8 @@ BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time, b
         // It is a force hit
         assert(pkt->isResponse());
     }
-    DPRINTF(Cache, "Making timing response for %s, schedule it at %llu, is force hit: %i\n",
-            pkt->print(), request_time, pkt->isResponse());
+    DPRINTF(Cache, "Making timing response for %s, schedule it at %llu\n",
+            pkt->print(), request_time);
 
     if (pkt->isRead() && first_acc_after_pf && prefetcher &&
         prefetcher->hasHintDownStream()) {
         DPRINTF(Cache, "Notify down stream on pf hit\n");
@@ -619,7 +619,7 @@ BaseCache::recvTimingReq(PacketPtr pkt)
     if (!satisfied && forceHit && !pkt->req->isInstFetch() && pkt->isRead() &&
         pkt->req->hasPC() && forceHitPCs.count(pkt->req->getPC())) {
         bool mshr_hit = mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure()) != nullptr;
-        bool wb_hit = writeBuffer.findMatch(pkt->getBlockAddr(blkSize), pkt->isSecure()) != nullptr;
+        bool wb_hit = writeBuffer.findMatchNoService(pkt->getBlockAddr(blkSize), pkt->isSecure()) != nullptr;
         if (!(mshr_hit || wb_hit)) {
             DPRINTF(Cache, "%s: generate functional access for PC %#lx\n",
                     __func__, pkt->req->getPC());
@@ -852,6 +852,32 @@ BaseCache::recvTimingResp(PacketPtr pkt)
 
     DPRINTF(Cache, "%s: Handling response %s\n", __func__, pkt->print());
 
+    if (pkt->isWriteBackResp()) {
+        DPRINTF(Cache, "Writeback response for %s\n", pkt->print());
+        WriteQueueEntry *wbentry =
+            dynamic_cast<WriteQueueEntry *>(pkt->popSenderState());
+        panic_if(!wbentry, "Writeback response without sender state\n");
+        bool wasFull = writeBuffer.isFull();
+
+        stats.cmdStats(pkt)
+            .missLatencyDist.sample(
+                ticksToCycles(curTick() - wbentry->getTarget()->recvTime));
+
+        wbentry->popTarget();
+        writeBuffer.deallocate(wbentry);
+        if (wasFull && !writeBuffer.isFull()) {
+            clearBlocked(Blocked_NoWBBuffers);
+        }
+
+        if (pkt->cmd == MemCmd::WritebackResp) {
+            if (pkt->senderState) {
+                cpuSidePort.schedTimingResp(pkt, curTick() + pkt->headerDelay);
+                pkt->headerDelay = 0;
+                return;
+            }
+            delete pkt;
+            return;
+        }
+    }
+
     // if this is a write, we should be looking at an uncacheable
     // write
     if (pkt->isWrite() && pkt->cmd != MemCmd::LockedRMWWriteResp) {
@@ -1266,7 +1292,7 @@ BaseCache::getNextQueueEntry()
     } else if (miss_mshr) {
         // need to check for conflicting earlier writeback
         WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
-        if (conflict_mshr) {
+        if (conflict_mshr && !conflict_mshr->inService) {
             // not sure why we don't check order here... it was in the
             // original code but commented out.
@@ -1323,7 +1349,7 @@ BaseCache::getNextQueueEntry()
             prefetcher->streamPflate();
             // free the request and packet
             delete pkt;
-        } else if (writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
+        } else if (writeBuffer.findMatchNoService(pf_addr, pkt->isSecure())) {
             DPRINTF(HWPrefetch, "Prefetch %#x has hit in the "
                     "Write Buffer, dropped.\n", pf_addr);
             prefetcher->pfHitInWB(pf_type);
@@ -1746,7 +1772,7 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
     // generating CleanEvict and Writeback or simply CleanEvict and
     // CleanEvict almost simultaneously will be caught by snoops sent out
     // by crossbar.
-    WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
+    WriteQueueEntry *wb_entry = writeBuffer.findMatchNoService(pkt->getAddr(),
                                                       pkt->isSecure());
     if (wb_entry) {
         assert(wb_entry->getNumTargets() == 1);
@@ -1773,7 +1799,8 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
             // Dirty writeback from above trumps our clean
             // writeback... discard here
-            // Note: markInService will remove entry from writeback buffer.
-            markInService(wb_entry);
+            // Note: markInService no longer reclaims the entry, so pop
+            // the target and deallocate it here.
+            wb_entry->popTarget();
+            writeBuffer.deallocate(wb_entry);
             delete wbPkt;
         }
     }
@@ -1843,7 +1870,7 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
             blk->setCoherenceBits(CacheBlk::WritableBit);
         }
         // nothing else to do; writeback doesn't expect response
-        assert(!pkt->needsResponse());
+        // assert(!pkt->needsResponse());
         updateBlockData(blk, pkt, has_old_data);
         DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
@@ -2033,7 +2060,7 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
 
     // When handling a fill, we should have no writes to this line.
     assert(addr == pkt->getBlockAddr(blkSize));
-    assert(!writeBuffer.findMatch(addr, is_secure));
+    assert(!writeBuffer.findMatchNoService(addr, is_secure));
 
     if (!blk) {
         // better have read new data...
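The recvTimingResp hunk above and the sendWriteQueuePacket hunk below together redefine the write-buffer entry lifecycle: an entry is no longer dropped when its packet is issued, but held until the WritebackResp comes back. A condensed sketch of the three phases, using the names from the patch (error and early-deallocation paths omitted):

    // 1. allocate: the writeback enters the writeBuffer (allocateWriteBuffer).
    // 2. issue: sendWriteQueuePacket tags the packet and keeps the entry:
    //      tgt_pkt->pushSenderState(wq_entry);  // so the response finds it
    //      tgt_pkt->setWriteBackResp();         // sets NEED_RELEASE_WRITEBUFFER
    //      markInService(wq_entry);             // stays allocated, off readyList
    // 3. complete: recvTimingResp pops the sender state and frees the slot:
    //      auto *wbentry = dynamic_cast<WriteQueueEntry *>(pkt->popSenderState());
    //      wbentry->popTarget();
    //      writeBuffer.deallocate(wbentry);
    //      if (wasFull && !writeBuffer.isFull())
    //          clearBlocked(Blocked_NoWBBuffers);  // unblock the CPU-side port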
@@ -2535,7 +2562,20 @@ BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
         // it gets retried
         return true;
     } else {
+        bool full = writeBuffer.isFull();
+        assert(tgt_pkt->cmd != MemCmd::ReadReq);
+        tgt_pkt->pushSenderState(wq_entry);
+        tgt_pkt->setWriteBackResp();
         markInService(wq_entry);
+        if ((tgt_pkt->isCleanEviction() && tgt_pkt->isBlockCached())
+            || (tgt_pkt->cacheResponding() &&
+                (!tgt_pkt->needsWritable() || tgt_pkt->responderHadWritable()))) {
+            wq_entry->popTarget();
+            writeBuffer.deallocate(wq_entry);
+        }
+        if (full && !writeBuffer.isFull()) {
+            clearBlocked(Blocked_NoWBBuffers);
+        }
         return false;
     }
 }
@@ -2993,6 +3033,7 @@ BaseCache::CacheStats::regStats()
     blockedCycles.init(NUM_BLOCKED_CAUSES);
     blockedCycles
         .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoWBBuffers, "no_WBBuffer")
         .subname(Blocked_NoTargets, "no_targets")
         ;
 
@@ -3000,11 +3041,13 @@ BaseCache::CacheStats::regStats()
     blockedCauses.init(NUM_BLOCKED_CAUSES);
     blockedCauses
         .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoWBBuffers, "no_WBBuffer")
         .subname(Blocked_NoTargets, "no_targets")
         ;
 
     avgBlocked
         .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoWBBuffers, "no_WBBuffer")
         .subname(Blocked_NoTargets, "no_targets")
         ;
     avgBlocked = blockedCycles / blockedCauses;
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 040c2e65e9..67b0e69b20 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -466,12 +466,7 @@ class BaseCache : public ClockedObject, CacheAccessor
     void markInService(WriteQueueEntry *entry)
     {
-        bool wasFull = writeBuffer.isFull();
         writeBuffer.markInService(entry);
-
-        if (wasFull && !writeBuffer.isFull()) {
-            clearBlocked(Blocked_NoWBBuffers);
-        }
     }
 
     /**
@@ -1368,6 +1363,8 @@ class BaseCache : public ClockedObject, CacheAccessor
         // should only see writes or clean evicts here
         assert(pkt->isWrite() || pkt->cmd == MemCmd::CleanEvict);
 
+        DPRINTF(Cache, "Write buffer allocation for %s\n", pkt->print());
+
         Addr blk_addr = pkt->getBlockAddr(blkSize);
 
         // If using compression, on evictions the block is decompressed and
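One change worth calling out in the cache.cc diff below: the miss-latency histogram switches from a hard-coded divide to ticksToCycles(). With gem5's default 1 ps tick, dividing by 500 assumed a fixed 500-tick (2 GHz) clock; ticksToCycles(), inherited from ClockedObject, performs the same conversion using the cache's actual clock period. Side by side:

    // Before: only correct when the cache runs at a 500-tick clock period.
    missLatencyDist.sample((completion_time - target.recvTime) / 500);
    // After: frequency-independent.
    missLatencyDist.sample(ticksToCycles(completion_time - target.recvTime));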
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 2735864e48..f7f61dee28 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -813,8 +813,6 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
                 stats.cmdStats(tgt_pkt)
                     .missLatency[tgt_pkt->req->requestorId()] +=
                     completion_time - target.recvTime;
-                stats.cmdStats(tgt_pkt)
-                    .missLatencyDist.sample((completion_time - target.recvTime)/500);
 
                 if (tgt_pkt->cmd == MemCmd::LockedRMWReadReq) {
                     // We're going to leave a target in the MSHR until the
@@ -886,6 +884,10 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
                     // carried over to cache above
                     tgt_pkt->copyResponderFlags(pkt);
                 }
+
+                stats.cmdStats(tgt_pkt)
+                    .missLatencyDist.sample(
+                        ticksToCycles(completion_time - target.recvTime));
+
                 tgt_pkt->makeTimingResponse();
                 // if this packet is an error copy that to the new packet
                 if (is_error)
@@ -986,7 +988,7 @@ Cache::sendHintViaMSHRTargets(MSHR *mshr, const PacketPtr pkt)
         firstTgtDelayed = transfer_offset != 0 && pkt->payloadDelay != 0;
     }
     Tick sendHintTime = curTick() +
         ((transfer_offset || firstTgtDelayed) ? pkt->payloadDelay : 0);
-    DPRINTF(Cache, "sendHintViaMSHRTargets: pkt: %#lx, sendHintTime: %ld", tgt_pkt->getAddr(), sendHintTime);
+    DPRINTF(Cache, "sendHintViaMSHRTargets: pkt: %#lx, sendHintTime: %ld\n", tgt_pkt->getAddr(), sendHintTime);
     if (sendHintTime == curTick()) {
         BaseCache::cpuSidePort.sendCustomSignal(tgt_pkt, DcacheRespType::Hint);
     } else {
@@ -1370,7 +1372,7 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
     }
 
     //We also need to check the writeback buffers and handle those
-    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
+    WriteQueueEntry *wb_entry = writeBuffer.findMatchNoService(blk_addr, is_secure);
     if (wb_entry) {
         DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                 pkt->getAddr(), is_secure ? "s" : "ns");
@@ -1426,7 +1428,8 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
         if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
             // Invalidation trumps our writeback... discard here
-            // Note: markInService will remove entry from writeback buffer.
-            markInService(wb_entry);
+            // Note: markInService no longer reclaims the entry, so pop
+            // the target and deallocate it here.
+            wb_entry->popTarget();
+            writeBuffer.deallocate(wb_entry);
             delete wb_pkt;
         }
     }
diff --git a/src/mem/cache/write_queue.cc b/src/mem/cache/write_queue.cc
index a6c688810b..af708ba19a 100644
--- a/src/mem/cache/write_queue.cc
+++ b/src/mem/cache/write_queue.cc
@@ -74,6 +74,26 @@ WriteQueue::allocate(Addr blk_addr, unsigned blk_size, PacketPtr pkt,
     return entry;
 }
 
+WriteQueueEntry *
+WriteQueue::findMatchNoService(Addr blk_addr, bool is_secure,
+                               bool ignore_uncacheable) const
+{
+    for (const auto &entry : allocatedList) {
+        // skip entries allocated for uncacheable accesses when
+        // matching: the cache never checks for matches when adding new
+        // uncacheable entries, and we do not want normal cacheable
+        // accesses being added to a WriteQueueEntry serving an
+        // uncacheable access
+        if (!(ignore_uncacheable && entry->isUncacheable()) &&
+            !entry->inService &&
+            entry->matchBlockAddr(blk_addr, is_secure)) {
+            return entry;
+        }
+    }
+    return nullptr;
+}
+
 void
 WriteQueue::markInService(WriteQueueEntry *entry)
 {
@@ -81,8 +101,10 @@ WriteQueue::markInService(WriteQueueEntry *entry)
     // there is no more to do as we are done from the perspective of
     // this cache, and for uncacheable write we do not need the entry
     // as part of the response handling
-    entry->popTarget();
-    deallocate(entry);
+
+    entry->inService = true;
+    readyList.erase(entry->readyIter);
+    _numInService += 1;
 }
 
 } // namespace gem5
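findMatchNoService() exists because entries now linger in the write buffer until their WritebackResp arrives: callers that probe for a squashable writeback (snoops, prefetch drops, the CleanEvict checks in access()) must not see an entry whose packet is already on the wire. A sketch of the intended usage; blk_addr and is_secure stand in for the caller's values:

    WriteQueueEntry *wb = writeBuffer.findMatchNoService(blk_addr, is_secure);
    if (wb) {
        // Not yet in service, so the packet has not left this cache
        // and the writeback can still be cancelled locally.
        wb->popTarget();
        writeBuffer.deallocate(wb);
    }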
diff --git a/src/mem/cache/write_queue.hh b/src/mem/cache/write_queue.hh
index 0690c8499e..b76a12bdc4 100644
--- a/src/mem/cache/write_queue.hh
+++ b/src/mem/cache/write_queue.hh
@@ -88,6 +88,9 @@ class WriteQueue : public Queue<WriteQueueEntry>
     WriteQueueEntry *allocate(Addr blk_addr, unsigned blk_size,
                               PacketPtr pkt, Tick when_ready, Counter order);
 
+    WriteQueueEntry *findMatchNoService(Addr blk_addr, bool is_secure,
+                                        bool ignore_uncacheable = true) const;
+
     /**
      * Mark the given entry as in service. This removes the entry from
      * the readyList or deallocates the entry if it does not expect a
diff --git a/src/mem/coherent_xbar.cc b/src/mem/coherent_xbar.cc
index 659296e4aa..3d008a1840 100644
--- a/src/mem/coherent_xbar.cc
+++ b/src/mem/coherent_xbar.cc
@@ -272,6 +272,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
     if (sink_packet) {
         DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
                 pkt->print());
+        expect_response = false;
     } else {
         // determine if we are forwarding the packet, or responding to
         // it
diff --git a/src/mem/dramsim3.cc b/src/mem/dramsim3.cc
index bb0e92018a..a78e52b9be 100644
--- a/src/mem/dramsim3.cc
+++ b/src/mem/dramsim3.cc
@@ -109,16 +109,21 @@ DRAMsim3::sendResponse()
 
     DPRINTF(DRAMsim3, "Attempting to send response\n");
 
-    bool success = port.sendTimingResp(responseQueue.front());
+    auto [pkt, time] = responseQueue.top();
+    assert(time <= curTick());
+    bool success = port.sendTimingResp(pkt);
 
     if (success) {
-        responseQueue.pop_front();
+        responseQueue.pop();
 
         DPRINTF(DRAMsim3, "Have %d read, %d write, %d responses outstanding\n",
                 nbrOutstandingReads, nbrOutstandingWrites,
                 responseQueue.size());
 
-        if (!responseQueue.empty() && !sendResponseEvent.scheduled())
-            schedule(sendResponseEvent, curTick());
+        if (!responseQueue.empty() && !sendResponseEvent.scheduled()) {
+            Tick nextReadyTime = responseQueue.top().second > curTick() ?
+                responseQueue.top().second : curTick();
+            schedule(sendResponseEvent, nextReadyTime);
+        }
 
         if (nbrOutstanding() == 0)
             signalDrainDone();
@@ -177,7 +182,7 @@ DRAMsim3::recvFunctional(PacketPtr pkt)
 
     // potentially update the packets in our response queue as well
     for (auto i = responseQueue.begin(); i != responseQueue.end(); ++i)
-        pkt->trySatisfyFunctional(*i);
+        pkt->trySatisfyFunctional(i->first);
 
     pkt->popLabel();
 }
@@ -230,7 +235,7 @@ DRAMsim3::recvTimingReq(PacketPtr pkt)
             ++nbrOutstandingWrites;
 
             // perform the access for writes
-            accessAndRespond(pkt);
+            // accessAndRespond(pkt);
         }
     } else {
         // keep it simple and just respond if necessary
@@ -292,7 +297,7 @@ DRAMsim3::accessAndRespond(PacketPtr pkt)
                 pkt->getAddr());
 
         // queue it to be sent back
-        responseQueue.push_back(pkt);
+        responseQueue.push({pkt, time});
 
         // if we are not already waiting for a retry, or are scheduled
         // to send a response, schedule an event
@@ -341,15 +346,16 @@ void DRAMsim3::writeComplete(unsigned id, uint64_t addr)
 
     // we have already responded, and this is only to keep track of
     // what is outstanding
+    PacketPtr pkt = p->second.front();
     p->second.pop();
+
     if (p->second.empty())
         outstandingWrites.erase(p);
 
     assert(nbrOutstandingWrites != 0);
     --nbrOutstandingWrites;
 
-    if (nbrOutstanding() == 0)
-        signalDrainDone();
+    accessAndRespond(pkt);
 }
 
 Port&
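In the dramsim3 changes, writes are now answered from the writeComplete() callback rather than immediately on acceptance, so responses no longer become ready in FIFO order; the response queue therefore becomes the time-ordered heap declared in dramsim3.hh below. A small sketch of why the comparator yields earliest-ready-first under boost::heap's max-heap convention (pktA and pktB are placeholders):

    // boost::heap::priority_queue keeps the largest element (per the
    // comparator) at top(); with "a.second > b.second" as the ordering,
    // the pair with the smallest ready Tick compares largest, so top()
    // is always the next response that becomes sendable.
    boost::heap::priority_queue<std::pair<PacketPtr, Tick>,
                                boost::heap::compare<sort_policy>> q;
    q.push({pktA, 2000});
    q.push({pktB, 1000});
    assert(q.top().second == 1000);   // pktB drains before pktA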
diff --git a/src/mem/dramsim3.hh b/src/mem/dramsim3.hh
index f8fd54a263..f39a02bac6 100644
--- a/src/mem/dramsim3.hh
+++ b/src/mem/dramsim3.hh
@@ -47,6 +47,8 @@
 #include <deque>
 #include <unordered_map>
 
+#include <boost/heap/priority_queue.hpp>
+
 #include "mem/abstract_mem.hh"
 #include "mem/dramsim3_wrapper.hh"
 #include "mem/qport.hh"
@@ -142,7 +144,15 @@ class DRAMsim3 : public AbstractMemory
      * back. This is needed as DRAMsim3 unconditionally passes
      * responses back without any flow control.
      */
-    std::deque<PacketPtr> responseQueue;
+
+    struct sort_policy
+    {
+        bool operator()(const std::pair<PacketPtr, Tick> a,
+                        std::pair<PacketPtr, Tick> b) const
+        {
+            return a.second > b.second;
+        }
+    };
+
+    boost::heap::priority_queue<std::pair<PacketPtr, Tick>,
+                                boost::heap::compare<sort_policy>>
+        responseQueue;
 
     unsigned int nbrOutstanding() const;
diff --git a/src/mem/packet.cc b/src/mem/packet.cc
index 15d7ef2e5a..7aac6a9d03 100644
--- a/src/mem/packet.cc
+++ b/src/mem/packet.cc
@@ -92,18 +92,19 @@ MemCmd::commandInfo[] =
      * dependences are handled in the GPU ISA. */
     { {IsWrite, IsResponse}, InvalidCmd, "WriteCompleteResp" },
     /* WritebackDirty */
-    { {IsWrite, IsRequest, IsEviction, HasData, FromCache},
-        InvalidCmd, "WritebackDirty" },
+    { {IsWrite, IsRequest, IsEviction, NeedsResponse, HasData, FromCache},
+        WritebackResp, "WritebackDirty" },
     /* WritebackClean - This allows the upstream cache to writeback a
      * line to the downstream cache without it being considered
      * dirty. */
-    { {IsWrite, IsRequest, IsEviction, HasData, FromCache},
-        InvalidCmd, "WritebackClean" },
+    { {IsWrite, IsRequest, IsEviction, NeedsResponse, HasData, FromCache},
+        WritebackResp, "WritebackClean" },
+    /* WritebackResp */
+    { {IsWrite, IsResponse}, InvalidCmd, "WritebackResp" },
     /* WriteClean - This allows a cache to write a dirty block to a memory
        below without evicting its copy. */
-    { {IsWrite, IsRequest, HasData, FromCache}, InvalidCmd, "WriteClean" },
+    { {IsWrite, IsRequest, NeedsResponse, HasData, FromCache},
+        WritebackResp, "WriteClean" },
     /* CleanEvict */
-    { {IsRequest, IsEviction, FromCache}, InvalidCmd, "CleanEvict" },
+    { {IsRequest, IsEviction, NeedsResponse, FromCache},
+        WritebackResp, "CleanEvict" },
     /* SoftPFReq */
     { {IsRead, IsRequest, IsSWPrefetch, NeedsResponse},
         SoftPFResp, "SoftPFReq" },
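The packet.hh hunk below makes room for the new flag by shifting the internal flags up one nibble; COPY_FLAGS grows from 0xFF to 0xFFF so NEED_RELEASE_WRITEBUFFER survives packet copies. The resulting layout, read off the mask values in the hunk:

    // Flag layout after the shift:
    //   0x000000FF  original copy-through flags
    //   0x00000100  NEED_RELEASE_WRITEBUFFER  (new, also copy-through)
    //   0x00001000  VALID_ADDR           (was 0x00000100)
    //   0x00002000  VALID_SIZE           (was 0x00000200)
    //   0x00010000  STATIC_DATA          (was 0x00001000)
    //   0x00020000  DYNAMIC_DATA         (was 0x00002000)
    //   0x00080000  SUPPRESS_FUNC_ERROR  (was 0x00008000)
    //   0x00100000  BLOCK_CACHED         (was 0x00010000)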
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 8ca4c130d9..a4e3ff88fb 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -92,6 +92,7 @@ class MemCmd
         WriteCompleteResp,
         WritebackDirty,
         WritebackClean,
+        WritebackResp,
         WriteClean,            // writes dirty data below without evicting
         CleanEvict,
         SoftPFReq,
@@ -301,7 +302,7 @@ class Packet : public Printable
     enum : FlagsType
     {
         // Flags to transfer across when copying a packet
-        COPY_FLAGS             = 0x000000FF,
+        COPY_FLAGS             = 0x00000FFF,
 
         // Flags that are used to create reponse packets
         RESPONDER_FLAGS        = 0x00000009,
@@ -342,25 +343,27 @@ class Packet : public Printable
         // in transactional mode, i.e. in a transaction.
         FROM_TRANSACTION       = 0x00000080,
 
+        NEED_RELEASE_WRITEBUFFER = 0x00000100,
+
         /// Are the 'addr' and 'size' fields valid?
-        VALID_ADDR             = 0x00000100,
-        VALID_SIZE             = 0x00000200,
+        VALID_ADDR             = 0x00001000,
+        VALID_SIZE             = 0x00002000,
 
         /// Is the data pointer set to a value that shouldn't be freed
         /// when the packet is destroyed?
-        STATIC_DATA            = 0x00001000,
+        STATIC_DATA            = 0x00010000,
         /// The data pointer points to a value that should be freed when
         /// the packet is destroyed. The pointer is assumed to be pointing
         /// to an array, and delete [] is consequently called
-        DYNAMIC_DATA           = 0x00002000,
+        DYNAMIC_DATA           = 0x00020000,
 
         /// suppress the error if this packet encounters a functional
         /// access failure.
-        SUPPRESS_FUNC_ERROR    = 0x00008000,
+        SUPPRESS_FUNC_ERROR    = 0x00080000,
 
         // Signal block present to squash prefetch and cache evict packets
         // through express snoop flag
-        BLOCK_CACHED          = 0x00010000
+        BLOCK_CACHED          = 0x00100000
     };
 
     Flags flags;
@@ -639,6 +642,21 @@ class Packet : public Printable
             getOffset(blk_size) == 0 && getSize() == blk_size;
     }
 
+    void setWriteBackResp()
+    {
+        flags.set(NEED_RELEASE_WRITEBUFFER);
+    }
+
+    bool isWriteBackResp() const
+    {
+        return flags.isSet(NEED_RELEASE_WRITEBUFFER);
+    }
+
+    void clearWriteBackResp()
+    {
+        flags.set(NEED_RELEASE_WRITEBUFFER, false);
+    }
+
     //@{
     /// Snoop flags
     /**
diff --git a/src/mem/snoop_filter.cc b/src/mem/snoop_filter.cc
index fc4648cb4f..e1b1ee52d4 100644
--- a/src/mem/snoop_filter.cc
+++ b/src/mem/snoop_filter.cc
@@ -116,7 +116,7 @@ SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort&
         return snoopSelected(maskToPortList(interested & ~req_port),
                              lookupLatency);
 
-    if (cpkt->needsResponse()) {
+    if (cpkt->needsResponse() && cpkt->cmd.responseCommand() != MemCmd::WritebackResp) {
         if (!cpkt->cacheResponding()) {
             // Max one request per address per port
             panic_if((sf_item.requested & req_port).any(),
@@ -355,6 +355,7 @@ SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort&
            __func__, cpu_side_port.name(), cpkt->print());
 
     assert(cpkt->isResponse());
+    if (cpkt->cmd == MemCmd::WritebackResp) return;
 
     // we only allocate if the packet actually came from a cache, but
     // start by checking if the port is snooping
diff --git a/src/mem/xbar.cc b/src/mem/xbar.cc
index 859f2b6424..44db7ab617 100644
--- a/src/mem/xbar.cc
+++ b/src/mem/xbar.cc
@@ -123,18 +123,6 @@ BaseXBar::calcPacketTiming(PacketPtr pkt, Tick header_delay)
     panic_if(pkt->headerDelay > sim_clock::as_int::us,
              "Encountered header delay exceeding 1 us\n");
 
-    if (pkt->hasData()) {
-        // the payloadDelay takes into account the relative time to
-        // deliver the payload of the packet, after the header delay,
-        // we take the maximum since the payload delay could already
-        // be longer than what this parcitular crossbar enforces.
-        DPRINTF(BaseXBar, "Payload delay: %lu, packet size: %lu, width: %lu, clock period: %lu\n", pkt->payloadDelay,
-                pkt->getSize(), width, clockPeriod());
-        pkt->payloadDelay = std::max(pkt->payloadDelay,
-                                     divCeil(pkt->getSize(), width) *
-                                     clockPeriod());
-    }
-
     // the payload delay is not paying for the clock offset as that is
     // already done using the header delay, and the payload delay is
     // also used to determine how long the crossbar layer is busy and
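For scale, the serialization term that the xbar.cc hunk removes, worked through with illustrative numbers (a 64-byte line on a 32-byte-wide crossbar with a 500-tick clock period; the values are examples, not configuration defaults):

    // pkt->payloadDelay = max(payloadDelay, divCeil(size, width) * period)
    //                   = max(payloadDelay, divCeil(64, 32) * 500)
    //                   = max(payloadDelay, 1000) ticks
    // After the patch, the payloadDelay carried by the packet is used
    // unchanged, so the crossbar no longer serializes wide transfers.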