Mayne lsu #215

Closed
wants to merge 3 commits
172 changes: 131 additions & 41 deletions core/LSU.cpp
@@ -20,6 +20,8 @@ namespace olympia
replay_buffer_("replay_buffer", p->replay_buffer_size, getClock()),
replay_buffer_size_(p->replay_buffer_size),
replay_issue_delay_(p->replay_issue_delay),
store_buffer_("store_buffer", p->ldst_inst_queue_size, getClock()), // Add this line
store_buffer_size_(p->ldst_inst_queue_size),
ready_queue_(),
load_store_info_allocator_(sparta::notNull(OlympiaAllocators::getOlympiaAllocators(node))
->load_store_info_allocator),
@@ -31,7 +33,7 @@ namespace olympia
cache_read_stage_(cache_lookup_stage_
+ 1), // Get data from the cache in the cycle after cache lookup
complete_stage_(
cache_read_stage_
cache_read_stage_
+ p->cache_read_stage_length), // Complete stage is after the cache read stage
ldst_pipeline_("LoadStorePipeline", (complete_stage_ + 1),
getClock()), // complete_stage_ + 1 is number of stages
@@ -48,6 +50,7 @@ namespace olympia
ldst_pipeline_.enableCollection(node);
ldst_inst_queue_.enableCollection(node);
replay_buffer_.enableCollection(node);
store_buffer_.enableCollection(node);

// Startup handler for sending initial credits
sparta::StartupEvent(node, CREATE_SPARTA_HANDLER(LSU, sendInitialCredits_));
@@ -177,6 +180,12 @@ namespace olympia
{
ILOG("New instruction added to the ldst queue " << inst_ptr);
allocateInstToIssueQueue_(inst_ptr);
// Allocate store instructions to the store buffer
if (inst_ptr->isStoreInst())
{
allocateInstToStoreBuffer_(inst_ptr);
}

handleOperandIssueCheck_(inst_ptr);
lsu_insts_dispatched_++;
}
@@ -265,7 +274,20 @@ namespace olympia
sparta_assert(inst_ptr->getStatus() == Inst::Status::RETIRED,
"Get ROB Ack, but the store inst hasn't retired yet!");

++stores_retired_;
if (inst_ptr->isStoreInst())
{
auto oldest_store = getOldestStore_();
sparta_assert(oldest_store && oldest_store->getInstPtr()->getUniqueID() == inst_ptr->getUniqueID(),
"Attempting to retire store out of order! Expected: "
<< (oldest_store ? oldest_store->getInstPtr()->getUniqueID() : 0)
<< " Got: " << inst_ptr->getUniqueID());

// Remove from store buffer and commit to cache
out_cache_lookup_req_.send(oldest_store->getMemoryAccessInfoPtr());
store_buffer_.erase(store_buffer_.begin());
++stores_retired_;
}


updateIssuePriorityAfterStoreInstRetire_(inst_ptr);
if (isReadyToIssueInsts_())
@@ -438,6 +460,31 @@ namespace olympia
const MemoryAccessInfoPtr & mem_access_info_ptr =
load_store_info_ptr->getMemoryAccessInfoPtr();
const bool phy_addr_is_ready = mem_access_info_ptr->getPhyAddrStatus();
const InstPtr & inst_ptr = mem_access_info_ptr->getInstPtr();

// First check the physical address and the bypass conditions
const bool is_already_hit =
(mem_access_info_ptr->getCacheState() == MemoryAccessInfo::CacheState::HIT);
const bool is_unretired_store =
inst_ptr->isStoreInst() && (inst_ptr->getStatus() != Inst::Status::RETIRED);
const bool cache_bypass = is_already_hit || !phy_addr_is_ready || is_unretired_store;

if (cache_bypass)
{
if (is_already_hit)
{
ILOG("Cache Lookup is skipped (Cache already hit)");
}
else if (is_unretired_store)
{
ILOG("Cache Lookup is skipped (store instruction not oldest)");
}
else
{
sparta_assert(false, "Cache access is bypassed without a valid reason!");
}
return;
}

// If we did not have an MMU hit from previous stage, invalidate and bail
if (false == phy_addr_is_ready)
@@ -462,66 +509,59 @@ namespace olympia
return;
}

const InstPtr & inst_ptr = mem_access_info_ptr->getInstPtr();
ILOG(load_store_info_ptr << " " << mem_access_info_ptr);

// If the instruction has passed translation and is a store,
// then it's good to be retired (i.e. mark it completed).
// Stores typically do not cause a flush after a successful
// translation. We now wait for the Retire block to "retire"
// it, meaning it's good to go to the cache
if (inst_ptr->isStoreInst() && (inst_ptr->getStatus() == Inst::Status::SCHEDULED))
if (inst_ptr->isStoreInst())
{
ILOG("Store marked as completed " << inst_ptr);
inst_ptr->setStatus(Inst::Status::COMPLETED);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
if (inst_ptr->getStatus() == Inst::Status::SCHEDULED)
{
updateInstReplayReady_(load_store_info_ptr);
ILOG("Store marked as completed " << inst_ptr);
inst_ptr->setStatus(Inst::Status::COMPLETED);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
{
updateInstReplayReady_(load_store_info_ptr);
}
return;
}
return;
}

// Loads don't perform a cache lookup if there are older stores present in the load store
// queue
if (!inst_ptr->isStoreInst() && olderStoresExists_(inst_ptr)
&& allow_speculative_load_exec_)
else // Load handling
{
ILOG("Dropping speculative load " << inst_ptr);
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
if (allow_speculative_load_exec_)
// Check for speculative execution constraints
// Since we use data forwarding, we only need to check whether all older stores have issued
if (allow_speculative_load_exec_ && !allOlderStoresIssued_(inst_ptr))
{
ILOG("Dropping speculative load " << inst_ptr << " due to unissued older stores");
load_store_info_ptr->setState(LoadStoreInstInfo::IssueState::READY);
ldst_pipeline_.invalidateStage(cache_lookup_stage_);
updateInstReplayReady_(load_store_info_ptr);
return;
}
return;
}

const bool is_already_hit =
(mem_access_info_ptr->getCacheState() == MemoryAccessInfo::CacheState::HIT);
const bool is_unretired_store =
inst_ptr->isStoreInst() && (inst_ptr->getStatus() != Inst::Status::RETIRED);
const bool cache_bypass = is_already_hit || !phy_addr_is_ready || is_unretired_store;
// Check whether we can forward from the store buffer first
uint64_t load_addr = inst_ptr->getTargetVAddr();
auto forwarding_store = findYoungestMatchingStore_(load_addr);

if (cache_bypass)
{
if (is_already_hit)
if (forwarding_store)
{
ILOG("Cache Lookup is skipped (Cache already hit)");
}
else if (is_unretired_store)
{
ILOG("Cache Lookup is skipped (store instruction not oldest)");
ILOG("Found forwarding store for load " << inst_ptr);
mem_access_info_ptr->setDataReady(true);
mem_access_info_ptr->setCacheState(MemoryAccessInfo::CacheState::HIT);
return;
}
else
{
sparta_assert(false, "Cache access is bypassed without a valid reason!");

// No forwarding possible - fall back to a cache access
if (!mem_access_info_ptr->isCacheHit())
{
out_cache_lookup_req_.send(mem_access_info_ptr);
}
return;
}

out_cache_lookup_req_.send(mem_access_info_ptr);

}

void LSU::getAckFromCache_(const MemoryAccessInfoPtr & mem_access_info_ptr)
@@ -790,6 +830,7 @@ namespace olympia
flushIssueQueue_(criteria);
flushReplayBuffer_(criteria);
flushReadyQueue_(criteria);
flushStoreBuffer_(criteria);

// Cancel replay events
auto flush = [&criteria](const LoadStoreInstInfoPtr & ldst_info_ptr) -> bool
@@ -894,6 +935,40 @@ namespace olympia
ILOG("Append new load/store instruction to issue queue!");
}

void LSU::allocateInstToStoreBuffer_(const InstPtr & inst_ptr)
{
auto store_info_ptr = createLoadStoreInst_(inst_ptr);

sparta_assert(store_buffer_.size() < store_buffer_size_,
"Appending to the store buffer would cause an overflow!");

store_buffer_.push_back(store_info_ptr);
ILOG("Store added to store buffer: " << inst_ptr);
}

LoadStoreInstInfoPtr LSU::findYoungestMatchingStore_(uint64_t addr)
{
LoadStoreInstInfoPtr matching_store = nullptr;

// The buffer is kept in program order (oldest at the front), so the last
// match found while scanning forward is the youngest store to this address
for (auto it = store_buffer_.begin(); it != store_buffer_.end(); ++it)
{
auto & store = *it;
if (store->getInstPtr()->getTargetVAddr() == addr)
{
matching_store = store;
}
}
return matching_store;
}

LoadStoreInstInfoPtr LSU::getOldestStore_() const
{
if (store_buffer_.empty())
{
return nullptr;
}
return store_buffer_.read(0);
}

bool LSU::allOlderStoresIssued_(const InstPtr & inst_ptr)
{
for (const auto & ldst_info_ptr : ldst_inst_queue_)
@@ -1368,4 +1443,19 @@ namespace olympia
}
}

void LSU::flushStoreBuffer_(const FlushCriteria & criteria)
{
auto sb_iter = store_buffer_.begin();
while (sb_iter != store_buffer_.end())
{
auto inst_ptr = (*sb_iter)->getInstPtr();
if (criteria.includedInFlush(inst_ptr))
{
auto delete_iter = sb_iter++;
store_buffer_.erase(delete_iter);
ILOG("Flushed store from store buffer: " << inst_ptr);
}
else
{
++sb_iter;
}
}
}

} // namespace olympia
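For readers skimming the diff, the sketch below condenses the behaviour this patch adds: stores are appended to the buffer in program order at dispatch, a load forwards from the youngest matching store, and a ROB ack releases only the oldest store toward the cache. It is a minimal, stand-alone illustration that assumes `std::deque` as a stand-in for `sparta::Buffer`; `SimpleStoreBuffer` and `StoreEntry` are names invented for the example and are not part of the Olympia sources.

```cpp
// Minimal, self-contained sketch of the store-buffer behaviour this PR adds.
// std::deque stands in for sparta::Buffer; StoreEntry stands in for LoadStoreInstInfo.
#include <cstdint>
#include <deque>
#include <iostream>
#include <optional>

struct StoreEntry
{
    uint64_t unique_id; // program-order id (smaller == older)
    uint64_t vaddr;     // target virtual address
    uint64_t data;      // value to be written
};

class SimpleStoreBuffer
{
  public:
    // Dispatch: append at the back so the buffer stays in program order.
    void allocate(const StoreEntry & st) { buffer_.push_back(st); }

    // Load lookup: scan oldest to youngest and keep the last match, i.e. the
    // youngest store to the same address (mirrors findYoungestMatchingStore_).
    std::optional<StoreEntry> forward(uint64_t load_addr) const
    {
        std::optional<StoreEntry> match;
        for (const auto & st : buffer_)
        {
            if (st.vaddr == load_addr)
            {
                match = st;
            }
        }
        return match;
    }

    // ROB ack: only the oldest store may leave the buffer and go to the cache
    // (mirrors getOldestStore_ plus the erase at the front of store_buffer_).
    std::optional<StoreEntry> retireOldest(uint64_t retiring_id)
    {
        if (buffer_.empty() || buffer_.front().unique_id != retiring_id)
        {
            return std::nullopt; // an out-of-order retire would trip the sparta_assert
        }
        StoreEntry oldest = buffer_.front();
        buffer_.pop_front();
        return oldest;
    }

  private:
    std::deque<StoreEntry> buffer_;
};

int main()
{
    SimpleStoreBuffer sb;
    sb.allocate({1, 0x1000, 0xAA}); // older store to 0x1000
    sb.allocate({2, 0x1000, 0xBB}); // younger store to the same address

    // A load to 0x1000 forwards the youngest value (0xBB), not the oldest.
    std::cout << std::hex << sb.forward(0x1000)->data << '\n';

    // Stores drain strictly in program order on ROB acks.
    std::cout << sb.retireOldest(1).has_value() << '\n'; // 1: store 1 is the oldest
    std::cout << sb.retireOldest(2).has_value() << '\n'; // 1: store 2 is now the oldest
    return 0;
}
```

Running it prints `bb` for the forwarded value and two `1`s for the in-order retirements; reversing the retire order would make the first call return empty, which is what the `sparta_assert` in the ROB-ack handler guards against in the real code.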
16 changes: 16 additions & 0 deletions core/LSU.hpp
@@ -50,6 +50,7 @@ namespace olympia
PARAMETER(uint32_t, ldst_inst_queue_size, 8, "LSU ldst inst queue size")
PARAMETER(uint32_t, replay_buffer_size, ldst_inst_queue_size, "Replay buffer size")
PARAMETER(uint32_t, replay_issue_delay, 3, "Replay Issue delay")
// PARAMETER(uint32_t, store_buffer_size, ldst_inst_queue_size, "Size of the store buffer")
// LSU microarchitecture parameters
PARAMETER(
bool, allow_speculative_load_exec, true,
@@ -137,6 +138,10 @@ namespace olympia
const uint32_t replay_buffer_size_;
const uint32_t replay_issue_delay_;

// Store Buffer
sparta::Buffer<LoadStoreInstInfoPtr> store_buffer_;
const uint32_t store_buffer_size_;

sparta::PriorityQueue<LoadStoreInstInfoPtr> ready_queue_;
// MMU unit
bool mmu_busy_ = false;
Expand Down Expand Up @@ -258,6 +263,15 @@ namespace olympia

void allocateInstToIssueQueue_(const InstPtr & inst_ptr);

// Allocate a store instruction to the store buffer
void allocateInstToStoreBuffer_(const InstPtr & inst_ptr);

// Search store buffer in FIFO order for youngest matching store
LoadStoreInstInfoPtr findYoungestMatchingStore_(uint64_t addr);

// Get the oldest store in the store buffer
LoadStoreInstInfoPtr getOldestStore_() const;

bool olderStoresExists_(const InstPtr & inst_ptr);

bool allOlderStoresIssued_(const InstPtr & inst_ptr);
@@ -315,6 +329,8 @@ namespace olympia
// Flush Replay Buffer
void flushReplayBuffer_(const FlushCriteria &);

void flushStoreBuffer_(const FlushCriteria &);

// Counters
sparta::Counter lsu_insts_dispatched_{getStatisticSet(), "lsu_insts_dispatched",
"Number of LSU instructions dispatched",
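To round out the picture, here is a hedged sketch of the flush path added in `flushStoreBuffer_` above: any buffered store covered by the flush criteria is dropped, while older, unaffected stores remain. `std::deque` and the `FlushWindow` predicate are stand-ins invented for the example; the real code walks `sparta::Buffer` and asks `criteria.includedInFlush(inst_ptr)`, as shown in the diff.

```cpp
// Sketch of the flush path: drop every buffered store that the flush covers.
// std::deque stands in for sparta::Buffer; FlushWindow stands in for FlushCriteria.
#include <algorithm>
#include <cstdint>
#include <deque>
#include <iostream>

struct StoreEntry
{
    uint64_t unique_id; // program-order id (smaller == older)
    uint64_t vaddr;
};

// Everything at or younger than flush_id is considered flushed.
struct FlushWindow
{
    uint64_t flush_id;
    bool includedInFlush(const StoreEntry & st) const { return st.unique_id >= flush_id; }
};

void flushStoreBuffer(std::deque<StoreEntry> & store_buffer, const FlushWindow & criteria)
{
    store_buffer.erase(std::remove_if(store_buffer.begin(), store_buffer.end(),
                                      [&criteria](const StoreEntry & st)
                                      { return criteria.includedInFlush(st); }),
                       store_buffer.end());
}

int main()
{
    std::deque<StoreEntry> store_buffer = {{1, 0x10}, {2, 0x20}, {3, 0x30}};
    flushStoreBuffer(store_buffer, FlushWindow{2}); // flush store 2 and everything younger
    std::cout << store_buffer.size() << '\n';       // prints 1: only store 1 survives
    return 0;
}
```

With the three buffered stores above, flushing from id 2 onward leaves only store 1, matching the erase-while-iterating loop in the patch.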