diff --git a/nano/node/bounded_backlog.cpp b/nano/node/bounded_backlog.cpp
index 71f6536749..970a91896c 100644
--- a/nano/node/bounded_backlog.cpp
+++ b/nano/node/bounded_backlog.cpp
@@ -228,7 +228,7 @@ void nano::bounded_backlog::run ()
 		lock.unlock ();
 
 		stats.add (nano::stat::type::bounded_backlog, nano::stat::detail::gathered_targets, targets.size ());
-		auto processed = perform_rollbacks (targets);
+		auto processed = perform_rollbacks (targets, target_count);
 
 		lock.lock ();
 
@@ -285,7 +285,7 @@ bool nano::bounded_backlog::should_rollback (nano::block_hash const & hash) cons
 	return true;
 }
 
-std::deque<nano::block_hash> nano::bounded_backlog::perform_rollbacks (std::deque<nano::block_hash> const & targets)
+std::deque<nano::block_hash> nano::bounded_backlog::perform_rollbacks (std::deque<nano::block_hash> const & targets, size_t max_rollbacks)
 {
 	stats.inc (nano::stat::type::bounded_backlog, nano::stat::detail::performing_rollbacks);
 
@@ -320,6 +320,12 @@ std::deque<nano::block_hash> nano::bounded_backlog::perform_rollbacks (std::dequ
 				// TODO: Calling block_processor's event here is not ideal, but duplicating these events is even worse
 				block_processor.rolled_back.notify (rollback_list, root);
 			});
+
+			// Return early if we reached the maximum number of rollbacks
+			if (processed.size () >= max_rollbacks)
+			{
+				break;
+			}
 		}
 		else
 		{
diff --git a/nano/node/bounded_backlog.hpp b/nano/node/bounded_backlog.hpp
index 179c086622..2d6981c190 100644
--- a/nano/node/bounded_backlog.hpp
+++ b/nano/node/bounded_backlog.hpp
@@ -135,7 +135,7 @@ class bounded_backlog
 	std::deque<nano::block_hash> gather_targets (size_t max_count) const;
 	bool should_rollback (nano::block_hash const &) const;
 
-	std::deque<nano::block_hash> perform_rollbacks (std::deque<nano::block_hash> const & targets);
+	std::deque<nano::block_hash> perform_rollbacks (std::deque<nano::block_hash> const & targets, size_t max_rollbacks);
 
 	void run_scan ();
 
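
For context on the new `max_rollbacks` parameter: the rollback loop now stops as soon as the number of processed targets reaches the cap passed in by the caller (`target_count` at the call site in `run ()`). A minimal standalone sketch of this bounded-batch pattern follows; the names (`process_bounded`, `max_items`) and the `int` element type are illustrative only and not part of the nano codebase:

```cpp
#include <cstddef>
#include <deque>
#include <iostream>

// Process targets until either the queue is exhausted or the cap is reached.
// Mirrors the shape of perform_rollbacks above: each handled item is recorded
// in `processed`, and the loop breaks once the cap is hit.
std::deque<int> process_bounded (std::deque<int> const & targets, std::size_t max_items)
{
	std::deque<int> processed;
	for (auto const & target : targets)
	{
		// Placeholder for the real per-item work (ledger rollback, notifications, ...)
		processed.push_back (target);

		// Return early once the maximum number of items has been handled,
		// matching the early break added in the diff
		if (processed.size () >= max_items)
		{
			break;
		}
	}
	return processed;
}

int main ()
{
	std::deque<int> targets{ 1, 2, 3, 4, 5 };
	auto processed = process_bounded (targets, 3);
	std::cout << "processed " << processed.size () << " of " << targets.size () << " targets\n";
	// prints: processed 3 of 5 targets
}
```

Note that the cap is checked after the per-item work, so a non-empty batch always handles at least one target, even with a cap of zero; the check added in the diff behaves the same way, firing only after a rollback has already been performed and recorded.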