diff --git a/nano/core_test/node.cpp b/nano/core_test/node.cpp
index 593c5ee6c1..297c1d2f1c 100644
--- a/nano/core_test/node.cpp
+++ b/nano/core_test/node.cpp
@@ -3706,7 +3706,6 @@ TEST (node, bounded_backlog)
 	nano::node_config node_config;
 	node_config.backlog.max_backlog = 10;
-	node_config.backlog.bucket_threshold = 2;
 	node_config.backlog_scan.enable = false;
 	auto & node = *system.add_node (node_config);
diff --git a/nano/node/bounded_backlog.cpp b/nano/node/bounded_backlog.cpp
index 9aa39dee45..62b604ecdd 100644
--- a/nano/node/bounded_backlog.cpp
+++ b/nano/node/bounded_backlog.cpp
@@ -337,6 +337,11 @@ std::deque<std::shared_ptr<nano::block>> nano::bounded_backlog::perform_rollbacks (std::dequ
 	return processed;
 }
 
+size_t nano::bounded_backlog::bucket_threshold () const
+{
+	return config.max_backlog / bucketing.size ();
+}
+
 std::deque<nano::block_hash> nano::bounded_backlog::gather_targets (size_t max_count) const
 {
 	debug_assert (!mutex.try_lock ());
@@ -347,7 +352,7 @@ std::deque<nano::block_hash> nano::bounded_backlog::gather_targets (size_t max_c
 	for (auto bucket : bucketing.bucket_indices ())
 	{
 		// Only start rolling back if the bucket is over the threshold of unconfirmed blocks
-		if (index.size (bucket) > config.bucket_threshold)
+		if (index.size (bucket) > bucket_threshold ())
 		{
 			auto const count = std::min (max_count, config.batch_size);
 
diff --git a/nano/node/bounded_backlog.hpp b/nano/node/bounded_backlog.hpp
index 4eb0cc16bf..5935e7a59f 100644
--- a/nano/node/bounded_backlog.hpp
+++ b/nano/node/bounded_backlog.hpp
@@ -96,7 +96,6 @@ class bounded_backlog_config
 {
 public:
 	size_t max_backlog{ 100000 };
-	size_t bucket_threshold{ 1000 };
 	double overfill_factor{ 1.5 };
 	size_t batch_size{ 32 };
 	size_t max_queued_notifications{ 128 };
@@ -112,6 +111,7 @@ class bounded_backlog
 	void stop ();
 
 	size_t index_size () const;
+	size_t bucket_threshold () const;
 
 	nano::container_info container_info () const;
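
The diff replaces the standalone `bucket_threshold` config option with a value derived from `max_backlog`: each bucket gets an equal share of the global backlog cap, and rollbacks in a bucket only start once it exceeds that share. A minimal standalone sketch of the arithmetic follows; `bucket_count` stands in for `bucketing.size ()`, and the value 63 is an assumed bucket count for illustration, not something stated in the diff.

```cpp
#include <cstddef>
#include <iostream>

// Sketch of the derived per-bucket threshold. `bucket_count` models
// bucketing.size (); 63 below is an assumed value, not from the diff.
std::size_t bucket_threshold (std::size_t max_backlog, std::size_t bucket_count)
{
	// Each bucket is allowed an equal share of the global backlog cap.
	return max_backlog / bucket_count;
}

int main ()
{
	// With the default max_backlog of 100000 and an assumed 63 buckets,
	// rollbacks begin once a bucket holds more than 1587 unconfirmed blocks.
	std::cout << bucket_threshold (100000, 63) << '\n'; // prints 1587
}
```

Deriving the threshold removes a knob that could be set inconsistently with `max_backlog` (the test previously had to pin it to 2 by hand), and the per-bucket limit now scales automatically with the global cap.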