Directly configurable rate limit
pwojcikdev committed Nov 26, 2024
1 parent afd5949 commit 59b0532
Showing 3 changed files with 11 additions and 10 deletions.
6 changes: 3 additions & 3 deletions nano/core_test/toml.cpp
@@ -199,7 +199,7 @@ TEST (toml, daemon_config_deserialize_defaults)
ASSERT_EQ (conf.node.max_unchecked_blocks, defaults.node.max_unchecked_blocks);
ASSERT_EQ (conf.node.backlog_scan.enable, defaults.node.backlog_scan.enable);
ASSERT_EQ (conf.node.backlog_scan.batch_size, defaults.node.backlog_scan.batch_size);
- ASSERT_EQ (conf.node.backlog_scan.frequency, defaults.node.backlog_scan.frequency);
+ ASSERT_EQ (conf.node.backlog_scan.rate_limit, defaults.node.backlog_scan.rate_limit);
ASSERT_EQ (conf.node.enable_upnp, defaults.node.enable_upnp);

ASSERT_EQ (conf.node.websocket_config.enabled, defaults.node.websocket_config.enabled);
@@ -468,7 +468,7 @@ TEST (toml, daemon_config_deserialize_no_defaults)
[node.backlog_scan]
enable = false
batch_size = 999
- frequency = 999
+ rate_limit = 999
[node.block_processor]
max_peer_queue = 999
@@ -706,7 +706,7 @@ TEST (toml, daemon_config_deserialize_no_defaults)
ASSERT_NE (conf.node.request_aggregator_threads, defaults.node.request_aggregator_threads);
ASSERT_NE (conf.node.backlog_scan.enable, defaults.node.backlog_scan.enable);
ASSERT_NE (conf.node.backlog_scan.batch_size, defaults.node.backlog_scan.batch_size);
- ASSERT_NE (conf.node.backlog_scan.frequency, defaults.node.backlog_scan.frequency);
+ ASSERT_NE (conf.node.backlog_scan.rate_limit, defaults.node.backlog_scan.rate_limit);
ASSERT_NE (conf.node.enable_upnp, defaults.node.enable_upnp);

ASSERT_NE (conf.node.websocket_config.enabled, defaults.node.websocket_config.enabled);
11 changes: 6 additions & 5 deletions nano/node/backlog_scan.cpp
@@ -12,7 +12,7 @@ nano::backlog_scan::backlog_scan (backlog_scan_config const & config_a, nano::le
config{ config_a },
ledger{ ledger_a },
stats{ stats_a },
- limiter{ config.batch_size * config.frequency }
+ limiter{ config.rate_limit }
{
}

@@ -93,7 +93,8 @@ void nano::backlog_scan::populate_backlog (nano::unique_lock<nano::mutex> & lock
// Wait for the rate limiter
while (!limiter.should_pass (config.batch_size))
{
- condition.wait_for (lock, std::chrono::milliseconds{ 1000 / config.frequency / 2 });
+ std::chrono::milliseconds const wait_time{ 1000 / std::max ((config.rate_limit / config.batch_size), size_t{ 1 }) / 2 };
+ condition.wait_for (lock, std::max (wait_time, 10ms));
if (stopped)
{
return;
@@ -158,8 +159,8 @@ nano::container_info nano::backlog_scan::container_info () const
nano::error nano::backlog_scan_config::serialize (nano::tomlconfig & toml) const
{
toml.put ("enable", enable, "Control if ongoing backlog population is enabled. If not, backlog population can still be triggered by RPC \ntype:bool");
toml.put ("batch_size", batch_size, "Number of accounts per second to process when doing backlog population scan. Increasing this value will help unconfirmed frontiers get into election prioritization queue faster, however it will also increase resource usage. \ntype:uint");
toml.put ("frequency", frequency, "Number of batches to process per second. Higher frequency and smaller batch size helps to utilize resources more uniformly, however it also introduces more overhead. Use 0 to process as fast as possible, but be aware that it may consume a lot of resources. \ntype:uint");
toml.put ("batch_size", batch_size, "Size of a single batch. Larger batches reduce overhead, but may put more pressure on other node components. \ntype:uint");
toml.put ("rate_limit", rate_limit, "Number of accounts per second to process when doing backlog population scan. Increasing this value will help unconfirmed frontiers get into election prioritization queue faster. Use 0 to process as fast as possible, but be aware that it may consume a lot of resources. \ntype:uint");

return toml.get_error ();
}
@@ -168,7 +169,7 @@ nano::error nano::backlog_scan_config::deserialize (nano::tomlconfig & toml)
{
toml.get ("enable", enable);
toml.get ("batch_size", batch_size);
toml.get ("frequency", frequency);
toml.get ("rate_limit", rate_limit);

return toml.get_error ();
}
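The new wait above derives its interval directly from the configured rate: the loop sleeps for roughly half of one expected batch period (1000 ms divided by rate_limit / batch_size batches per second), with std::max guarding against a zero divisor and a 10 ms floor so the condition variable is not re-checked too aggressively. A minimal standalone sketch of that calculation, using illustrative names rather than the node's own types:

#include <algorithm>
#include <chrono>
#include <cstddef>
#include <iostream>

// Illustrative restatement of the wait computed in populate_backlog above:
// half of the expected batch period (1000ms divided by batches-per-second),
// with std::max guarding against a zero divisor and a 10ms lower bound.
std::chrono::milliseconds backlog_wait_interval (std::size_t rate_limit, std::size_t batch_size)
{
    std::size_t const batches_per_second = std::max (rate_limit / batch_size, std::size_t{ 1 });
    std::chrono::milliseconds const wait_time{ 1000 / batches_per_second / 2 };
    return std::max (wait_time, std::chrono::milliseconds{ 10 });
}

int main ()
{
    // Defaults from backlog_scan.hpp below: 10000 / 1000 = 10 batches per
    // second, a 100ms batch period, so the limiter is re-checked every 50ms.
    std::cout << backlog_wait_interval (10000, 1000).count () << "ms\n"; // 50ms
    // A rate below the batch size bottoms out at one batch per second (500ms).
    std::cout << backlog_wait_interval (500, 1000).count () << "ms\n"; // 500ms
    return 0;
}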
4 changes: 2 additions & 2 deletions nano/node/backlog_scan.hpp
@@ -23,10 +23,10 @@ class backlog_scan_config final
public:
/** Control if ongoing backlog population is enabled. If not, backlog population can still be triggered by RPC */
bool enable{ true };
+ /** Number of accounts to scan per second. */
+ size_t rate_limit{ 10000 };
/** Number of accounts per second to process. */
size_t batch_size{ 1000 };
- /** Number of batches to run per second. */
- size_t frequency{ 10 };
};

class backlog_scan final
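Note that the new defaults preserve the previous effective throughput: the old limiter was sized as batch_size * frequency (1000 * 10 = 10000 accounts per second), which equals the new rate_limit default of 10000. For anyone translating an older configuration by hand, a small sketch of that mapping (the helper name is hypothetical, not part of the node):

#include <cstddef>
#include <iostream>

// Hypothetical helper (not part of the node): the removed limiter was built
// as batch_size * frequency, so an old (batch_size, frequency) pair maps to
// the new directly configurable rate_limit by taking their product.
std::size_t equivalent_rate_limit (std::size_t batch_size, std::size_t frequency)
{
    return batch_size * frequency;
}

int main ()
{
    // Old defaults: 1000 * 10 = 10000, matching the new rate_limit default.
    std::cout << equivalent_rate_limit (1000, 10) << '\n'; // 10000
    return 0;
}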
