Improve bootstrap request timeout tracking
pwojcikdev committed Dec 10, 2024
1 parent a865db5 commit fc274ad
Showing 3 changed files with 26 additions and 8 deletions.
1 change: 1 addition & 0 deletions nano/lib/stats_enums.hpp
@@ -127,6 +127,7 @@ enum class detail
erased,
request,
request_failed,
+ request_success,
broadcast,
cleanup,
top,
29 changes: 23 additions & 6 deletions nano/node/bootstrap/bootstrap_service.cpp
@@ -153,6 +153,8 @@ bool nano::bootstrap_service::send (std::shared_ptr<nano::transport::channel> co
{
nano::lock_guard<nano::mutex> lock{ mutex };
debug_assert (tags.get<tag_id> ().count (tag.id) == 0);
+ // Give extra time for the request to be processed by the channel
+ tag.cutoff = std::chrono::steady_clock::now () + config.request_timeout * 4;
tags.get<tag_id> ().insert (tag);
}
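The cutoff set here is deliberately generous, four times the configured request timeout, because the request may still be sitting in the channel's outbound queue before it is actually written; once the channel confirms the write, the callback in the next hunk tightens the deadline to a single request timeout. A standalone sketch of that two-phase deadline, using hypothetical names (tag_sketch) and an illustrative timeout value rather than the node's actual types:

```cpp
#include <chrono>
#include <iostream>

// Hypothetical stand-in for the request tag; only the deadline matters here
struct tag_sketch
{
	std::chrono::steady_clock::time_point cutoff{};
};

int main ()
{
	using namespace std::chrono_literals;
	auto const request_timeout = 15s; // assumed value, stands in for config.request_timeout

	tag_sketch tag;

	// Phase 1: the request is queued on the channel; the deadline is generous so
	// queueing delays (e.g. the bootstrap traffic limiter) cannot expire it early
	tag.cutoff = std::chrono::steady_clock::now () + request_timeout * 4;

	// Phase 2: the channel reported a successful write; the peer now has only
	// the regular timeout to respond
	tag.cutoff = std::chrono::steady_clock::now () + request_timeout;

	// Cleanup treats a tag as timed out once its cutoff lies in the past
	bool const timed_out = tag.cutoff < std::chrono::steady_clock::now ();
	std::cout << "timed out: " << std::boolalpha << timed_out << std::endl;
}
```

Storing the deadline on the tag itself is what makes the two phases possible: each request can carry its own cutoff instead of every request sharing a single threshold derived at cleanup time.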

@@ -202,10 +204,25 @@ bool nano::bootstrap_service::send (std::shared_ptr<nano::transport::channel> co
stats.inc (nano::stat::type::bootstrap, nano::stat::detail::request, nano::stat::dir::out);
stats.inc (nano::stat::type::bootstrap_request, to_stat_detail (tag.type));

- // TODO: There is no feedback mechanism if bandwidth limiter starts dropping our requests
channel->send (
- request, nullptr,
- nano::transport::buffer_drop_policy::limiter, nano::transport::traffic_type::bootstrap);
+ request, [this, id = tag.id] (auto const & ec, auto size) {
+ nano::lock_guard<nano::mutex> lock{ mutex };
+ if (auto it = tags.get<tag_id> ().find (id); it != tags.get<tag_id> ().end ())
+ {
+ if (!ec)
+ {
+ stats.inc (nano::stat::type::bootstrap, nano::stat::detail::request_success, nano::stat::dir::out);
+ tags.get<tag_id> ().modify (it, [&] (auto & tag) {
+ // After the request has been sent, the peer has a limited time to respond
+ tag.cutoff = std::chrono::steady_clock::now () + config.request_timeout;
+ });
+ }
+ else
+ {
+ stats.inc (nano::stat::type::bootstrap, nano::stat::detail::request_failed, nano::stat::dir::out);
+ tags.get<tag_id> ().erase (it);
+ }
+ } }, nano::transport::buffer_drop_policy::limiter, nano::transport::traffic_type::bootstrap);

return true; // TODO: Return channel send result
}
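The send callback provides the feedback the service previously lacked: a successful write bumps the request_success counter and tightens the tag's deadline, while a failed or dropped write bumps request_failed and erases the tag immediately instead of letting it linger until it times out. A minimal sketch of that branch logic, with a plain std::unordered_map and a bool standing in for the node's multi-index container and error code, and hypothetical names (pending_request, on_sent):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <unordered_map>

// Hypothetical bookkeeping: a map keyed by request id stands in for the node's
// multi-index tag container, a bool stands in for the channel's error code
struct pending_request
{
	std::chrono::steady_clock::time_point cutoff{};
};

std::unordered_map<std::uint64_t, pending_request> pending;

void on_sent (std::uint64_t id, bool error, std::chrono::seconds request_timeout)
{
	auto it = pending.find (id);
	if (it == pending.end ())
	{
		return; // the tag was already timed out and cleaned up, nothing to do
	}
	if (!error)
	{
		// "request_success": the peer now has a limited time to respond
		it->second.cutoff = std::chrono::steady_clock::now () + request_timeout;
	}
	else
	{
		// "request_failed": drop the tag so it does not linger until timeout
		pending.erase (it);
	}
}

int main ()
{
	using namespace std::chrono_literals;
	auto const now = std::chrono::steady_clock::now ();
	pending[1].cutoff = now + 15s * 4; // queued, generous deadline
	pending[2].cutoff = now + 15s * 4;

	on_sent (1, false, 15s); // successful write: deadline tightened
	on_sent (2, true, 15s);  // dropped by the channel: erased immediately

	std::cout << "still pending: " << pending.size () << std::endl; // prints 1
}
```

Looking the tag up by id before touching it matters because the callback runs asynchronously; by the time it fires, the cleanup pass may already have timed the tag out and removed it.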
@@ -674,9 +691,9 @@ void nano::bootstrap_service::cleanup_and_sync ()

throttle.resize (compute_throttle_size ());

- auto const cutoff = std::chrono::steady_clock::now () - config.request_timeout;
- auto should_timeout = [cutoff] (async_tag const & tag) {
- return tag.timestamp < cutoff;
+ auto const now = std::chrono::steady_clock::now ();
+ auto should_timeout = [&] (async_tag const & tag) {
+ return tag.cutoff < now;
};

auto & tags_by_order = tags.get<tag_sequenced> ();
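Because each tag now carries its own cutoff, the cleanup pass no longer derives a single threshold from now () - request_timeout; it simply compares every tag's deadline against the current time, so queued-but-unsent requests and already-sent requests can expire on different schedules. A small sketch of that sweep, with a std::deque in insertion order standing in for the container's sequenced index and hypothetical names (tag_sketch):

```cpp
#include <chrono>
#include <deque>
#include <iostream>

// Hypothetical tag that carries its own deadline
struct tag_sketch
{
	int id;
	std::chrono::steady_clock::time_point cutoff;
};

int main ()
{
	using namespace std::chrono_literals;
	auto const now = std::chrono::steady_clock::now ();

	std::deque<tag_sketch> tags{
		{ 1, now - 1s },      // deadline already passed
		{ 2, now + 15s },     // sent, waiting for a response
		{ 3, now + 15s * 4 }, // still queued on the channel, generous deadline
	};

	// A tag has timed out once its own cutoff lies in the past
	auto should_timeout = [&] (tag_sketch const & tag) {
		return tag.cutoff < now;
	};

	// Sweep in insertion order and drop every expired tag
	for (auto it = tags.begin (); it != tags.end ();)
	{
		if (should_timeout (*it))
		{
			it = tags.erase (it);
		}
		else
		{
			++it;
		}
	}

	std::cout << "tags still pending: " << tags.size () << std::endl; // prints 2
}
```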
4 changes: 2 additions & 2 deletions nano/node/bootstrap/bootstrap_service.hpp
@@ -90,9 +90,9 @@ class bootstrap_service
nano::account account{ 0 };
nano::block_hash hash{ 0 };
size_t count{ 0 };

+ id_t id{ nano::bootstrap::generate_id () };
+ std::chrono::steady_clock::time_point cutoff{};
- std::chrono::steady_clock::time_point timestamp{ std::chrono::steady_clock::now () };
- id_t id{ nano::bootstrap::generate_id () };
};

private:
