Commit f46673e

Merge tcp channels and network keepalive loops
pwojcikdev committed Mar 15, 2024
1 parent ce31080 commit f46673e
Showing 5 changed files with 4 additions and 40 deletions.
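
In short, this commit removes the dedicated TCP keepalive thread owned by nano::transport::tcp_channels (the tcp_keepalive thread role, its run_keepalive loop, and the keepalive_thread member) and instead drives TCP channel keepalives from the existing network keepalive loop, which now calls the newly public tcp_channels.keepalive (). A sketch of the merged loop follows the nano/node/network.cpp hunk below.
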
nano/lib/thread_roles.cpp (3 changes: 0 additions & 3 deletions)
@@ -118,9 +118,6 @@ std::string nano::thread_role::get_string (nano::thread_role::name role)
case nano::thread_role::name::network_reachout:
thread_role_name_string = "Net reachout";
break;
case nano::thread_role::name::tcp_keepalive:
thread_role_name_string = "Tcp keepalive";
break;
default:
debug_assert (false && "nano::thread_role::get_string unhandled thread role");
}
nano/lib/thread_roles.hpp (1 change: 0 additions & 1 deletion)
@@ -48,7 +48,6 @@ enum class name
network_cleanup,
network_keepalive,
network_reachout,
tcp_keepalive,
};

/*
nano/node/network.cpp (2 changes: 2 additions & 0 deletions)
@@ -173,6 +173,8 @@ void nano::network::run_keepalive ()
flood_keepalive (0.75f);
flood_keepalive_self (0.25f);

tcp_channels.keepalive ();

lock.lock ();
}
}
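
The hunk above shows only the tail of the loop body. As a rough sketch, and assuming the surrounding wait/stop scaffolding mirrors the removed tcp_channels::run_keepalive () shown in the nano/node/transport/tcp.cpp diff below, the merged nano::network::run_keepalive () plausibly looks like this:

void nano::network::run_keepalive ()
{
	nano::unique_lock<nano::mutex> lock{ mutex };
	while (!stopped)
	{
		// Wake periodically; the period here is assumed to be the same
		// keepalive_period the removed TCP loop waited on
		condition.wait_for (lock, node.network_params.network.keepalive_period);
		if (stopped)
		{
			return;
		}
		lock.unlock ();

		flood_keepalive (0.75f);
		flood_keepalive_self (0.25f);

		// New in this commit: TCP channel keepalive now runs on the network loop
		tcp_channels.keepalive ();

		lock.lock ();
	}
}

The practical effect is one fewer thread and one fewer condition-variable wait: TCP channel maintenance now piggybacks on the network loop's existing keepalive timer instead of a separate tcp_keepalive thread.
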
nano/node/transport/tcp.cpp (32 changes: 1 addition & 31 deletions)
@@ -133,16 +133,11 @@ nano::transport::tcp_channels::tcp_channels (nano::node & node, std::function<vo

nano::transport::tcp_channels::~tcp_channels ()
{
// All threads must be stopped before destruction
debug_assert (!keepalive_thread.joinable ());
debug_assert (channels.empty ());
}

void nano::transport::tcp_channels::start ()
{
keepalive_thread = std::thread ([this] () {
nano::thread_role::set (nano::thread_role::name::tcp_keepalive);
run_keepalive ();
});
}

void nano::transport::tcp_channels::stop ()
@@ -153,11 +148,6 @@ void nano::transport::tcp_channels::stop ()
}
condition.notify_all ();

if (keepalive_thread.joinable ())
{
keepalive_thread.join ();
}

message_manager.stop ();

// Close all TCP sockets
@@ -176,26 +166,6 @@ void nano::transport::tcp_channels::stop ()
channels.clear ();
}

// TODO: Merge with keepalive in network class
void nano::transport::tcp_channels::run_keepalive ()
{
nano::unique_lock<nano::mutex> lock{ mutex };
while (!stopped)
{
condition.wait_for (lock, node.network_params.network.keepalive_period);
if (stopped)
{
return;
}
lock.unlock ();

node.stats.inc (nano::stat::type::tcp_channels, nano::stat::detail::loop_keepalive);
keepalive ();

lock.lock ();
}
}

bool nano::transport::tcp_channels::insert (std::shared_ptr<nano::transport::channel_tcp> const & channel_a, std::shared_ptr<nano::transport::socket> const & socket_a, std::shared_ptr<nano::transport::tcp_server> const & server_a)
{
auto endpoint (channel_a->get_tcp_endpoint ());
nano/node/transport/tcp.hpp (6 changes: 1 addition & 5 deletions)
@@ -158,6 +158,7 @@ namespace transport
void list (std::deque<std::shared_ptr<nano::transport::channel>> &, uint8_t = 0, bool = true);
void modify (std::shared_ptr<nano::transport::channel_tcp> const &, std::function<void (std::shared_ptr<nano::transport::channel_tcp> const &)>);
void update (nano::tcp_endpoint const &);
void keepalive ();
std::optional<nano::keepalive> sample_keepalive ();

// Connection start
@@ -167,10 +168,6 @@
private: // Dependencies
nano::node & node;

private:
void run_keepalive ();
void keepalive ();

public:
nano::tcp_message_manager message_manager;

@@ -286,7 +283,6 @@
std::atomic<bool> stopped{ false };
nano::condition_variable condition;
mutable nano::mutex mutex;
std::thread keepalive_thread;

std::default_random_engine rng;
};
