diff --git a/senseicore/src/channels.rs b/senseicore/src/channels.rs
index de7aa53..b032f08 100644
--- a/senseicore/src/channels.rs
+++ b/senseicore/src/channels.rs
@@ -139,8 +139,8 @@ impl ChannelOpener {
                 } = event
                 {
                     if *user_channel_id == request.custom_id {
-                            channel_counterparty_node_id = Some(*counterparty_node_id);
-                            return true;
+                        channel_counterparty_node_id = Some(*counterparty_node_id);
+                        return true;
                     }
                 }
                 false
@@ -149,10 +149,10 @@ impl ChannelOpener {
                 if event.is_none() {
                     (request, Err(Error::FundingGenerationNeverHappened), None)
                 } else {
-                        (request, result, channel_counterparty_node_id)
+                    (request, result, channel_counterparty_node_id)
                 }
             } else {
-                    (request, result, None)
+                (request, result, None)
             }
         })
         .collect::<Vec<_>>();
@@ -192,7 +192,7 @@ impl ChannelOpener {

         let channels_to_open = requests_with_results
             .iter()
-            .filter(|(_request, result, counterparty_node_id)| result.is_ok())
+            .filter(|(_request, result, _counterparty_node_id)| result.is_ok())
             .count();

         self.broadcaster
@@ -203,10 +203,11 @@ impl ChannelOpener {
             .map(|(request, result, counterparty_node_id)| {
                 if let Ok(tcid) = result {
                     let counterparty_node_id = counterparty_node_id.unwrap();
-                    match self
-                        .channel_manager
-                        .funding_transaction_generated(&tcid, &counterparty_node_id, funding_tx.clone())
-                    {
+                    match self.channel_manager.funding_transaction_generated(
+                        &tcid,
+                        &counterparty_node_id,
+                        funding_tx.clone(),
+                    ) {
                         Ok(()) => (request, result),
                         Err(e) => (request, Err(Error::LdkApi(e))),
                     }
diff --git a/senseicore/src/database.rs b/senseicore/src/database.rs
index 8ed1b95..13a68ab 100644
--- a/senseicore/src/database.rs
+++ b/senseicore/src/database.rs
@@ -114,6 +114,15 @@ impl SenseiDatabase {
             .await?)
     }

+    pub async fn list_ports_in_use(&self) -> Result<Vec<u16>, Error> {
+        Ok(Node::find()
+            .all(&self.connection)
+            .await?
+            .into_iter()
+            .map(|node| node.listen_port as u16)
+            .collect())
+    }
+
     pub async fn list_nodes(
         &self,
         pagination: PaginationRequest,
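Reviewer note: together with the services/admin.rs changes below, `list_ports_in_use` replaces the old per-port probing (a `portpicker::is_free` check plus a database lookup for every candidate port) with a single query that seeds an in-memory free list at startup. One trade-off worth calling out: with the `portpicker` system check gone, a port grabbed by an unrelated process after startup can still be handed to a new node. A minimal standalone sketch of the free-list pattern being introduced here — `PortPool`, `acquire`, and `release` are hypothetical names for illustration, not types from this PR:

    use std::collections::{HashSet, VecDeque};

    /// Free list of listen ports, seeded once at startup.
    struct PortPool {
        available: VecDeque<u16>,
    }

    impl PortPool {
        /// Build the pool from the configured range, skipping ports already in use.
        fn new(range: std::ops::Range<u16>, in_use: &HashSet<u16>) -> Self {
            Self {
                available: range.filter(|port| !in_use.contains(port)).collect(),
            }
        }

        /// Hand out the next free port, if any are left.
        fn acquire(&mut self) -> Option<u16> {
            self.available.pop_front()
        }

        /// Return a port to the pool, e.g. when node creation fails part-way.
        fn release(&mut self, port: u16) {
            self.available.push_front(port);
        }
    }

    fn main() {
        let in_use: HashSet<u16> = [9737].into_iter().collect();
        let mut pool = PortPool::new(9736..9740, &in_use);
        assert_eq!(pool.acquire(), Some(9736));
        assert_eq!(pool.acquire(), Some(9738)); // 9737 was already taken
        pool.release(9736);
        assert_eq!(pool.acquire(), Some(9736)); // released ports are handed out first
    }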
diff --git a/senseicore/src/event_handler.rs b/senseicore/src/event_handler.rs
index a97b905..8c5c326 100644
--- a/senseicore/src/event_handler.rs
+++ b/senseicore/src/event_handler.rs
@@ -79,7 +79,7 @@ impl EventHandler for LightningNodeEventHandler {
                     channel_value_satoshis: *channel_value_satoshis,
                     output_script: output_script.clone(),
                     user_channel_id: *user_channel_id,
-                    counterparty_node_id: *counterparty_node_id
+                    counterparty_node_id: *counterparty_node_id,
                 })
                 .unwrap();
         }
diff --git a/senseicore/src/events.rs b/senseicore/src/events.rs
index 1409f3d..2d2fc0f 100644
--- a/senseicore/src/events.rs
+++ b/senseicore/src/events.rs
@@ -1,4 +1,4 @@
-use bitcoin::{Script, Txid, secp256k1::PublicKey};
+use bitcoin::{secp256k1::PublicKey, Script, Txid};
 use serde::Serialize;

 #[derive(Clone, Debug, Serialize)]
@@ -13,6 +13,6 @@ pub enum SenseiEvent {
         channel_value_satoshis: u64,
         output_script: Script,
         user_channel_id: u64,
-        counterparty_node_id: PublicKey
+        counterparty_node_id: PublicKey,
     },
 }
diff --git a/senseicore/src/node.rs b/senseicore/src/node.rs
index a6579e8..665055e 100644
--- a/senseicore/src/node.rs
+++ b/senseicore/src/node.rs
@@ -58,7 +58,7 @@ use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph, NodeId, RoutingFees};
 use lightning::routing::router::{RouteHint, RouteHintHop};
 use lightning::routing::scoring::ProbabilisticScorer;
-use lightning::util::config::{ChannelConfig, ChannelHandshakeLimits, UserConfig};
+use lightning::util::config::UserConfig;
 use lightning::util::ser::ReadableArgs;
 use lightning_background_processor::BackgroundProcessor;
 use lightning_invoice::utils::DefaultRouter;
diff --git a/senseicore/src/services/admin.rs b/senseicore/src/services/admin.rs
index a9082d1..74ec2c9 100644
--- a/senseicore/src/services/admin.rs
+++ b/senseicore/src/services/admin.rs
@@ -20,7 +20,7 @@ use entity::sea_orm::{ActiveModelTrait, ActiveValue};
 use lightning_background_processor::BackgroundProcessor;
 use macaroon::Macaroon;
 use serde::Serialize;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::sync::atomic::Ordering;
 use std::{collections::hash_map::Entry, fs, sync::Arc};
 use tokio::sync::{broadcast, Mutex};
@@ -141,6 +141,7 @@ pub struct AdminService {
     pub database: Arc<SenseiDatabase>,
     pub chain_manager: Arc<SenseiChainManager>,
     pub event_sender: broadcast::Sender<SenseiEvent>,
+    pub available_ports: Arc<Mutex<VecDeque<u16>>>,
 }

 impl AdminService {
@@ -151,6 +152,23 @@ impl AdminService {
         chain_manager: Arc<SenseiChainManager>,
         event_sender: broadcast::Sender<SenseiEvent>,
     ) -> Self {
+        let mut used_ports = HashSet::new();
+        let mut available_ports = VecDeque::new();
+        database
+            .list_ports_in_use()
+            .await
+            .unwrap()
+            .into_iter()
+            .for_each(|port| {
+                used_ports.insert(port);
+            });
+
+        for port in config.port_range_min..config.port_range_max {
+            if !used_ports.contains(&port) {
+                available_ports.push_back(port);
+            }
+        }
+
         Self {
             data_dir: String::from(data_dir),
             config: Arc::new(config),
@@ -158,6 +176,7 @@
             database: Arc::new(database),
             chain_manager,
             event_sender,
+            available_ports: Arc::new(Mutex::new(available_ports)),
         }
     }
 }
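Two details in the constructor above may be worth a second look. First, `config.port_range_min..config.port_range_max` is a half-open range, so `port_range_max` itself is never added to the pool; if the maximum is meant to be assignable, the range needs to be inclusive. Second, `list_ports_in_use().await.unwrap()` will panic on a database error during startup rather than surfacing it. A small demonstration of the range semantics, with made-up port values:

    fn main() {
        let (port_range_min, port_range_max) = (9736u16, 9739u16);

        // Half-open, as written in the constructor: the max port is excluded.
        let half_open: Vec<u16> = (port_range_min..port_range_max).collect();
        assert_eq!(half_open, vec![9736, 9737, 9738]);

        // Inclusive alternative, if port_range_max should be assignable too.
        let inclusive: Vec<u16> = (port_range_min..=port_range_max).collect();
        assert_eq!(inclusive, vec![9736, 9737, 9738, 9739]);
    }

The create_node changes below, continuing services/admin.rs, consume the pool: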
@@ -400,34 +419,31 @@ impl AdminService {
         role: node::NodeRole,
     ) -> Result<(node::Model, Macaroon), crate::error::Error> {
         let listen_addr = public_ip::addr().await.unwrap().to_string();
+
         let listen_port: i32 = match role {
             node::NodeRole::Root => 9735,
             node::NodeRole::Default => {
-                let mut port = self.config.port_range_min;
-                let mut port_used_by_system = !portpicker::is_free(port);
-                let mut port_used_by_sensei =
-                    self.database.port_in_use(&listen_addr, port.into()).await?;
-
-                while port <= self.config.port_range_max
-                    && (port_used_by_system || port_used_by_sensei)
-                {
-                    port += 1;
-                    port_used_by_system = !portpicker::is_free(port);
-                    port_used_by_sensei =
-                        self.database.port_in_use(&listen_addr, port.into()).await?;
-                }
-
-                port.into()
+                let mut available_ports = self.available_ports.lock().await;
+                available_ports.pop_front().unwrap().into()
             }
         };

         let node_id = Uuid::new_v4().to_string();
-        let (node_pubkey, node_macaroon) = LightningNode::get_node_pubkey_and_macaroon(
+
+        let result = LightningNode::get_node_pubkey_and_macaroon(
             node_id.clone(),
             passphrase,
             self.database.clone(),
         )
-        .await?;
+        .await;
+
+        if let Err(e) = result {
+            let mut available_ports = self.available_ports.lock().await;
+            available_ports.push_front(listen_port.try_into().unwrap());
+            return Err(e);
+        }
+
+        let (node_pubkey, macaroon) = result.unwrap();

         let node = entity::node::ActiveModel {
             id: ActiveValue::Set(node_id),
@@ -442,12 +458,22 @@ impl AdminService {
             ..Default::default()
         };

-        let node = node.insert(self.database.get_connection()).await.unwrap();
+        let result = node.insert(self.database.get_connection()).await;
+
+        if let Err(e) = result {
+            let mut available_ports = self.available_ports.lock().await;
+            available_ports.push_front(listen_port.try_into().unwrap());
+            return Err(e.into());
+        }
+
+        let node = result.unwrap();

-        Ok((node, node_macaroon))
+        Ok((node, macaroon))
     }

     // note: please be sure to stop the node first? maybe?
+    // TODO: this was never updated with the DB rewrite
+    // need to release the port and actually delete the node
     async fn delete_node(&self, node: node::Model) -> Result<(), crate::error::Error> {
         let data_dir = format!("{}/{}/{}", self.data_dir, self.config.network, node.id);
         Ok(fs::remove_dir_all(&data_dir)?)
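Reviewer note: the two error paths above duplicate the lock-and-push_front rollback, `pop_front().unwrap()` panics once the pool is exhausted, and a failed root-node creation pushes 9735 back even though root ports never come from the pool. One way to centralize the rollback is an RAII reservation that returns the port on drop unless explicitly committed. A sketch of that idea under stated assumptions: it uses a std `Mutex` rather than the `tokio::sync::Mutex` from this PR, because an async lock cannot be taken inside `Drop`, and every name here is hypothetical:

    use std::collections::VecDeque;
    use std::sync::{Arc, Mutex};

    /// RAII handle: returns the port to the pool on drop unless committed.
    struct PortReservation {
        pool: Arc<Mutex<VecDeque<u16>>>,
        port: Option<u16>,
    }

    impl PortReservation {
        /// Take the next free port out of the pool, if any.
        fn acquire(pool: &Arc<Mutex<VecDeque<u16>>>) -> Option<Self> {
            let port = pool.lock().unwrap().pop_front()?;
            Some(Self {
                pool: Arc::clone(pool),
                port: Some(port),
            })
        }

        fn port(&self) -> u16 {
            self.port.expect("reservation already committed")
        }

        /// Keep the port permanently (e.g. the node row was inserted).
        fn commit(mut self) -> u16 {
            self.port.take().expect("reservation already committed")
        }
    }

    impl Drop for PortReservation {
        fn drop(&mut self) {
            // Every early return (`?`, explicit return, panic unwind) lands
            // here and puts the port back automatically.
            if let Some(port) = self.port.take() {
                self.pool.lock().unwrap().push_front(port);
            }
        }
    }

    fn main() {
        let pool = Arc::new(Mutex::new(VecDeque::from([9736u16, 9737])));

        {
            let reservation = PortReservation::acquire(&pool).unwrap();
            assert_eq!(reservation.port(), 9736);
            // Dropped without commit: simulates a create_node that failed.
        }
        assert_eq!(pool.lock().unwrap().front(), Some(&9736)); // port came back

        let reservation = PortReservation::acquire(&pool).unwrap();
        assert_eq!(reservation.commit(), 9736); // creation succeeded; keep it
        assert_eq!(pool.lock().unwrap().front(), Some(&9737));
    }

The delete_node TODO above would still need an explicit release of the node's port once deletion actually removes the row, whatever shape the pool API takes.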