From 7f131d9fd425334a48d3e3eef7b15f7d4463592a Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 3 Sep 2024 16:40:46 +0200 Subject: [PATCH 001/255] fix(launchpad): styling fixes --- node-launchpad/src/components/footer.rs | 4 +- node-launchpad/src/components/header.rs | 8 +- node-launchpad/src/components/help.rs | 150 +----------------- node-launchpad/src/components/options.rs | 6 +- .../src/components/popup/beta_programme.rs | 1 + .../src/components/popup/change_drive.rs | 2 + .../src/components/popup/connection_mode.rs | 1 + .../src/components/popup/manage_nodes.rs | 1 + .../src/components/popup/port_range.rs | 2 + .../src/components/popup/reset_nodes.rs | 1 + node-launchpad/src/components/status.rs | 22 ++- node-launchpad/src/error.rs | 1 + 12 files changed, 43 insertions(+), 156 deletions(-) diff --git a/node-launchpad/src/components/footer.rs b/node-launchpad/src/components/footer.rs index 6401766850..ff5a8eb045 100644 --- a/node-launchpad/src/components/footer.rs +++ b/node-launchpad/src/components/footer.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::style::{EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE}; +use crate::style::{COOL_GREY, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE}; use ratatui::{prelude::*, widgets::*}; pub enum NodesToStart { @@ -28,7 +28,7 @@ impl StatefulWidget for Footer { ) } else { ( - Style::default().fg(LIGHT_PERIWINKLE), + Style::default().fg(COOL_GREY), Style::default().fg(LIGHT_PERIWINKLE), ) }; diff --git a/node-launchpad/src/components/header.rs b/node-launchpad/src/components/header.rs index 030dbcc6c0..d503db6213 100644 --- a/node-launchpad/src/components/header.rs +++ b/node-launchpad/src/components/header.rs @@ -74,7 +74,13 @@ impl StatefulWidget for Header { let help = Span::styled("[H]elp", Style::default().fg(help_color)); // Combine the menu parts with separators - let menu = vec![status, Span::raw(" | "), options, Span::raw(" | "), help]; + let menu = vec![ + status, + Span::raw(" | ").fg(VIVID_SKY_BLUE), + options, + Span::raw(" | ").fg(VIVID_SKY_BLUE), + help, + ]; // Calculate spacing between title and menu items let total_width = (layout[0].width - 1) as usize; diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs index 3bc293ad0a..7bb94db50d 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/help.rs @@ -14,7 +14,7 @@ use crate::{ action::Action, components::header::Header, mode::{InputMode, Scene}, - style::{EUCALYPTUS, GHOST_WHITE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE}, + style::{COOL_GREY, GHOST_WHITE}, widgets::hyperlink::Hyperlink, }; use ansi_to_tui::IntoText; @@ -46,11 +46,7 @@ impl Component for Help { // We define a layout, top and down box. 
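+        // (A one-line header plus a fixed nine-line help box is all this
+        // screen needs now that the keyboard-shortcuts table has been removed.)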
let layout = Layout::default() .direction(Direction::Vertical) - .constraints(vec![ - Constraint::Length(1), - Constraint::Min(7), - Constraint::Max(13), - ]) + .constraints(vec![Constraint::Length(1), Constraint::Length(9)]) .split(area); // ==== Header ===== @@ -132,151 +128,15 @@ impl Component for Help { .block( Block::new() .borders(Borders::ALL) + .border_style(Style::default().fg(COOL_GREY)) .padding(Padding::uniform(1)) .title(" Get Help & Support ") - .title_style(Style::default().bold()), + .bold() + .title_style(Style::default().bold().fg(GHOST_WHITE)), ); f.render_widget(table_help_and_support, layout[1]); - // ---- Keyboard shortcuts ---- - let rows_keyboard_shortcuts = vec![ - Row::new(vec![ - Cell::from(Line::from(vec![ - Span::styled("[S] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Status", Style::default().fg(VIVID_SKY_BLUE)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Manage Nodes", Style::default().fg(EUCALYPTUS)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+D] ", Style::default().fg(GHOST_WHITE)), - Span::styled( - "Change Storage Drive", - Style::default().fg(VERY_LIGHT_AZURE), - ), - ])), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from(Line::from(vec![ - Span::styled("[O] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Options", Style::default().fg(VIVID_SKY_BLUE)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+S] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Start All Nodes", Style::default().fg(EUCALYPTUS)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+K] ", Style::default().fg(GHOST_WHITE)), - Span::styled( - "Switch Connection Mode", - Style::default().fg(VERY_LIGHT_AZURE), - ), - ])), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from(Line::from(vec![ - Span::styled("[H] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Help", Style::default().fg(VIVID_SKY_BLUE)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+X] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Stop All Nodes", Style::default().fg(EUCALYPTUS)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+P] ", Style::default().fg(GHOST_WHITE)), - Span::styled( - "Edit Custom Port Range", - Style::default().fg(VERY_LIGHT_AZURE), - ), - ])), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from(Line::from(vec![ - Span::styled("[Q] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Quit", Style::default().fg(VIVID_SKY_BLUE)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+R] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Reset All Nodes", Style::default().fg(EUCALYPTUS)), - ])), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+B] ", Style::default().fg(GHOST_WHITE)), - Span::styled( - "Edit Discord Username", - Style::default().fg(VERY_LIGHT_AZURE), - ), - ])), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Line::from(vec![ - 
Span::styled("[Ctrl+L] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Open Logs Folder", Style::default().fg(VERY_LIGHT_AZURE)), - ])), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Line::from(vec![ - Span::styled("[Ctrl+L] ", Style::default().fg(GHOST_WHITE)), - Span::styled("Open Logs Folder", Style::default().fg(VERY_LIGHT_AZURE)), - ])), - ]), - ]; - - let table_keyboard_shortcuts = Table::new( - rows_keyboard_shortcuts, - vec![ - Constraint::Percentage(33), - Constraint::Percentage(33), - Constraint::Percentage(33), - ], - ) - .block( - Block::new() - .borders(Borders::ALL) - .padding(Padding::uniform(1)) - .title(" Keyboard Shortcuts ") - .title_style(Style::default().bold()), - ); - - f.render_widget(table_keyboard_shortcuts, layout[2]); - Ok(()) } diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 3a2c8658fc..2e234726f9 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -90,7 +90,7 @@ impl Component for Options { .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) - .border_style(Style::default().fg(VIVID_SKY_BLUE)); + .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let storage_drivename = Table::new( vec![ Row::new(vec![ @@ -217,7 +217,7 @@ impl Component for Options { .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) - .border_style(Style::default().fg(VIVID_SKY_BLUE)); + .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let beta_rewards = Table::new( vec![ Row::new(vec![ @@ -268,7 +268,7 @@ impl Component for Options { .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) - .border_style(Style::default().fg(VIVID_SKY_BLUE)); + .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let logs_folder = Table::new( vec![ Row::new(vec![ diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index bcfbe45acd..bb64865c19 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -187,6 +187,7 @@ impl Component for BetaProgramme { Block::default() .borders(Borders::ALL) .title(" Beta Rewards Program ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 02d2ae13e5..c84b707c91 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -150,6 +150,7 @@ impl ChangeDrivePopup { Block::default() .borders(Borders::ALL) .title(" Select a Drive ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)) @@ -248,6 +249,7 @@ impl ChangeDrivePopup { Block::default() .borders(Borders::ALL) .title(" Confirm & Reset ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)) diff --git 
a/node-launchpad/src/components/popup/connection_mode.rs b/node-launchpad/src/components/popup/connection_mode.rs index 134a068e3b..bebb5d789a 100644 --- a/node-launchpad/src/components/popup/connection_mode.rs +++ b/node-launchpad/src/components/popup/connection_mode.rs @@ -225,6 +225,7 @@ impl Component for ChangeConnectionModePopUp { Block::default() .borders(Borders::ALL) .title(" Connection Mode ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)) diff --git a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 1810d02713..3117d22077 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -219,6 +219,7 @@ impl Component for ManageNodes { Block::default() .borders(Borders::ALL) .title(" Manage Nodes ") + .bold() .title_style(Style::new().fg(GHOST_WHITE)) .title_style(Style::new().fg(EUCALYPTUS)) .padding(Padding::uniform(2)) diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs index 491294a96a..142d588f65 100644 --- a/node-launchpad/src/components/popup/port_range.rs +++ b/node-launchpad/src/components/popup/port_range.rs @@ -88,6 +88,7 @@ impl PortRangePopUp { Block::default() .borders(Borders::ALL) .title(" Custom Ports ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), @@ -198,6 +199,7 @@ impl PortRangePopUp { Block::default() .borders(Borders::ALL) .title(" Port Forwarding For Private IPs ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), diff --git a/node-launchpad/src/components/popup/reset_nodes.rs b/node-launchpad/src/components/popup/reset_nodes.rs index 7389b8e472..c7117519f9 100644 --- a/node-launchpad/src/components/popup/reset_nodes.rs +++ b/node-launchpad/src/components/popup/reset_nodes.rs @@ -118,6 +118,7 @@ impl Component for ResetNodesPopup { Block::default() .borders(Borders::ALL) .title(" Reset Nodes ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(VIVID_SKY_BLUE)), diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 9f054f15a0..04febac214 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -532,7 +532,7 @@ impl Component for Status { )]); let line2 = Line::from(vec![ Span::styled("Press ", Style::default().fg(VERY_LIGHT_AZURE)), - Span::styled("[Ctrl+B]", Style::default().fg(GHOST_WHITE)), + Span::styled("[Ctrl+B]", Style::default().fg(GHOST_WHITE).bold()), Span::styled(" to add your ", Style::default().fg(VERY_LIGHT_AZURE)), Span::styled( "Discord Username", @@ -543,6 +543,7 @@ impl Component for Status { Paragraph::new(vec![Line::raw(""), Line::raw(""), line1, line2]).block( Block::default() .title(" Device Status ") + .bold() .title_style(Style::new().fg(GHOST_WHITE)) .borders(Borders::ALL) .padding(Padding::horizontal(1)) @@ -636,6 +637,7 @@ impl Component for Status { .block( Block::default() .title(" Device Status ") + .bold() .title_style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) .padding(Padding::horizontal(1)) @@ -672,9 +674,9 @@ impl Component for Status { if node_rows.is_empty() { let line1 = Line::from(vec![ Span::styled("Press ", 
Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE)), + Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE).bold()), Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), ]); @@ -691,7 +693,10 @@ impl Component for Status { .fg(LIGHT_PERIWINKLE) .block( Block::default() - .title(" Nodes (0) ".to_string()) + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled(" (0) ", Style::default().fg(LIGHT_PERIWINKLE)), + ])) .title_style(Style::default().fg(LIGHT_PERIWINKLE)) .borders(Borders::ALL) .border_style(style::Style::default().fg(EUCALYPTUS)) @@ -711,7 +716,13 @@ impl Component for Status { .highlight_style(Style::new().reversed()) .block( Block::default() - .title(format!(" Nodes ({}) ", self.nodes_to_start)) + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + format!(" ({}) ", self.nodes_to_start), + Style::default().fg(LIGHT_PERIWINKLE), + ), + ])) .padding(Padding::new(2, 2, 1, 1)) .title_style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) @@ -751,6 +762,7 @@ impl Component for Status { Block::default() .borders(Borders::ALL) .title(" Manage Nodes ") + .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(GHOST_WHITE)), diff --git a/node-launchpad/src/error.rs b/node-launchpad/src/error.rs index 1f487ee558..14005a87a6 100644 --- a/node-launchpad/src/error.rs +++ b/node-launchpad/src/error.rs @@ -120,6 +120,7 @@ impl ErrorPopup { Block::default() .borders(Borders::ALL) .title(format!(" {} ", self.title)) + .bold() .title_style(Style::new().fg(RED)) .padding(Padding::uniform(2)) .border_style(Style::new().fg(RED)), From 247f46c1ca2a08363aa0ef0ebe7e8e544d8746a2 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 3 Sep 2024 17:14:48 +0200 Subject: [PATCH 002/255] fix(launchpad): drive selection screen styling --- .../src/components/popup/change_drive.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index c84b707c91..ab82ee2d2b 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -14,7 +14,7 @@ use color_eyre::Result; use crossterm::event::{KeyCode, KeyEvent}; use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, - style::{Modifier, Style, Stylize}, + style::{Style, Stylize}, text::{Line, Span}, widgets::{ Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph, Wrap, @@ -28,7 +28,7 @@ use crate::{ mode::{InputMode, Scene}, style::{ clear_area, COOL_GREY, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, - SPACE_CADET, VIVID_SKY_BLUE, + VIVID_SKY_BLUE, }, system, }; @@ -153,8 +153,7 @@ impl ChangeDrivePopup { .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) - .border_style(Style::new().fg(VIVID_SKY_BLUE)) - .bg(DARK_GUNMETAL), + .border_style(Style::new().fg(VIVID_SKY_BLUE)), ); clear_area(f, layer_zero); @@ -182,12 +181,7 @@ impl ChangeDrivePopup { let items = List::new(items) 
.block(Block::default().padding(Padding::uniform(1))) - .highlight_style( - Style::default() - .add_modifier(Modifier::BOLD) - .add_modifier(Modifier::REVERSED) - .fg(INDIGO), - ) + .highlight_style(Style::default().bg(INDIGO)) .highlight_spacing(HighlightSpacing::Always); f.render_stateful_widget(items, layer_two[0], &mut self.items.state); @@ -605,6 +599,6 @@ impl DriveItem { } }; - ListItem::new(line).style(Style::default().bg(SPACE_CADET)) + ListItem::new(line) } } From 4f9f9d34b1f747f518d5cae0ff4373db8a4d5d9a Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 3 Sep 2024 17:27:43 +0200 Subject: [PATCH 003/255] fix(launchpad): connection mode styling --- .../src/components/popup/connection_mode.rs | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/node-launchpad/src/components/popup/connection_mode.rs b/node-launchpad/src/components/popup/connection_mode.rs index bebb5d789a..6c0d31d598 100644 --- a/node-launchpad/src/components/popup/connection_mode.rs +++ b/node-launchpad/src/components/popup/connection_mode.rs @@ -14,7 +14,7 @@ use color_eyre::Result; use crossterm::event::{KeyCode, KeyEvent}; use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, - style::{Modifier, Style, Stylize}, + style::{Style, Stylize}, text::{Line, Span}, widgets::{Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph}, }; @@ -27,7 +27,7 @@ use crate::{ mode::{InputMode, Scene}, style::{ clear_area, COOL_GREY, DARK_GUNMETAL, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, - SPACE_CADET, VIVID_SKY_BLUE, + VIVID_SKY_BLUE, }, }; @@ -228,8 +228,7 @@ impl Component for ChangeConnectionModePopUp { .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) - .border_style(Style::new().fg(VIVID_SKY_BLUE)) - .bg(DARK_GUNMETAL), + .border_style(Style::new().fg(VIVID_SKY_BLUE)), ); clear_area(f, layer_zero); @@ -259,12 +258,7 @@ impl Component for ChangeConnectionModePopUp { let items = List::new(items) .block(Block::default().padding(Padding::uniform(1))) - .highlight_style( - Style::default() - .add_modifier(Modifier::BOLD) - .add_modifier(Modifier::REVERSED) - .fg(INDIGO), - ) + .highlight_style(Style::default().bg(INDIGO)) .highlight_spacing(HighlightSpacing::Always); f.render_stateful_widget(items, layer_two[0], &mut self.items.state); @@ -397,6 +391,6 @@ impl ConnectionModeItem { ]), }; - ListItem::new(line).style(Style::default().bg(SPACE_CADET)) + ListItem::new(line).style(Style::default().bg(DARK_GUNMETAL)) } } From b87cfe0be52eaf9f944867fc2c249a2499de7077 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 3 Sep 2024 17:44:09 +0200 Subject: [PATCH 004/255] fix(launchpad): first stroke on port edition --- node-launchpad/src/components/popup/port_range.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs index 142d588f65..11b28ce354 100644 --- a/node-launchpad/src/components/popup/port_range.rs +++ b/node-launchpad/src/components/popup/port_range.rs @@ -45,6 +45,7 @@ pub struct PortRangePopUp { port_from_old_value: u32, port_to_old_value: u32, can_save: bool, + first_stroke: bool, } impl PortRangePopUp { @@ -59,6 +60,7 @@ impl PortRangePopUp { port_from_old_value: Default::default(), port_to_old_value: Default::default(), can_save: false, + first_stroke: true, } } @@ -372,6 +374,10 @@ impl Component for PortRangePopUp { vec![] } _ => { + if self.first_stroke { + self.first_stroke = 
false; + self.port_from = Input::default().with_value("".to_string()); + } // if max limit reached, we should not allow any more inputs. if self.port_from.value().len() < INPUT_SIZE as usize { self.port_from.handle_event(&Event::Key(key)); @@ -409,6 +415,7 @@ impl Component for PortRangePopUp { } => { if self.connection_mode == ConnectionMode::CustomPorts { self.active = true; + self.first_stroke = true; self.connection_mode_old_value = connection_mode_old_value; self.validate(); self.port_from_old_value = From 6fea35bb6ad3b990d7f60257387430b690893db2 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 3 Sep 2024 17:49:39 +0200 Subject: [PATCH 005/255] fix(launchpad): port selection screen styling --- node-launchpad/src/components/popup/port_range.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs index 11b28ce354..dd6fa90b39 100644 --- a/node-launchpad/src/components/popup/port_range.rs +++ b/node-launchpad/src/components/popup/port_range.rs @@ -122,7 +122,6 @@ impl PortRangePopUp { f.render_widget(prompt.fg(GHOST_WHITE), layer_two[0]); let spaces_from = " ".repeat((INPUT_AREA - 1) as usize - self.port_from.value().len()); - let spaces_to = " ".repeat((INPUT_AREA - 1) as usize - self.port_to.value().len()); let input_line = Line::from(vec![ Span::styled( @@ -133,10 +132,7 @@ impl PortRangePopUp { .underlined(), ), Span::styled(" to ", Style::default().fg(GHOST_WHITE)), - Span::styled( - format!("{}{} ", spaces_to, self.port_to.value()), - Style::default().fg(VIVID_SKY_BLUE), - ), + Span::styled(self.port_to.value(), Style::default().fg(LIGHT_PERIWINKLE)), ]) .alignment(Alignment::Center); @@ -148,11 +144,11 @@ impl PortRangePopUp { "Choose the start of the range of {} ports.", PORT_ALLOCATION + 1 ), - Style::default().fg(GHOST_WHITE), + Style::default().fg(LIGHT_PERIWINKLE), )), Line::from(Span::styled( format!("This must be between {} and {}.", PORT_MIN, PORT_MAX), - Style::default().fg(if self.can_save { GHOST_WHITE } else { RED }), + Style::default().fg(if self.can_save { LIGHT_PERIWINKLE } else { RED }), )), ]) .block(block::Block::default().padding(Padding::horizontal(2))) From f9e5082b8497d38419941ff3d427304e9994f8e8 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 4 Sep 2024 20:54:41 +0800 Subject: [PATCH 006/255] fix(auditor): not to re-attempt fetched spend --- sn_auditor/src/dag_db.rs | 14 +++++++++++--- sn_client/src/audit/dag_crawling.rs | 7 ++----- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/sn_auditor/src/dag_db.rs b/sn_auditor/src/dag_db.rs index a1bb786010..d570799c5d 100644 --- a/sn_auditor/src/dag_db.rs +++ b/sn_auditor/src/dag_db.rs @@ -303,6 +303,7 @@ impl SpendDagDb { }; let mut addrs_to_get = BTreeMap::new(); + let mut addrs_fetched = BTreeSet::new(); loop { // get expired utxos for re-attempt fetch @@ -350,16 +351,23 @@ impl SpendDagDb { ) })); } else if let Some(sender) = spend_processing.clone() { - let (reattempt_addrs, fetched_addrs) = client + let (reattempt_addrs, fetched_addrs, addrs_for_further_track) = client .crawl_to_next_utxos(&mut addrs_to_get, sender.clone(), *UTXO_REATTEMPT_SECONDS) .await; + let mut utxo_addresses = self.utxo_addresses.write().await; - for addr in fetched_addrs.iter() { - let _ = utxo_addresses.remove(addr); + for addr in fetched_addrs { + let _ = utxo_addresses.remove(&addr); + let _ = addrs_fetched.insert(addr); } for (addr, tuple) in reattempt_addrs { let _ = 
utxo_addresses.insert(addr, tuple); } + for (addr, amount) in addrs_for_further_track { + if !addrs_fetched.contains(&addr) { + let _ = addrs_to_get.entry(addr).or_insert((0, amount)); + } + } } else { panic!("There is no point in running the auditor if we are not collecting the DAG or collecting data through crawling. Please enable the `dag-collection` feature or provide beta program related arguments."); }; diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs index e29760858e..7816eb2806 100644 --- a/sn_client/src/audit/dag_crawling.rs +++ b/sn_client/src/audit/dag_crawling.rs @@ -155,6 +155,7 @@ impl Client { ) -> ( BTreeMap, Vec, + BTreeSet<(SpendAddress, NanoTokens)>, ) { let mut failed_utxos = BTreeMap::new(); let mut tasks = JoinSet::new(); @@ -245,11 +246,7 @@ impl Client { } } - for (addr, amount) in addrs_for_further_track { - let _ = addrs_to_get.entry(addr).or_insert((0, amount)); - } - - (failed_utxos, fetched_addrs) + (failed_utxos, fetched_addrs, addrs_for_further_track) } /// Crawls the Spend Dag from a given SpendAddress recursively From 829997379da668d9d5c4a969484724375d367793 Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 6 Sep 2024 21:41:33 +0800 Subject: [PATCH 007/255] fix(auditor): use DashMap and stream for better threading --- Cargo.lock | 15 +++ sn_auditor/src/dag_db.rs | 7 +- sn_client/Cargo.toml | 1 + sn_client/src/audit/dag_crawling.rs | 195 +++++++++++++++------------- 4 files changed, 130 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bfa88e954..5448a94424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1596,6 +1596,20 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -7097,6 +7111,7 @@ dependencies = [ "console_error_panic_hook", "crdts", "custom_debug", + "dashmap", "dirs-next", "eyre", "futures", diff --git a/sn_auditor/src/dag_db.rs b/sn_auditor/src/dag_db.rs index d570799c5d..a21f64c94b 100644 --- a/sn_auditor/src/dag_db.rs +++ b/sn_auditor/src/dag_db.rs @@ -352,9 +352,14 @@ impl SpendDagDb { })); } else if let Some(sender) = spend_processing.clone() { let (reattempt_addrs, fetched_addrs, addrs_for_further_track) = client - .crawl_to_next_utxos(&mut addrs_to_get, sender.clone(), *UTXO_REATTEMPT_SECONDS) + .crawl_to_next_utxos( + addrs_to_get.clone(), + sender.clone(), + *UTXO_REATTEMPT_SECONDS, + ) .await; + addrs_to_get.clear(); let mut utxo_addresses = self.utxo_addresses.write().await; for addr in fetched_addrs { let _ = utxo_addresses.remove(&addr); diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index b1fd1be42a..d81d645ace 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -38,6 +38,7 @@ bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } crdts = "7.3.2" custom_debug = "~0.6.1" +dashmap = "~6.1.0" futures = "~0.3.13" hex = "~0.4.3" itertools = "~0.12.1" diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs index 7816eb2806..fa00a5078f 100644 --- a/sn_client/src/audit/dag_crawling.rs +++ b/sn_client/src/audit/dag_crawling.rs @@ -8,7 +8,11 @@ use crate::{Client, Error, SpendDag}; -use futures::{future::join_all, StreamExt}; +use 
dashmap::DashMap; +use futures::{ + future::join_all, + stream::{self, StreamExt}, +}; use sn_networking::{GetRecordError, NetworkError}; use sn_transfers::{ NanoTokens, SignedSpend, SpendAddress, SpendReason, UniquePubkey, WalletError, WalletResult, @@ -16,9 +20,10 @@ use sn_transfers::{ }; use std::{ collections::{BTreeMap, BTreeSet}, + sync::Arc, time::{Duration, Instant}, }; -use tokio::{sync::mpsc::Sender, task::JoinSet}; +use tokio::sync::mpsc::Sender; const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; @@ -145,11 +150,12 @@ impl Client { /// Get spends from a set of given SpendAddresses /// Drain the addresses at the same layer first, then: - /// 1, return with UTXOs for re-attempt (with insertion time stamp) - /// 2, addrs_to_get to hold the addresses for further track + /// 1, return failed_utxos for re-attempt (with insertion time stamp) + /// 2, return fetched_address to avoid un-necessary re-attempts + /// 3, return addrs_for_further_track for further track pub async fn crawl_to_next_utxos( &self, - addrs_to_get: &mut BTreeMap, + addrs_to_get: BTreeMap, sender: Sender<(SignedSpend, u64, bool)>, reattempt_seconds: u64, ) -> ( @@ -157,96 +163,111 @@ impl Client { Vec, BTreeSet<(SpendAddress, NanoTokens)>, ) { - let mut failed_utxos = BTreeMap::new(); - let mut tasks = JoinSet::new(); - let mut addrs_for_further_track = BTreeSet::new(); - let mut fetched_addrs = Vec::new(); - - while !addrs_to_get.is_empty() || !tasks.is_empty() { - while tasks.len() < 32 && !addrs_to_get.is_empty() { - if let Some((addr, (failed_times, amount))) = addrs_to_get.pop_first() { - let client_clone = self.clone(); - let _ = tasks.spawn(async move { - ( - client_clone.crawl_spend(addr).await, - failed_times, - addr, - amount, - ) - }); - } - } - - if let Some(Ok((result, failed_times, address, amount))) = tasks.join_next().await { - match result { - InternalGetNetworkSpend::Spend(spend) => { - let for_further_track = beta_track_analyze_spend(&spend); - let _ = sender - .send((*spend, for_further_track.len() as u64, false)) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string())); - addrs_for_further_track.extend(for_further_track); - fetched_addrs.push(address); - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - warn!( - "Detected burnt spend regarding {address:?} - {:?}", - spends.len() - ); - for (i, spend) in spends.iter().enumerate() { - let reason = spend.reason(); - let amount = spend.spend.amount(); - let ancestors_len = spend.spend.ancestors.len(); - let descendants_len = spend.spend.descendants.len(); - let roy_len = spend.spend.network_royalties().len(); + // max concurrency for the tasks of fetching records from network. 
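+        // `buffer_unordered` below keeps at most this many `crawl_spend`
+        // futures in flight at once; each completed future folds its result
+        // into the shared `DashMap`s.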
+ const MAX_CONCURRENT: usize = 64; + + let failed_utxos_arc: Arc> = Arc::new(DashMap::new()); + let addrs_for_further_track_arc: Arc> = Arc::new(DashMap::new()); + let fetched_addrs_arc: Arc> = Arc::new(DashMap::new()); + + stream::iter(addrs_to_get.into_iter()) + .map(|(addr, (failed_times, amount))| { + let client_clone = self.clone(); + let sender_clone = sender.clone(); + + let failed_utxos = Arc::clone(&failed_utxos_arc); + let addrs_for_further_track = Arc::clone(&addrs_for_further_track_arc); + let fetched_addrs = Arc::clone(&fetched_addrs_arc); + async move { + let result = client_clone.crawl_spend(addr).await; + + match result { + InternalGetNetworkSpend::Spend(spend) => { + let for_further_track = beta_track_analyze_spend(&spend); + let _ = sender_clone + .send((*spend, for_further_track.len() as u64, false)) + .await; + for entry in for_further_track { + let _ = addrs_for_further_track.insert(entry, ()); + } + fetched_addrs.insert(addr, ()); + } + InternalGetNetworkSpend::DoubleSpend(spends) => { warn!( - "burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", - spend.spend.ancestors, spend.spend.descendants + "Detected burnt spend regarding {addr:?} - {:?}", + spends.len() ); - let for_further_track = beta_track_analyze_spend(spend); - addrs_for_further_track.extend(for_further_track); - - let _ = sender - .send((spend.clone(), 0, true)) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string())); + for (i, spend) in spends.into_iter().enumerate() { + let reason = spend.reason(); + let amount = spend.spend.amount(); + let ancestors_len = spend.spend.ancestors.len(); + let descendants_len = spend.spend.descendants.len(); + let roy_len = spend.spend.network_royalties().len(); + warn!("burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", + spend.spend.ancestors, spend.spend.descendants); + } + fetched_addrs.insert(addr, ()); + } + InternalGetNetworkSpend::NotFound => { + let reattempt_interval = if amount.as_nano() > 100000 { + info!("Not find spend of big-UTXO {addr:?} with {amount}"); + reattempt_seconds + } else { + reattempt_seconds * (failed_times * 8 + 1) + }; + failed_utxos.insert( + addr, + ( + failed_times + 1, + Instant::now() + Duration::from_secs(reattempt_interval), + amount, + ), + ); + } + InternalGetNetworkSpend::Error(e) => { + warn!("Fetching spend {addr:?} with {amount:?} result in error {e:?}"); + // Error of `NotEnoughCopies` could be re-attempted and succeed eventually. + failed_utxos.insert( + addr, + ( + failed_times + 1, + Instant::now() + Duration::from_secs(reattempt_seconds), + amount, + ), + ); } - fetched_addrs.push(address); - } - InternalGetNetworkSpend::NotFound => { - let reattempt_interval = if amount.as_nano() > 100000 { - info!("Not find spend of big-UTXO {address:?} with {amount}"); - reattempt_seconds - } else { - reattempt_seconds * (failed_times * 8 + 1) - }; - let _ = failed_utxos.insert( - address, - ( - failed_times + 1, - Instant::now() + Duration::from_secs(reattempt_interval), - amount, - ), - ); - } - InternalGetNetworkSpend::Error(e) => { - warn!("Fetching spend {address:?} with {amount:?} result in error {e:?}"); - // Error of `NotEnoughCopies` could be re-attempted and succeed eventually. 
- let _ = failed_utxos.insert( - address, - ( - failed_times + 1, - Instant::now() + Duration::from_secs(reattempt_seconds), - amount, - ), - ); } + + (addr, amount) } - } + }) + .buffer_unordered(MAX_CONCURRENT) + .for_each(|(address, amount)| async move { + info!("Completed fetching attempt of {address:?} with amount {amount:?}"); + }) + .await; + + let mut failed_utxos_result = BTreeMap::new(); + for entry in failed_utxos_arc.iter() { + let key = entry.key(); + let val = entry.value(); + let _ = failed_utxos_result.insert(*key, *val); + } + + let mut fetched_addrs = Vec::new(); + for entry in fetched_addrs_arc.iter() { + let key = entry.key(); + fetched_addrs.push(*key); + } + + let mut addrs_for_further_track = BTreeSet::new(); + for entry in addrs_for_further_track_arc.iter() { + let key = entry.key(); + let _ = addrs_for_further_track.insert(*key); } - (failed_utxos, fetched_addrs, addrs_for_further_track) + (failed_utxos_result, fetched_addrs, addrs_for_further_track) } /// Crawls the Spend Dag from a given SpendAddress recursively From 6e847317a25b71c27bb0d08c9e71a970bd3df1cd Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 29 Aug 2024 11:51:38 +0900 Subject: [PATCH 008/255] feat(protocol): add Scratchpad data type --- sn_networking/src/cmd.rs | 13 +- sn_networking/src/lib.rs | 2 +- sn_networking/src/record_store.rs | 147 +++++++++++++++++- sn_node/src/error.rs | 7 + sn_node/src/log_markers.rs | 4 + sn_node/src/put_validation.rs | 119 +++++++++++++- sn_protocol/Cargo.toml | 1 + sn_protocol/src/error.rs | 5 + sn_protocol/src/lib.rs | 14 ++ sn_protocol/src/storage.rs | 4 +- sn_protocol/src/storage/address.rs | 2 + sn_protocol/src/storage/address/scratchpad.rs | 90 +++++++++++ sn_protocol/src/storage/header.rs | 19 +++ sn_protocol/src/storage/scratchpad.rs | 93 +++++++++++ 14 files changed, 509 insertions(+), 11 deletions(-) create mode 100644 sn_protocol/src/storage/address/scratchpad.rs create mode 100644 sn_protocol/src/storage/scratchpad.rs diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index d70fc3eba5..4a297eee25 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -96,7 +96,7 @@ pub enum LocalSwarmCmd { /// Notify the node received a payment. 
PaymentReceived, /// Put record to the local RecordStore - PutLocalRecord { + PutVerifiedLocalRecord { record: Record, }, /// Remove a local record from the RecordStore @@ -194,7 +194,7 @@ pub enum NetworkSwarmCmd { impl Debug for LocalSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - LocalSwarmCmd::PutLocalRecord { record } => { + LocalSwarmCmd::PutVerifiedLocalRecord { record } => { write!( f, "LocalSwarmCmd::PutLocalRecord {{ key: {:?} }}", @@ -587,8 +587,8 @@ impl SwarmDriver { let _ = sender.send(record); } - LocalSwarmCmd::PutLocalRecord { record } => { - cmd_string = "PutLocalRecord"; + LocalSwarmCmd::PutVerifiedLocalRecord { record } => { + cmd_string = "PutVerifiedLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -596,11 +596,14 @@ impl SwarmDriver { Ok(record_header) => { match record_header.kind { RecordKind::Chunk => RecordType::Chunk, + RecordKind::Scratchpad => RecordType::Scratchpad, RecordKind::Spend | RecordKind::Register => { let content_hash = XorName::from_content(&record.value); RecordType::NonChunk(content_hash) } - RecordKind::ChunkWithPayment | RecordKind::RegisterWithPayment => { + RecordKind::ChunkWithPayment + | RecordKind::RegisterWithPayment + | RecordKind::ScratchpadWithPayment => { error!("Record {record_key:?} with payment shall not be stored locally."); return Err(NetworkError::InCorrectRecordHeader); } diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 05e9b162fc..a877b206f4 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -742,7 +742,7 @@ impl Network { PrettyPrintRecordKey::from(&record.key), record.value.len() ); - self.send_local_swarm_cmd(LocalSwarmCmd::PutLocalRecord { record }) + self.send_local_swarm_cmd(LocalSwarmCmd::PutVerifiedLocalRecord { record }) } /// Returns true if a RecordKey is present locally in the RecordStore diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index ccfe942300..26b967a239 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -955,16 +955,18 @@ mod tests { use super::*; use bls::SecretKey; + use sn_protocol::storage::{Scratchpad, ScratchpadAddress}; + use xor_name::XorName; + use bytes::Bytes; use eyre::{bail, ContextCompat}; use libp2p::kad::K_VALUE; use libp2p::{core::multihash::Multihash, kad::RecordKey}; use quickcheck::*; + use sn_protocol::storage::{try_serialize_record, Chunk, ChunkAddress}; use sn_transfers::{MainPubkey, PaymentQuote}; use std::collections::BTreeMap; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; - - use sn_protocol::storage::{try_serialize_record, ChunkAddress}; use tokio::runtime::Runtime; use tokio::time::{sleep, Duration}; @@ -1153,6 +1155,147 @@ mod tests { assert!(store.get(&r.key).is_none()); } + #[tokio::test] + async fn can_store_and_retrieve_chunk() { + let temp_dir = std::env::temp_dir(); + let store_config = NodeRecordStoreConfig { + storage_dir: temp_dir, + ..Default::default() + }; + let self_id = PeerId::random(); + let (network_event_sender, _) = mpsc::channel(1); + let (swarm_cmd_sender, _) = mpsc::channel(1); + + let mut store = NodeRecordStore::with_config( + self_id, + store_config, + network_event_sender, + swarm_cmd_sender, + ); + + // Create a chunk + let chunk_data = Bytes::from_static(b"Test chunk data"); + let chunk = Chunk::new(chunk_data.clone()); + let chunk_address = *chunk.address(); + + // Create a record from the chunk + let record = Record { + key: 
NetworkAddress::ChunkAddress(chunk_address).to_record_key(),
            value: chunk_data.to_vec(),
            expires: None,
            publisher: None,
        };

        // Store the chunk using put_verified
        assert!(store
            .put_verified(record.clone(), RecordType::Chunk)
            .is_ok());

        // Mark as stored (simulating the CompletedWrite event)
        store.mark_as_stored(record.key.clone(), RecordType::Chunk);

        // Verify the chunk is stored
        let stored_record = store.get(&record.key);
        assert!(stored_record.is_some(), "Chunk should be stored");

        if let Some(stored) = stored_record {
            assert_eq!(
                stored.value, chunk_data,
                "Stored chunk data should match original"
            );

            let stored_address = ChunkAddress::new(XorName::from_content(&stored.value));
            assert_eq!(
                stored_address, chunk_address,
                "Stored chunk address should match original"
            );
        }

        // Clean up
        store.remove(&record.key);
        assert!(
            store.get(&record.key).is_none(),
            "Chunk should be removed after cleanup"
        );
    }

    #[tokio::test]
    async fn can_store_and_retrieve_scratchpad() {
        let temp_dir = std::env::temp_dir();
        let store_config = NodeRecordStoreConfig {
            storage_dir: temp_dir,
            ..Default::default()
        };
        let self_id = PeerId::random();
        let (network_event_sender, _) = mpsc::channel(1);
        let (swarm_cmd_sender, _) = mpsc::channel(1);

        let mut store = NodeRecordStore::with_config(
            self_id,
            store_config,
            network_event_sender,
            swarm_cmd_sender,
        );

        // Create a scratchpad
        let scratchpad_data = Bytes::from_static(b"Test scratchpad data");

        let owner_sk = SecretKey::random();
        let owner_pk = owner_sk.public_key();

        let mut signing_bytes = 0_u64.to_be_bytes().to_vec();
        signing_bytes.extend(XorName::from_content(&scratchpad_data).to_vec()); // add the content hash

        let sig = owner_sk.sign(&signing_bytes);
        let scratchpad = Scratchpad::new(owner_pk, scratchpad_data.clone(), 0, sig);
        let scratchpad_address = *scratchpad.address();

        // Create a record from the scratchpad
        let record = Record {
            key: NetworkAddress::ScratchpadAddress(scratchpad_address).to_record_key(),
            value: scratchpad_data.to_vec(),
            expires: None,
            publisher: None,
        };

        // Store the scratchpad using put_verified
        assert!(store
            .put_verified(
                record.clone(),
                RecordType::NonChunk(XorName::from_content(&record.value))
            )
            .is_ok());

        // Mark as stored (simulating the CompletedWrite event)
        store.mark_as_stored(
            record.key.clone(),
            RecordType::NonChunk(XorName::from_content(&record.value)),
        );

        // Verify the scratchpad is stored
        let stored_record = store.get(&record.key);
        assert!(stored_record.is_some(), "Scratchpad should be stored");

        if let Some(stored) = stored_record {
            assert_eq!(
                stored.value, scratchpad_data,
                "Stored scratchpad data should match original"
            );

            let stored_address = ScratchpadAddress::new(owner_pk);
            assert_eq!(
                stored_address, scratchpad_address,
                "Stored scratchpad address should match original"
            );
        }

        // Clean up
        store.remove(&record.key);
        assert!(
            store.get(&record.key).is_none(),
            "Scratchpad should be removed after cleanup"
        );
    }
    #[tokio::test]
    async fn pruning_on_full() -> Result<()> {
        let max_iterations = 10;
diff --git a/sn_node/src/error.rs b/sn_node/src/error.rs
index 167db9eb20..1c2bb23e16 100644
--- a/sn_node/src/error.rs
+++ b/sn_node/src/error.rs
@@ -49,6 +49,13 @@ pub enum Error {
    #[error("The Record::key does not match with the key derived from Record::value")]
    RecordKeyMismatch,

+    // Scratchpad is an old version
+    #[error("A newer version of this Scratchpad already exists")]
+    IgnoringOutdatedScratchpadPut,
+    // Scratchpad is invalid
+    #[error("Scratchpad signature is invalid over the counter + content hash")]
+    InvalidScratchpadSignature,
+
    // ---------- Payment Errors
    #[error("The content of the payment quote is invalid")]
    InvalidQuoteContent,
diff --git a/sn_node/src/log_markers.rs b/sn_node/src/log_markers.rs
index ab1aacf325..0be204d38c 100644
--- a/sn_node/src/log_markers.rs
+++ b/sn_node/src/log_markers.rs
@@ -43,6 +43,8 @@ pub enum Marker<'a> {
    ValidRegisterRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>),
    /// Valid non-existing Spend record PUT from the network received and stored
    ValidSpendRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>),
+    /// Valid Scratchpad record PUT from the network received and stored
+    ValidScratchpadRecordPutFromNetwork(&'a PrettyPrintRecordKey<'a>),

    /// Valid paid to us and royalty paid chunk stored
    ValidPaidChunkPutFromClient(&'a PrettyPrintRecordKey<'a>),
@@ -50,6 +52,8 @@ pub enum Marker<'a> {
    ValidPaidRegisterPutFromClient(&'a PrettyPrintRecordKey<'a>),
    /// Valid spend stored
    ValidSpendPutFromClient(&'a PrettyPrintRecordKey<'a>),
+    /// Valid scratchpad stored
+    ValidScratchpadRecordPutFromClient(&'a PrettyPrintRecordKey<'a>),

    /// Record rejected
    RecordRejected(&'a PrettyPrintRecordKey<'a>, &'a Error),
diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs
index 8879b55cb4..8e13e698aa 100644
--- a/sn_node/src/put_validation.rs
+++ b/sn_node/src/put_validation.rs
@@ -12,7 +12,7 @@ use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkEr
 use sn_protocol::{
    storage::{
        try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType,
-        SpendAddress,
+        Scratchpad, SpendAddress,
    },
    NetworkAddress, PrettyPrintRecordKey,
 };
@@ -87,12 +87,62 @@ impl Node {

                store_chunk_result
            }
+
            RecordKind::Chunk => {
                error!("Chunk should not be validated at this point");
                Err(Error::InvalidPutWithoutPayment(
                    PrettyPrintRecordKey::from(&record.key).into_owned(),
                ))
            }
+            RecordKind::ScratchpadWithPayment => {
+                let record_key = record.key.clone();
+                let (payment, scratchpad) =
+                    try_deserialize_record::<(Payment, Scratchpad)>(&record)?;
+                let _already_exists = self
+                    .validate_key_and_existence(&scratchpad.network_address(), &record_key)
+                    .await?;
+
+                // Validate the payment and that we received what we asked.
+                // This stores any payments to disk
+                let payment_res = self
+                    .payment_for_us_exists_and_is_still_valid(
+                        &scratchpad.network_address(),
+                        payment,
+                    )
+                    .await;
+
+                // Finally before we store, lets bail for any payment issues
+                payment_res?;
+
+                // Writing the scratchpad to disk takes time, hence try to execute it first,
+                // so that when the replication target asks for the copy,
+                // the node has a higher chance to respond.
+                let store_scratchpad_result = self
+                    .validate_and_store_scratchpad_record(record, true)
+                    .await;
+
+                if store_scratchpad_result.is_ok() {
+                    Marker::ValidScratchpadRecordPutFromClient(&PrettyPrintRecordKey::from(
+                        &record_key,
+                    ))
+                    .log();
+                    self.replicate_valid_fresh_record(record_key.clone(), RecordType::Scratchpad);
+
+                    // Notify replication_fetcher to mark the attempt as completed.
+                    // Send the notification early to avoid it being skipped when the record
+                    // becomes stored during the fetch by another interleaved process.
+                    self.network()
+                        .notify_fetch_completed(record_key, RecordType::Scratchpad);
+                }
+
+                store_scratchpad_result
+            }
+            RecordKind::Scratchpad => {
+                error!("Scratchpad should not be validated at this point");
+                Err(Error::InvalidPutWithoutPayment(
+                    PrettyPrintRecordKey::from(&record.key).into_owned(),
+                ))
+            }
            RecordKind::Spend => {
                let record_key = record.key.clone();
                let value_to_hash = record.value.clone();
@@ -213,7 +263,9 @@ impl Node {
        let record_header = RecordHeader::from_record(&record)?;
        match record_header.kind {
            // A separate flow handles payment for chunks and registers
-            RecordKind::ChunkWithPayment | RecordKind::RegisterWithPayment => {
+            RecordKind::ChunkWithPayment
+            | RecordKind::RegisterWithPayment
+            | RecordKind::ScratchpadWithPayment => {
                warn!("Prepaid record came with Payment, which should be handled in another flow");
                Err(Error::UnexpectedRecordWithPayment(
                    PrettyPrintRecordKey::from(&record.key).into_owned(),
@@ -236,6 +288,10 @@ impl Node {

                self.store_chunk(&chunk)
            }
+            RecordKind::Scratchpad => {
+                self.validate_and_store_scratchpad_record(record, false)
+                    .await
+            }
            RecordKind::Spend => {
                let record_key = record.key.clone();
                let spends = try_deserialize_record::<Vec<SignedSpend>>(&record)?;
@@ -323,6 +379,65 @@ impl Node {
        Ok(())
    }

+    /// Validate and store a `Scratchpad` to the RecordStore
+    ///
+    /// When a node receives an update packet:
+    /// - Verify Name: It MUST hash the provided public key and confirm it matches the name in the packet.
+    /// - Check Counter: It MUST ensure that the new counter value is strictly greater than the currently stored value to prevent replay attacks.
+    /// - Verify Signature: It MUST use the public key to verify the BLS12-381 signature against the content hash and the counter.
+    /// - Accept or Reject: If all verifications succeed, the node MUST accept the packet and replace any previous version. Otherwise, it MUST reject the update.
+    pub(crate) async fn validate_and_store_scratchpad_record(
+        &self,
+        record: Record,
+        is_client_put: bool,
+    ) -> Result<()> {
+        let record_key = record.key.clone();
+
+        let scratchpad = try_deserialize_record::<Scratchpad>(&record)?;
+
+        // owner PK is defined herein, so as long as record key and this match, we're good
+        let addr = scratchpad.address();
+        debug!("Validating and storing scratchpad {addr:?}");
+
+        // check if the deserialized value's ScratchpadAddress matches the record's key
+        let scratchpad_key = NetworkAddress::ScratchpadAddress(*addr).to_record_key();
+        if scratchpad_key != record_key {
+            warn!("Record's key does not match with the value's ScratchpadAddress, ignoring PUT.");
+            return Err(Error::RecordKeyMismatch);
+        }
+
+        // if the Scratchpad is already present locally, make sure this one is a newer version
+        if let Some(local_pad) = self.network().get_local_record(&scratchpad_key).await? {
+            let local_pad = try_deserialize_record::<Scratchpad>(&local_pad)?;
+            if local_pad.counter >= scratchpad.counter {
+                warn!("Rejecting Scratchpad PUT with counter less than or equal to the current counter");
+                return Err(Error::IgnoringOutdatedScratchpadPut);
+            }
+        }
+
+        // ensure data integrity
+        if !scratchpad.is_valid() {
+            warn!("Rejecting Scratchpad PUT with invalid signature");
+            return Err(Error::InvalidScratchpadSignature);
+        }
+
+        info!(
+            "Storing scratchpad {addr:?} with content hash {:?} as Record locally",
+            scratchpad.encrypted_data_hash()
+        );
+        self.network().put_local_record(record);
+
+        let pretty_key = PrettyPrintRecordKey::from(&record_key);
+
+        self.record_metrics(Marker::ValidScratchpadRecordPutFromNetwork(&pretty_key));
+
+        if is_client_put {
+            self.replicate_valid_fresh_record(record_key, RecordType::Scratchpad);
+        }
+
+        Ok(())
+    }
    /// Validate and store a `Register` to the RecordStore
    pub(crate) async fn validate_and_store_register(
        &self,
diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml
index 91feb1a370..b9a52f61ac 100644
--- a/sn_protocol/Cargo.toml
+++ b/sn_protocol/Cargo.toml
@@ -41,6 +41,7 @@ prost = { version = "0.9" , optional=true }
 tonic = { version = "0.6.2", optional=true, default-features = false, features = ["prost", "tls", "codegen"]}
 xor_name = "5.0.0"

+
 [build-dependencies]
 # watch out updating this, protoc compiler needs to be installed on all build systems
 # arm builds + musl are very problematic
diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs
index 2e481a67ce..c89ff9cba3 100644
--- a/sn_protocol/src/error.rs
+++ b/sn_protocol/src/error.rs
@@ -44,6 +44,11 @@ pub enum Error {
        key: Box,
    },

+    // ---------- Scratchpad errors
+    /// The provided String can't be deserialized as a ScratchpadAddress
+    #[error("Failed to deserialize hex ScratchpadAddress")]
+    ScratchpadHexDeserializeFailed,
+
    // ---------- payment errors
    #[error("There was an error getting the storecost from kademlia store")]
    GetStoreCostFailed,
diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs
index 900179bed1..1704df2038 100644
--- a/sn_protocol/src/lib.rs
+++ b/sn_protocol/src/lib.rs
@@ -29,6 +29,7 @@ pub mod safenode_proto {
    tonic::include_proto!("safenode_proto");
 }
 pub use error::Error;
+use storage::ScratchpadAddress;

 use self::storage::{ChunkAddress, RegisterAddress, SpendAddress};
 use bytes::Bytes;
@@ -82,6 +83,8 @@ pub enum NetworkAddress {
    RegisterAddress(RegisterAddress),
    /// The NetworkAddress is representing a RecordKey.
    RecordKey(Bytes),
+    /// The NetworkAddress is representing a ScratchpadAddress.
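+    /// (Scratchpads are located at the hash of their owner's public key.)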
+ ScratchpadAddress(ScratchpadAddress), } impl NetworkAddress { @@ -118,6 +121,7 @@ impl NetworkAddress { NetworkAddress::SpendAddress(cash_note_address) => { cash_note_address.xorname().0.to_vec() } + NetworkAddress::ScratchpadAddress(addr) => addr.xorname().0.to_vec(), NetworkAddress::RegisterAddress(register_address) => { register_address.xorname().0.to_vec() } @@ -164,6 +168,7 @@ impl NetworkAddress { NetworkAddress::SpendAddress(cash_note_address) => { RecordKey::new(cash_note_address.xorname()) } + NetworkAddress::ScratchpadAddress(addr) => RecordKey::new(&addr.xorname()), NetworkAddress::PeerId(bytes) => RecordKey::new(bytes), } } @@ -216,6 +221,12 @@ impl Debug for NetworkAddress { &spend_address.to_hex()[0..6] ) } + NetworkAddress::ScratchpadAddress(scratchpad_address) => { + format!( + "NetworkAddress::ScratchpadAddress({} - ", + &scratchpad_address.to_hex()[0..6] + ) + } NetworkAddress::RegisterAddress(register_address) => format!( "NetworkAddress::RegisterAddress({} - ", ®ister_address.to_hex()[0..6] @@ -245,6 +256,9 @@ impl Display for NetworkAddress { NetworkAddress::SpendAddress(addr) => { write!(f, "NetworkAddress::SpendAddress({addr:?})") } + NetworkAddress::ScratchpadAddress(addr) => { + write!(f, "NetworkAddress::ScratchpadAddress({addr:?})") + } NetworkAddress::RegisterAddress(addr) => { write!(f, "NetworkAddress::RegisterAddress({addr:?})") } diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs index 3138c1011a..c0a9007ed0 100644 --- a/sn_protocol/src/storage.rs +++ b/sn_protocol/src/storage.rs @@ -9,15 +9,17 @@ mod address; mod chunks; mod header; +mod scratchpad; use crate::error::Error; use core::fmt; use std::{str::FromStr, time::Duration}; pub use self::{ - address::{ChunkAddress, RegisterAddress, SpendAddress}, + address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress}, chunks::Chunk, header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType}, + scratchpad::Scratchpad, }; /// Represents the strategy for retrying operations. This encapsulates both the duration it may take for an operation to diff --git a/sn_protocol/src/storage/address.rs b/sn_protocol/src/storage/address.rs index 1c00f75dfb..a076b97748 100644 --- a/sn_protocol/src/storage/address.rs +++ b/sn_protocol/src/storage/address.rs @@ -7,7 +7,9 @@ // permissions and limitations relating to use of the SAFE Network Software. mod chunk; +mod scratchpad; pub use self::chunk::ChunkAddress; +pub use self::scratchpad::ScratchpadAddress; pub use sn_registers::RegisterAddress; pub use sn_transfers::SpendAddress; diff --git a/sn_protocol/src/storage/address/scratchpad.rs b/sn_protocol/src/storage/address/scratchpad.rs new file mode 100644 index 0000000000..ecd9735183 --- /dev/null +++ b/sn_protocol/src/storage/address/scratchpad.rs @@ -0,0 +1,90 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
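+
+// A scratchpad address wraps the owner's BLS public key; the network name of
+// the scratchpad is the hash of that key, so each owner key maps to exactly
+// one scratchpad location on the network.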
+ +use crate::error::{Error, Result}; +use bls::PublicKey; +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; +use xor_name::XorName; + +/// Address of a Scratchpad on the SAFE Network +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub struct ScratchpadAddress { + /// Owner of the scratchpad + pub(crate) owner: PublicKey, +} + +impl Display for ScratchpadAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "({:?})", &self.to_hex()[0..6]) + } +} + +impl Debug for ScratchpadAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "ScratchpadAddress({}) {{ owner: {:?} }}", + &self.to_hex()[0..6], + self.owner + ) + } +} + +impl ScratchpadAddress { + /// Construct a new `ScratchpadAddress` given `owner`. + pub fn new(owner: PublicKey) -> Self { + Self { owner } + } + + /// Return the network name of the scratchpad. + /// This is used to locate the scratchpad on the network. + pub fn xorname(&self) -> XorName { + XorName::from_content(&self.owner.to_bytes()) + } + + /// Serialize this `ScratchpadAddress` instance to a hex-encoded `String`. + pub fn to_hex(&self) -> String { + hex::encode(self.owner.to_bytes()) + } + + /// Deserialize a hex-encoded representation of a `ScratchpadAddress` to a `ScratchpadAddress` instance. + pub fn from_hex(hex: &str) -> Result { + // let bytes = hex::decode(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; + let owner = PublicKey::from_hex(hex).map_err(|_| Error::ScratchpadHexDeserializeFailed)?; + Ok(Self { owner }) + } + + /// Return the owner. + pub fn owner(&self) -> &PublicKey { + &self.owner + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bls::SecretKey; + + #[test] + fn test_scratchpad_hex_conversion() { + let owner = SecretKey::random().public_key(); + let addr = ScratchpadAddress::new(owner); + let hex = addr.to_hex(); + let addr2 = ScratchpadAddress::from_hex(&hex).unwrap(); + + assert_eq!(addr, addr2); + + let bad_hex = format!("{hex}0"); + let err = ScratchpadAddress::from_hex(&bad_hex); + assert_eq!(err, Err(Error::ScratchpadHexDeserializeFailed)); + } +} diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs index 0a7cc7dd71..96a4515526 100644 --- a/sn_protocol/src/storage/header.rs +++ b/sn_protocol/src/storage/header.rs @@ -21,6 +21,7 @@ use xor_name::XorName; #[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] pub enum RecordType { Chunk, + Scratchpad, NonChunk(XorName), } @@ -36,6 +37,8 @@ pub enum RecordKind { Spend, Register, RegisterWithPayment, + Scratchpad, + ScratchpadWithPayment, } impl Serialize for RecordKind { @@ -49,6 +52,8 @@ impl Serialize for RecordKind { Self::Spend => serializer.serialize_u32(2), Self::Register => serializer.serialize_u32(3), Self::RegisterWithPayment => serializer.serialize_u32(4), + Self::Scratchpad => serializer.serialize_u32(5), + Self::ScratchpadWithPayment => serializer.serialize_u32(6), } } } @@ -65,6 +70,8 @@ impl<'de> Deserialize<'de> for RecordKind { 2 => Ok(Self::Spend), 3 => Ok(Self::Register), 4 => Ok(Self::RegisterWithPayment), + 5 => Ok(Self::Scratchpad), + 6 => Ok(Self::ScratchpadWithPayment), _ => Err(serde::de::Error::custom( "Unexpected integer for RecordKind variant", )), @@ -185,6 +192,18 @@ mod tests { .try_serialize()?; assert_eq!(register.len(), RecordHeader::SIZE); + let scratchpad = RecordHeader { + kind: RecordKind::Scratchpad, + } + .try_serialize()?; + 
assert_eq!(scratchpad.len(), RecordHeader::SIZE); + + let scratchpad_with_payment = RecordHeader { + kind: RecordKind::ScratchpadWithPayment, + } + .try_serialize()?; + assert_eq!(scratchpad_with_payment.len(), RecordHeader::SIZE); + Ok(()) } } diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs new file mode 100644 index 0000000000..a1f792da60 --- /dev/null +++ b/sn_protocol/src/storage/scratchpad.rs @@ -0,0 +1,93 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use super::ScratchpadAddress; +use crate::NetworkAddress; +use bls::{PublicKey, Signature}; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; + +use xor_name::XorName; + +/// Scratchpad, a mutable address for encrypted data +#[derive( + Hash, Eq, PartialEq, PartialOrd, Ord, Clone, custom_debug::Debug, Serialize, Deserialize, +)] +pub struct Scratchpad { + /// Network address, derived from the owner's public key. + pub address: ScratchpadAddress, + /// Contained data. This should be encrypted + #[debug(skip)] + pub encrypted_data: Bytes, + /// Monotonically increasing counter to track the number of times this has been updated. + pub counter: u64, + /// Signature over the counter's big-endian bytes extended with `XorName::from_content(encrypted_data)`, from the owning key. + pub signature: Signature, +} + +impl Scratchpad { + /// Creates a new instance of `Scratchpad`. + pub fn new( + owner: PublicKey, + encrypted_data: Bytes, + counter: u64, + signature: Signature, + ) -> Self { + Self { + address: ScratchpadAddress::new(owner), + encrypted_data, + counter, + signature, + } + } + + /// Verifies the signature and content of the scratchpad are valid for the + /// owner's public key. + pub fn is_valid(&self) -> bool { + let mut signing_bytes = self.counter.to_be_bytes().to_vec(); + signing_bytes.extend(self.encrypted_data_hash().to_vec()); // add the content hash + + self.owner().verify(&self.signature, &signing_bytes) + } + + /// Returns the encrypted_data. + pub fn encrypted_data(&self) -> &Bytes { + &self.encrypted_data + } + + /// Returns the encrypted_data hash + pub fn encrypted_data_hash(&self) -> XorName { + XorName::from_content(&self.encrypted_data) + } + + /// Returns the owner. + pub fn owner(&self) -> &PublicKey { + self.address.owner() + } + + /// Returns the address. + pub fn address(&self) -> &ScratchpadAddress { + &self.address + } + + /// Returns the NetworkAddress + pub fn network_address(&self) -> NetworkAddress { + NetworkAddress::ScratchpadAddress(self.address) + } + + /// Returns the name. + pub fn name(&self) -> XorName { + self.address.xorname() + } + + /// Returns size of contained encrypted_data.
+ pub fn payload_size(&self) -> usize { + self.encrypted_data.len() + } +} From a8d10bc91c22196849228f24061408196069264a Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 11 Sep 2024 10:24:56 +0900 Subject: [PATCH 009/255] chore(cli_aut): feat flag to avoid clippy errors --- autonomi/Cargo.toml | 4 ++-- autonomi/src/lib.rs | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index a3fbe99138..6ba4aead48 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -4,10 +4,10 @@ version = "0.1.0" edition = "2021" [features] -default = [] +default = ["data"] full = ["data", "files", "fs", "registers", "transfers"] data = ["transfers"] -files = ["transfers"] +files = ["transfers", "data"] fs = [] local = ["sn_client/local-discovery"] registers = ["transfers"] diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index d3e835666b..3314d8a1b3 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -47,7 +47,10 @@ pub use client::{Client, ConnectError, CONNECT_TIMEOUT_SECS}; mod client; mod secrets; +#[cfg(feature = "data")] mod self_encryption; +#[cfg(feature = "transfers")] mod wallet; +#[cfg(feature = "transfers")] const VERIFY_STORE: bool = true; From 02ce072fe66774307a2d2516aa4136c8a6bd700e Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 11 Sep 2024 10:10:17 +0200 Subject: [PATCH 010/255] feat(global): update libp2p to 0.54.1 --- Cargo.lock | 172 ++++++++++-------------- autonomi/Cargo.toml | 2 +- nat-detection/Cargo.toml | 2 +- nat-detection/src/behaviour/identify.rs | 12 +- sn_cli/Cargo.toml | 2 +- sn_client/Cargo.toml | 2 +- sn_networking/Cargo.toml | 4 +- sn_networking/src/cmd.rs | 2 +- sn_networking/src/driver.rs | 6 +- sn_networking/src/event/kad.rs | 4 +- sn_networking/src/event/swarm.rs | 8 +- sn_node/Cargo.toml | 2 +- sn_node_manager/Cargo.toml | 2 +- sn_node_rpc_client/Cargo.toml | 2 +- sn_peers_acquisition/Cargo.toml | 2 +- sn_protocol/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 2 +- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- 19 files changed, 107 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f305e16756..b28749031c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -380,19 +380,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "asynchronous-codec" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" -dependencies = [ - "bytes", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite", -] - [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -3450,7 +3437,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core 0.51.1", + "windows-core 0.52.0", ] [[package]] @@ -3627,9 +3614,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", ] [[package]] @@ -3780,16 +3764,15 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" -version = "0.53.2" +version = "0.54.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" dependencies = [ "bytes", "either", "futures", "futures-timer", "getrandom 0.2.15", - "instant", 
"libp2p-allow-block-list", "libp2p-autonat", "libp2p-connection-limits", @@ -3819,9 +3802,9 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ "libp2p-core", "libp2p-identity", @@ -3831,30 +3814,36 @@ dependencies = [ [[package]] name = "libp2p-autonat" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95151726170e41b591735bf95c42b888fe4aa14f65216a9fbf0edcc04510586" +checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" dependencies = [ "async-trait", - "asynchronous-codec 0.6.2", + "asynchronous-codec", + "bytes", + "either", "futures", + "futures-bounded", "futures-timer", - "instant", "libp2p-core", "libp2p-identity", "libp2p-request-response", "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.2.0", + "quick-protobuf-codec", "rand 0.8.5", + "rand_core 0.6.4", + "thiserror", "tracing", + "void", + "web-time", ] [[package]] name = "libp2p-connection-limits" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ "libp2p-core", "libp2p-identity", @@ -3864,9 +3853,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.41.3" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" dependencies = [ "either", "fnv", @@ -3892,9 +3881,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.41.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ "async-trait", "futures", @@ -3908,12 +3897,12 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.46.1" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d665144a616dadebdc5fff186b1233488cdcd8bfb1223218ff084b6d052c94f7" +checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" dependencies = [ - "asynchronous-codec 0.7.0", - "base64 0.21.7", + "asynchronous-codec", + "base64 0.22.1", "byteorder", "bytes", "either", @@ -3922,28 +3911,28 @@ dependencies = [ "futures-ticker", "getrandom 0.2.15", "hex_fmt", - "instant", "libp2p-core", "libp2p-identity", "libp2p-swarm", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "regex", "sha2 0.10.8", "smallvec", "tracing", "void", + "web-time", ] [[package]] name = "libp2p-identify" -version = "0.44.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "either", "futures", "futures-bounded", 
@@ -3953,7 +3942,7 @@ dependencies = [ "libp2p-swarm", "lru", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "smallvec", "thiserror", "tracing", @@ -3980,24 +3969,23 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.45.3" +version = "0.46.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" dependencies = [ "arrayvec", - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "either", "fnv", "futures", "futures-bounded", "futures-timer", - "instant", "libp2p-core", "libp2p-identity", "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "sha2 0.10.8", "smallvec", @@ -4005,13 +3993,14 @@ dependencies = [ "tracing", "uint", "void", + "web-time", ] [[package]] name = "libp2p-mdns" -version = "0.45.1" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" dependencies = [ "data-encoding", "futures", @@ -4030,12 +4019,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.14.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "instant", "libp2p-core", "libp2p-identify", "libp2p-identity", @@ -4044,15 +4032,16 @@ dependencies = [ "libp2p-swarm", "pin-project", "prometheus-client", + "web-time", ] [[package]] name = "libp2p-noise" -version = "0.44.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "curve25519-dalek 4.1.3", "futures", @@ -4074,9 +4063,9 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c67296ad4e092e23f92aea3d2bdb6f24eab79c0929ed816dfb460ea2f4567d2b" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" dependencies = [ "bytes", "futures", @@ -4098,11 +4087,11 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.17.2" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d1c667cfabf3dd675c8e3cea63b7b98434ecf51721b7894cbb01d29983a6a9b" +checksum = "10df23d7f5b5adcc129f4a69d6fbd05209e356ccf9e8f4eb10b2692b79c77247" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "either", "futures", @@ -4112,7 +4101,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "static_assertions", "thiserror", @@ -4123,16 +4112,15 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c314fe28368da5e3a262553fb0ad575c1c8934c461e10de10265551478163836" +checksum = 
"1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" dependencies = [ "async-trait", "cbor4ii", "futures", "futures-bounded", "futures-timer", - "instant", "libp2p-core", "libp2p-identity", "libp2p-swarm", @@ -4141,20 +4129,20 @@ dependencies = [ "smallvec", "tracing", "void", + "web-time", ] [[package]] name = "libp2p-swarm" -version = "0.44.2" +version = "0.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" dependencies = [ "either", "fnv", "futures", "futures-timer", "getrandom 0.2.15", - "instant", "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", @@ -4167,13 +4155,14 @@ dependencies = [ "tracing", "void", "wasm-bindgen-futures", + "web-time", ] [[package]] name = "libp2p-swarm-derive" -version = "0.34.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5daceb9dd908417b6dfcfe8e94098bc4aac54500c282e78120b885dadc09b999" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -4183,9 +4172,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.41.0" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" dependencies = [ "futures", "futures-timer", @@ -4200,9 +4189,9 @@ dependencies = [ [[package]] name = "libp2p-tls" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b7b831e55ce2aa6c354e6861a85fdd4dd0a2b97d5e276fabac0e4810a71776" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", @@ -4219,9 +4208,9 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" dependencies = [ "futures", "futures-timer", @@ -4235,9 +4224,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.43.2" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b953b6803a1f3161a989538974d72511c4e48a4af355337b6fb90723c56c05" +checksum = "888b2ff2e5d8dcef97283daab35ad1043d18952b65e05279eecbe02af4c6e347" dependencies = [ "either", "futures", @@ -4256,9 +4245,9 @@ dependencies = [ [[package]] name = "libp2p-websocket-websys" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f95cd8a32fcf94ad1e5c2de37c2a05a5a4188d8358b005859a0fc9e63b6953bc" +checksum = "38cf9b429dd07be52cd82c4c484b1694df4209210a7db3b9ffb00c7606e230c8" dependencies = [ "bytes", "futures", @@ -4274,9 +4263,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.45.2" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddd5265f6b80f94d48a3963541aad183cc598a645755d2f1805a373e41e0716b" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", @@ -5700,26 +5689,13 @@ dependencies = [ "byteorder", ] -[[package]] -name = 
"quick-protobuf-codec" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" -dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", - "quick-protobuf", - "thiserror", - "unsigned-varint 0.7.2", -] - [[package]] name = "quick-protobuf-codec" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec 0.7.0", + "asynchronous-codec", "bytes", "quick-protobuf", "thiserror", @@ -8537,10 +8513,6 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" -dependencies = [ - "asynchronous-codec 0.6.2", - "bytes", -] [[package]] name = "unsigned-varint" diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index a3fbe99138..bff82eb2e0 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -17,7 +17,7 @@ transfers = [] bip39 = "2.0.0" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } -libp2p = "0.53" +libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.29.0" diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index f212b4ebd2..4d546f10be 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -18,7 +18,7 @@ clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" color-eyre = { version = "0.6", default-features = false } futures = "~0.3.13" -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "tcp", "noise", diff --git a/nat-detection/src/behaviour/identify.rs b/nat-detection/src/behaviour/identify.rs index 8489034039..738a01363c 100644 --- a/nat-detection/src/behaviour/identify.rs +++ b/nat-detection/src/behaviour/identify.rs @@ -7,7 +7,11 @@ use crate::{behaviour::PROTOCOL_VERSION, App}; impl App { pub(crate) fn on_event_identify(&mut self, event: identify::Event) { match event { - identify::Event::Received { peer_id, info } => { + identify::Event::Received { + peer_id, + info, + connection_id, + } => { debug!( %peer_id, protocols=?info.protocols, @@ -18,7 +22,7 @@ impl App { // Disconnect if peer has incompatible protocol version. if info.protocol_version != PROTOCOL_VERSION { - warn!(%peer_id, "Incompatible protocol version. Disconnecting from peer."); + warn!(conn_id=%connection_id, %peer_id, "Incompatible protocol version. Disconnecting from peer."); let _ = self.swarm.disconnect_peer_id(peer_id); return; } @@ -29,12 +33,12 @@ impl App { .iter() .any(|p| *p == autonat::DEFAULT_PROTOCOL_NAME) { - warn!(%peer_id, "Peer does not support AutoNAT. Disconnecting from peer."); + warn!(conn_id=%connection_id, %peer_id, "Peer does not support AutoNAT. Disconnecting from peer."); let _ = self.swarm.disconnect_peer_id(peer_id); return; } - info!(%peer_id, "Received peer info: confirmed it supports AutoNAT"); + info!(conn_id=%connection_id, %peer_id, "Received peer info: confirmed it supports AutoNAT"); // If we're a client and the peer has (a) global listen address(es), // add it as an AutoNAT server. 
diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index 4433821a98..ebca335d94 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -45,7 +45,7 @@ dirs-next = "~2.0.0" futures = "~0.3.13" hex = "~0.4.3" indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" rayon = "1.8.0" reqwest = { version = "0.12.2", default-features = false, features = [ diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index c6209ea077..32cc1b621d 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -41,7 +41,7 @@ custom_debug = "~0.6.1" futures = "~0.3.13" hex = "~0.4.3" itertools = "~0.12.1" -libp2p = { version = "0.53", features = ["identify"] } +libp2p = { version = "0.54.1", features = ["identify"] } petgraph = { version = "0.6.4", features = ["serde-1"] } prometheus-client = { version = "0.22", optional = true } rand = { version = "~0.8.5", features = ["small_rng"] } diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 2641c78c5c..f96c8567ce 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -22,7 +22,7 @@ encrypt-records = [] [dependencies] lazy_static = "~1.4.0" -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "dns", "kad", @@ -93,7 +93,7 @@ crate-type = ["cdylib", "rlib"] [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.2.12", features = ["js"] } -libp2p = { version = "0.53", features = [ +libp2p = { version = "0.54.1", features = [ "tokio", "dns", "kad", diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index d70fc3eba5..bd5e17596c 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -732,7 +732,7 @@ impl SwarmDriver { if let Some(distance) = range.0.ilog2() { let peers_in_kbucket = kbucket .iter() - .map(|peer_entry| peer_entry.node.key.clone().into_preimage()) + .map(|peer_entry| (*peer_entry.node.key).into_preimage()) .collect::<Vec<PeerId>>(); let _ = ilog2_kbuckets.insert(distance, peers_in_kbucket); } else { diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index f7d8aedd68..e4fd7b38bc 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -76,6 +76,8 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 /// Interval over which we query relay manager to check if we can make any more reservations. pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); +const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); + /// The ways in which the Get Closest queries are used. pub(crate) enum PendingGetClosestType { /// The network discovery method is present at the networking layer @@ -314,7 +316,7 @@ impl NetworkBuilder { /// /// Returns an error if there is a problem initializing the mDNS behaviour. pub fn build_node(self) -> Result<(Network, mpsc::Receiver<NetworkEvent>, SwarmDriver)> { - let mut kad_cfg = kad::Config::default(); + let mut kad_cfg = kad::Config::new(KAD_STREAM_PROTOCOL_ID); let _ = kad_cfg .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual) // how often a node will replicate records that it has stored, aka copying the key-value pair to other nodes @@ -399,7 +401,7 @@ impl NetworkBuilder { pub fn build_client(self) -> Result<(Network, mpsc::Receiver<NetworkEvent>, SwarmDriver)> { // Create a Kademlia behaviour for client mode, i.e.
set req/resp protocol // to outbound-only mode and don't listen on any address - let mut kad_cfg = kad::Config::default(); // default query timeout is 60 secs + let mut kad_cfg = kad::Config::new(KAD_STREAM_PROTOCOL_ID); // default query timeout is 60 secs // 1mb packet size let _ = kad_cfg diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index b9d0aef3d9..7b165fcea9 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -53,7 +53,7 @@ impl SwarmDriver { // following criteria: // 1, `stats.num_pending()` is 0 // 2, `stats.duration()` is longer than a defined period - current_closest.extend(closest_peers.peers.clone()); + current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id)); if current_closest.len() >= usize::from(K_VALUE) || step.last { let (get_closest_type, current_closest) = entry.remove(); match get_closest_type { @@ -101,7 +101,7 @@ impl SwarmDriver { // Trust them and leave for the caller to check whether they are enough. match err { GetClosestPeersError::Timeout { ref peers, .. } => { - current_closest.extend(peers); + current_closest.extend(peers.iter().map(|i| i.peer_id)); } } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index e0ae2ea687..af74a1455e 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -115,8 +115,12 @@ impl SwarmDriver { event_string = "identify"; match *iden { - libp2p::identify::Event::Received { peer_id, info } => { - debug!(%peer_id, ?info, "identify: received info"); + libp2p::identify::Event::Received { + peer_id, + info, + connection_id, + } => { + debug!(conn_id=%connection_id, %peer_id, ?info, "identify: received info"); if info.protocol_version != IDENTIFY_PROTOCOL_STR.to_string() { warn!(?info.protocol_version, "identify: {peer_id:?} does not have the same protocol. 
Our IDENTIFY_PROTOCOL_STR: {:?}", IDENTIFY_PROTOCOL_STR.as_str()); diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index c598fbad39..1a30d1dd7a 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -39,7 +39,7 @@ file-rotate = "0.7.3" futures = "~0.3.13" hex = "~0.4.3" itertools = "~0.12.1" -libp2p = { version = "0.53", features = ["tokio", "dns", "kad", "macros"] } +libp2p = { version = "0.54.1", features = ["tokio", "dns", "kad", "macros"] } prometheus-client = { version = "0.22", optional = true } # watch out updating this, protoc compiler needs to be installed on all build systems # arm builds + musl are very problematic diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 80a8f02f17..033738bcd5 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -36,7 +36,7 @@ colored = "2.0.4" color-eyre = "~0.6" dirs-next = "2.0.0" indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.53", features = [] } +libp2p = { version = "0.54.1", features = [] } libp2p-identity = { version = "0.2.7", features = ["rand"] } prost = { version = "0.9" } rand = "0.8.5" diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 3e6bc323fa..634ee07a19 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -21,7 +21,7 @@ bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.2" hex = "~0.4.3" -libp2p = { version="0.53", features = ["kad"]} +libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", version = "0.110.0" } sn_logging = { path = "../sn_logging", version = "0.2.33" } diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index ebd44cbef9..2e013b2972 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -18,7 +18,7 @@ websockets = [] [dependencies] clap = { version = "4.2.1", features = ["derive", "env"] } lazy_static = "~1.4.0" -libp2p = { version="0.53", features = [] } +libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } sn_protocol = { path = "../sn_protocol", version = "0.17.8", optional = true} diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 91feb1a370..e9f60ac753 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -23,7 +23,7 @@ custom_debug = "~0.6.1" dirs-next = "~2.0.0" hex = "~0.4.3" lazy_static = "1.4.0" -libp2p = { version="0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index e09c63fe53..422991105a 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -12,7 +12,7 @@ version = "0.3.11" [dependencies] async-trait = "0.1" dirs-next = "2.0.0" -libp2p = { version = "0.53", features = ["kad"] } +libp2p = { version = "0.54.1", features = ["kad"] } libp2p-identity = { version = "0.2.7", features = ["rand"] } prost = { version = "0.9" } serde = { version = "1.0", features = ["derive"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 20767d938c..3961b1df5e 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -21,7 +21,7 @@ custom_debug = 
"~0.6.1" dirs-next = "~2.0.0" hex = "~0.4.3" lazy_static = "~1.4.0" -libp2p = { version = "0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = { version = "~0.8.5", features = ["small_rng"] } rmp-serde = "1.1.1" secrecy = "0.8.0" diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 9642cd6e56..3eaf871c11 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -12,6 +12,6 @@ version = "0.4.4" [dependencies] color-eyre = "~0.6.2" dirs-next = "~2.0.0" -libp2p = { version="0.53", features = ["identify", "kad"] } +libp2p = { version = "0.54.1", features = ["identify", "kad"] } serde = { version = "1.0.133", features = [ "derive"]} serde_json = "1.0" From 73a21fe62df780a2db5f88069a5fae7c1e9512fe Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 20 Aug 2024 08:46:19 +0200 Subject: [PATCH 011/255] feat(networking): increase circuit bytes limit --- sn_networking/src/driver.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index f7d8aedd68..7e0d9883d6 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -600,6 +600,8 @@ impl NetworkBuilder { max_circuits: 1024, // The total amount of relayed connections at any given moment. max_circuits_per_peer: 256, // Amount of relayed connections per peer (both dst and src) circuit_src_rate_limiters: vec![], // No extra rate limiting for now + // We should at least be able to relay packets with chunks etc. + max_circuit_bytes: MAX_PACKET_SIZE as u64, ..Default::default() }; libp2p::relay::Behaviour::new(peer_id, relay_server_cfg) From 1485c143bea491af95c9687538aec03e1a077b98 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 11 Sep 2024 17:09:48 +0200 Subject: [PATCH 012/255] feat(launchpad): disable disk with not enough space --- .../src/components/popup/change_drive.rs | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 3c0d22906d..448c09a507 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -23,7 +23,10 @@ use ratatui::{ use crate::{ action::{Action, OptionsActions}, - components::Component, + components::{ + popup::manage_nodes::{GB, GB_PER_NODE}, + Component, + }, config::get_launchpad_nodes_data_dir_path, mode::{InputMode, Scene}, style::{ @@ -61,6 +64,7 @@ impl ChangeDrivePopup { .map(|(drive_name, mountpoint, space, available)| { let size_str = format!("{:.2} GB", *space as f64 / 1e9); let size_str_cloned = size_str.clone(); + let has_enough_space = *space >= (GB_PER_NODE * GB) as u64; DriveItem { name: drive_name.to_string(), mountpoint: mountpoint.clone(), @@ -75,6 +79,8 @@ impl ChangeDrivePopup { DriveStatus::Selected } else if !available { DriveStatus::NotAvailable + } else if !has_enough_space { + DriveStatus::NotEnoughSpace } else { DriveStatus::NotSelected }, @@ -101,7 +107,9 @@ impl ChangeDrivePopup { /// fn deselect_all(&mut self) { for item in &mut self.items.items { - if item.status != DriveStatus::NotAvailable { + if item.status != DriveStatus::NotAvailable + && item.status != DriveStatus::NotEnoughSpace + { item.status = DriveStatus::NotSelected; } } @@ -382,7 +390,8 @@ impl Component for ChangeDrivePopup { self.items.previous(); let drive = self.return_selection(); self.can_select = drive.mountpoint != self.drive_selection.mountpoint - && 
drive.status != DriveStatus::NotAvailable; + && drive.status != DriveStatus::NotAvailable + && drive.status != DriveStatus::NotEnoughSpace; } vec![] } @@ -391,7 +400,8 @@ impl Component for ChangeDrivePopup { self.items.next(); let drive = self.return_selection(); self.can_select = drive.mountpoint != self.drive_selection.mountpoint - && drive.status != DriveStatus::NotAvailable; + && drive.status != DriveStatus::NotAvailable + && drive.status != DriveStatus::NotEnoughSpace; } vec![] } @@ -558,6 +568,7 @@ enum DriveStatus { Selected, #[default] NotSelected, + NotEnoughSpace, NotAvailable, } @@ -586,6 +597,12 @@ impl DriveItem { Span::raw(" ".repeat(spaces)), Span::styled(self.size.clone(), Style::default().fg(GHOST_WHITE)), ]), + DriveStatus::NotEnoughSpace => Line::from(vec![ + Span::raw(" "), + Span::styled(self.name.clone(), Style::default().fg(COOL_GREY)), + Span::raw(" ".repeat(spaces)), + Span::styled(self.size.clone(), Style::default().fg(COOL_GREY)), + ]), DriveStatus::NotAvailable => { let legend = "No Access"; let spaces = width - self.name.len() - legend.len() - " ".len() - 4; From 306d9e490e3178efdb0cfed28b96fb5cdbe86ff0 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 11 Sep 2024 23:57:08 +0800 Subject: [PATCH 013/255] fix(network): register record shall not be verified by entire content --- autonomi/src/client/data.rs | 1 + autonomi/src/client/registers.rs | 1 + autonomi/src/client/transfers.rs | 1 + sn_client/src/api.rs | 6 ++++++ sn_client/src/register.rs | 1 + sn_networking/src/driver.rs | 35 ++++++++++++++++++++++++++++++-- sn_networking/src/event/kad.rs | 2 +- sn_networking/src/transfers.rs | 2 ++ sn_node/src/replication.rs | 3 +++ 9 files changed, 49 insertions(+), 3 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 3522da2251..0fca7f398a 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -88,6 +88,7 @@ impl Client { retry_strategy: None, target_record: None, expected_holders: HashSet::new(), + is_register: false, }; let record = self.network.get_record_from_network(key, &get_cfg).await?; let header = RecordHeader::from_record(&record)?; diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 79d302d787..8a8657c5b3 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -138,6 +138,7 @@ impl Client { retry_strategy: None, target_record: None, expected_holders: Default::default(), + is_register: true, }; let record = self.network.get_record_from_network(key, &get_cfg).await?; diff --git a/autonomi/src/client/transfers.rs b/autonomi/src/client/transfers.rs index 9d8ba9f480..7e34b93209 100644 --- a/autonomi/src/client/transfers.rs +++ b/autonomi/src/client/transfers.rs @@ -318,6 +318,7 @@ async fn store_spend(network: Network, spend: SignedSpend) -> Result<(), Network retry_strategy: None, target_record: record_to_verify, expected_holders, + is_register: false, }; let put_cfg = PutRecordCfg { put_quorum: Quorum::Majority, diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index b279ed9e31..5e9035ce37 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -617,6 +617,7 @@ impl Client { retry_strategy, target_record: None, // Not used since we use ChunkProof expected_holders: Default::default(), + is_register: false, }; // The `ChunkWithPayment` is only used to send out via PutRecord. // The holders shall only hold the `Chunk` copies. 
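Every `GetRecordCfg` construction site in the client crates gains the new flag, and only the register paths set it to `true`. A representative construction for a register fetch, mirroring the call sites above, is sketched below; the import path assumes the struct is re-exported from `sn_networking`, and the quorum value is illustrative:

```rust
use std::collections::HashSet;

use libp2p::kad::Quorum;
use sn_networking::GetRecordCfg;

// Sketch only: a register fetch opts in to root-value matching, so a
// fetched record that differs byte-for-byte from a `target_record` can
// still verify as long as the register roots agree.
fn register_fetch_cfg() -> GetRecordCfg {
    GetRecordCfg {
        get_quorum: Quorum::One,
        retry_strategy: None,
        target_record: None,
        expected_holders: HashSet::new(),
        is_register: true,
    }
}
```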
@@ -702,6 +703,7 @@ retry_strategy: Some(retry_strategy.unwrap_or(RetryStrategy::Quick)), target_record: None, expected_holders, + is_register: false, }; let record = self.network.get_record_from_network(key, &get_cfg).await?; let header = RecordHeader::from_record(&record)?; @@ -856,6 +858,7 @@ retry_strategy: None, target_record: record_to_verify, expected_holders, + is_register: false, }; let put_cfg = PutRecordCfg { put_quorum: Quorum::Majority, @@ -903,6 +906,7 @@ retry_strategy: Some(RetryStrategy::Balanced), target_record: None, expected_holders: Default::default(), + is_register: false, }, ) .await @@ -919,6 +923,7 @@ retry_strategy: None, target_record: None, expected_holders: Default::default(), + is_register: false, }, ) .await @@ -934,6 +939,7 @@ retry_strategy: None, target_record: None, expected_holders: Default::default(), + is_register: false, }, ) .await diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs index a19674aca4..1b164a2f71 100644 --- a/sn_client/src/register.rs +++ b/sn_client/src/register.rs @@ -844,6 +844,7 @@ retry_strategy: Some(RetryStrategy::Quick), target_record: record_to_verify, expected_holders, + is_register: true, }; let put_cfg = PutRecordCfg { put_quorum: Quorum::All, diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index f7d8aedd68..7ef2cc10b1 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -49,13 +49,14 @@ use libp2p::{ use prometheus_client::{metrics::info::Info, registry::Registry}; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, - storage::RetryStrategy, + storage::{try_deserialize_record, RetryStrategy}, version::{ IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR, }, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; +use sn_registers::SignedRegister; use sn_transfers::PaymentQuote; use std::{ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, @@ -138,11 +139,41 @@ pub struct GetRecordCfg { pub target_record: Option<Record>, /// Logs if the record was not fetched from the provided set of peers. pub expected_holders: HashSet<PeerId>, + /// For register records, only the root value shall be checked, not the entire content.
+ pub is_register: bool, } impl GetRecordCfg { pub fn does_target_match(&self, record: &Record) -> bool { - self.target_record.as_ref().is_some_and(|t| t == record) + if let Some(ref target_record) = self.target_record { + if self.is_register { + let pretty_key = PrettyPrintRecordKey::from(&target_record.key); + + let fetched_register = match try_deserialize_record::<SignedRegister>(record) { + Ok(fetched_register) => fetched_register, + Err(err) => { + error!("Failed to deserialize register from fetched record {pretty_key:?}: {err:?}"); + return false; + } + }; + let target_register = match try_deserialize_record::<SignedRegister>(target_record) + { + Ok(target_register) => target_register, + Err(err) => { + error!("Failed to deserialize register from target record {pretty_key:?}: {err:?}"); + return false; + } + }; + + // Only compare root values of the register + target_register.base_register().read() == fetched_register.base_register().read() + } else { + target_record == record + } + } else { + // No target_record to check against + true + } } } diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index b9d0aef3d9..237a4e3ec3 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -612,7 +612,7 @@ impl SwarmDriver { record: Record, cfg: &GetRecordCfg, ) -> Result<()> { - let res = if cfg.target_record.is_none() || cfg.does_target_match(&record) { + let res = if cfg.does_target_match(&record) { Ok(record) } else { Err(GetRecordError::RecordDoesNotMatch(record)) diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 29ba11976e..76b6349ce1 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -35,6 +35,7 @@ impl Network { // what we will have in hand. target_record: None, expected_holders: Default::default(), + is_register: false, }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( @@ -55,6 +56,7 @@ impl Network { retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), + is_register: false, }; let record = match self.get_record_from_network(key.clone(), &get_cfg).await { Ok(record) => record, diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index ca631e32f4..59e0cff078 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -75,6 +75,9 @@ impl Node { retry_strategy: None, target_record: None, expected_holders: Default::default(), + // This is for replication, which doesn't have a target_record to verify against. + // Hence the value of the flag doesn't actually matter.
+ is_register: false, }; match node.network().get_record_from_network(key, &get_cfg).await { Ok(record) => record, From 2d6a67660b2135544c46591e8e3faf49ff290dcf Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 11 Sep 2024 18:05:28 +0200 Subject: [PATCH 014/255] feat(launchpad): available disk space considers number of nodes --- node-launchpad/src/app.rs | 3 +- .../src/components/popup/change_drive.rs | 168 ++++++++++-------- 2 files changed, 100 insertions(+), 71 deletions(-) diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index f5163a5837..f456fdb6b9 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -115,7 +115,8 @@ impl App { // Popups let reset_nodes = ResetNodesPopup::default(); let manage_nodes = ManageNodes::new(app_data.nodes_to_start, storage_mountpoint.clone())?; - let change_drive = ChangeDrivePopup::new(storage_mountpoint.clone())?; + let change_drive = + ChangeDrivePopup::new(storage_mountpoint.clone(), app_data.nodes_to_start)?; let change_connection_mode = ChangeConnectionModePopUp::new(connection_mode)?; let port_range = PortRangePopUp::new(connection_mode, port_from, port_to); let beta_programme = BetaProgramme::new(app_data.discord_username.clone()); diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 448c09a507..8efeeaed88 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -47,56 +47,25 @@ enum ChangeDriveState { pub struct ChangeDrivePopup { active: bool, state: ChangeDriveState, - items: StatefulList<DriveItem>, + items: Option<StatefulList<DriveItem>>, drive_selection: DriveItem, drive_selection_initial_state: DriveItem, + nodes_to_start: usize, + storage_mountpoint: PathBuf, can_select: bool, // Used to enable the "Change Drive" button based on conditions } impl ChangeDrivePopup { - pub fn new(storage_mountpoint: PathBuf) -> Result<Self> { - let drives_and_space = system::get_list_of_available_drives_and_available_space()?; - - let mut selected_connection_mode: DriveItem = DriveItem::default(); - // Create a vector of DriveItem from drives_and_space - let drives_items: Vec<DriveItem> = drives_and_space - .iter() - .map(|(drive_name, mountpoint, space, available)| { - let size_str = format!("{:.2} GB", *space as f64 / 1e9); - let size_str_cloned = size_str.clone(); - let has_enough_space = *space >= (GB_PER_NODE * GB) as u64; - DriveItem { - name: drive_name.to_string(), - mountpoint: mountpoint.clone(), - size: size_str, - status: if mountpoint == &storage_mountpoint { - selected_connection_mode = DriveItem { - name: drive_name.to_string(), - mountpoint: mountpoint.clone(), - size: size_str_cloned, - status: DriveStatus::Selected, - }; - DriveStatus::Selected - } else if !available { - DriveStatus::NotAvailable - } else if !has_enough_space { - DriveStatus::NotEnoughSpace - } else { - DriveStatus::NotSelected - }, - } - }) - .collect::<Vec<DriveItem>>(); + pub fn new(storage_mountpoint: PathBuf, nodes_to_start: usize) -> Result<Self> { debug!("Drive Mountpoint in Config: {:?}", storage_mountpoint); - debug!("Drives and space: {:?}", drives_and_space); - debug!("Drives items: {:?}", drives_items); - let items = StatefulList::with_items(drives_items); - Ok(Self { + Ok(ChangeDrivePopup { active: false, state: ChangeDriveState::Selection, - items, - drive_selection: selected_connection_mode.clone(), - drive_selection_initial_state: selected_connection_mode.clone(), + items: None, + drive_selection: DriveItem::default(), + 
drive_selection_initial_state: DriveItem::default(), + nodes_to_start, + storage_mountpoint, can_select: false, }) } @@ -106,11 +75,13 @@ impl ChangeDrivePopup { /// Deselects all drives in the list of items /// fn deselect_all(&mut self) { - for item in &mut self.items.items { - if item.status != DriveStatus::NotAvailable - && item.status != DriveStatus::NotEnoughSpace - { - item.status = DriveStatus::NotSelected; + if let Some(ref mut items) = self.items { + for item in &mut items.items { + if item.status != DriveStatus::NotAvailable + && item.status != DriveStatus::NotEnoughSpace + { + item.status = DriveStatus::NotSelected; + } } } } @@ -118,32 +89,74 @@ impl ChangeDrivePopup { /// fn assign_drive_selection(&mut self) { self.deselect_all(); - if let Some(i) = self.items.state.selected() { - self.items.items[i].status = DriveStatus::Selected; - self.drive_selection = self.items.items[i].clone(); + if let Some(ref mut items) = self.items { + if let Some(i) = items.state.selected() { + items.items[i].status = DriveStatus::Selected; + self.drive_selection = items.items[i].clone(); + } } } /// Highlights the drive that is currently selected in the list of items. /// fn select_drive(&mut self) { self.deselect_all(); - for (index, item) in self.items.items.iter_mut().enumerate() { - if item.mountpoint == self.drive_selection.mountpoint { - item.status = DriveStatus::Selected; - self.items.state.select(Some(index)); - break; + if let Some(ref mut items) = self.items { + for (index, item) in items.items.iter_mut().enumerate() { + if item.mountpoint == self.drive_selection.mountpoint { + item.status = DriveStatus::Selected; + items.state.select(Some(index)); + break; + } } } } /// Returns the highlighted drive in the list of items. /// fn return_selection(&mut self) -> DriveItem { - if let Some(i) = self.items.state.selected() { - return self.items.items[i].clone(); + if let Some(ref mut items) = self.items { + if let Some(i) = items.state.selected() { + return items.items[i].clone(); + } } DriveItem::default() } + /// Updates the drive items based on the current nodes_to_start value. 
+ fn update_drive_items(&mut self) -> Result<()> { + let drives_and_space = system::get_list_of_available_drives_and_available_space()?; + let drives_items: Vec<DriveItem> = drives_and_space + .iter() + .map(|(drive_name, mountpoint, space, available)| { + let size_str = format!("{:.2} GB", *space as f64 / 1e9); + let has_enough_space = *space >= (GB_PER_NODE * GB * self.nodes_to_start) as u64; + DriveItem { + name: drive_name.to_string(), + mountpoint: mountpoint.clone(), + size: size_str.clone(), + status: if *mountpoint == self.storage_mountpoint { + self.drive_selection = DriveItem { + name: drive_name.to_string(), + mountpoint: mountpoint.clone(), + size: size_str.clone(), + status: DriveStatus::Selected, + }; + DriveStatus::Selected + } else if !available { + DriveStatus::NotAvailable + } else if !has_enough_space { + DriveStatus::NotEnoughSpace + } else { + DriveStatus::NotSelected + }, + } + }) + .collect(); + self.items = Some(StatefulList::with_items(drives_items.clone())); + debug!("Drives and space: {:?}", drives_and_space); + debug!("Drives items: {:?}", drives_items); + Ok(()) + } + // -- Draw functions -- // Draws the Drive Selection screen @@ -180,6 +193,8 @@ impl ChangeDrivePopup { // Drive selector let items: Vec<ListItem> = self .items + .as_ref() + .unwrap() .items .iter() .enumerate() @@ -191,7 +206,7 @@ impl ChangeDrivePopup { .highlight_style(Style::default().bg(INDIGO)) .highlight_spacing(HighlightSpacing::Always); - f.render_stateful_widget(items, layer_two[0], &mut self.items.state); + f.render_stateful_widget(items, layer_two[0], &mut self.items.clone().unwrap().state); // Dash let dash = Block::new() @@ -386,22 +401,28 @@ impl Component for ChangeDrivePopup { vec![Action::SwitchScene(Scene::Options)] } KeyCode::Up => { - if self.items.items.len() > 1 { - self.items.previous(); - let drive = self.return_selection(); - self.can_select = drive.mountpoint != self.drive_selection.mountpoint - && drive.status != DriveStatus::NotAvailable - && drive.status != DriveStatus::NotEnoughSpace; + if let Some(ref mut items) = self.items { + if items.items.len() > 1 { + items.previous(); + let drive = self.return_selection(); + self.can_select = drive.mountpoint + != self.drive_selection.mountpoint + && drive.status != DriveStatus::NotAvailable + && drive.status != DriveStatus::NotEnoughSpace; + } } vec![] } KeyCode::Down => { - if self.items.items.len() > 1 { - self.items.next(); - let drive = self.return_selection(); - self.can_select = drive.mountpoint != self.drive_selection.mountpoint - && drive.status != DriveStatus::NotAvailable - && drive.status != DriveStatus::NotEnoughSpace; + if let Some(ref mut items) = self.items { + if items.items.len() > 1 { + items.next(); + let drive = self.return_selection(); + self.can_select = drive.mountpoint + != self.drive_selection.mountpoint + && drive.status != DriveStatus::NotAvailable + && drive.status != DriveStatus::NotEnoughSpace; + } } vec![] } @@ -465,6 +486,7 @@ impl Component for ChangeDrivePopup { self.active = true; self.can_select = false; self.state = ChangeDriveState::Selection; + let _ = self.update_drive_items(); self.select_drive(); Some(Action::SwitchInputMode(InputMode::Entry)) } @@ -480,6 +502,12 @@ impl Component for ChangeDrivePopup { self.select_drive(); None } + // We need to refresh the list of available drives because of the space + Action::StoreNodesToStart(ref nodes_to_start) => { + self.nodes_to_start = *nodes_to_start; + let _ = self.update_drive_items(); + None + } _ => None, }; Ok(send_back) } @@ -518,7 +546,7 @@ impl
Component for ChangeDrivePopup { } } -#[derive(Default)] +#[derive(Default, Clone)] struct StatefulList<T> { state: ListState, items: Vec<T>, From 5120900b0f77662c753ee5a1d04512c011ab2dbf Mon Sep 17 00:00:00 2001 From: loziniak Date: Wed, 11 Sep 2024 15:07:23 +0200 Subject: [PATCH 015/255] feat(client): enable Debug --- sn_client/src/lib.rs | 2 +- sn_networking/src/lib.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs index 3af4d517b3..27594bfa4a 100644 --- a/sn_client/src/lib.rs +++ b/sn_client/src/lib.rs @@ -150,7 +150,7 @@ pub async fn get_data(peer: &str, data_address: &str) -> std::result::Result<(), } /// Client API implementation to store and get data. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Client { network: Network, events_broadcaster: ClientEventsBroadcaster, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index a877b206f4..f704644025 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -148,7 +148,7 @@ pub fn sort_peers_by_key<'a, T>( Ok(sorted_peers) } -#[derive(Clone)] +#[derive(Clone, Debug)] /// API to interact with the underlying Swarm pub struct Network { inner: Arc<NetworkInner>, } /// The actual implementation of the Network. The other is just a wrapper around this, so that we don't expose /// the Arc from the interface. +#[derive(Debug)] struct NetworkInner { network_swarm_cmd_sender: mpsc::Sender<NetworkSwarmCmd>, local_swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>, From eb382eb049142685a7f6fc99b10199c625098c1f Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 09:10:54 +0200 Subject: [PATCH 016/255] fix(launchpad): underline style in beta programme makes the screen flicker --- node-launchpad/src/components/popup/beta_programme.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index bb64865c19..7dc70f7576 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -226,11 +226,7 @@ impl Component for BetaProgramme { ); let input = Paragraph::new(Span::styled( format!("{}{} ", spaces, self.discord_input_filed.value()), - Style::default() - .fg(VIVID_SKY_BLUE) - .bg(INDIGO) - .underlined() - .underline_color(VIVID_SKY_BLUE), + Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); From b337d6bea3a66ac9fd5889da0be07bcce5ea2561 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 10:12:43 +0200 Subject: [PATCH 017/255] feat(launchpad): adding quit to options screen --- node-launchpad/src/components/options.rs | 215 ++++++++++-------- 1 file changed, 113 insertions(+), 102 deletions(-) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 2e234726f9..2be6af49d5 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -71,10 +71,11 @@ impl Component for Options { .constraints( [ Constraint::Length(1), - Constraint::Length(9), - Constraint::Length(5), - Constraint::Length(5), - Constraint::Length(5), + Constraint::Length(7), + Constraint::Length(3), + Constraint::Length(3), + Constraint::Length(3), + Constraint::Length(3), ] .as_ref(), ) @@ -85,6 +86,8 @@ impl Component for Options { f.render_stateful_widget(header, layout[0], &mut
SelectedMenuItem::Options); // Storage Drive + let port_legend = " Edit Port Range "; + let port_key = " [Ctrl+P] "; let block1 = Block::default() .title(" Device Options ") .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let storage_drivename = Table::new( vec![ - Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), + Row::new(vec![Line::from(vec![])]), Row::new(vec![ Cell::from( Line::from(vec![Span::styled( @@ -121,11 +120,6 @@ impl Component for Options { .alignment(Alignment::Right), ), ]), - Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), Row::new(vec![ Cell::from( Line::from(vec![Span::styled( @@ -149,11 +143,6 @@ impl Component for Options { .alignment(Alignment::Right), ), ]), - Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), Row::new(vec![ Cell::from( Line::from(vec![Span::styled( @@ -182,7 +171,7 @@ impl Component for Options { Cell::from( Line::from(vec![ Span::styled( - " Edit Port Range ", + port_legend, if self.connection_mode == ConnectionMode::CustomPorts { Style::default().fg(VERY_LIGHT_AZURE) } else { @@ -190,7 +179,7 @@ impl Component for Options { }, ), Span::styled( - " [Ctrl+P] ", + port_key, if self.connection_mode == ConnectionMode::CustomPorts { Style::default().fg(GHOST_WHITE) } else { @@ -201,17 +190,20 @@ impl Component for Options { .alignment(Alignment::Right), ), ]), + Row::new(vec![Line::from(vec![])]), ], &[ Constraint::Length(18), - Constraint::Percentage(25), Constraint::Fill(1), + Constraint::Length((port_legend.len() + port_key.len()) as u16), ], ) .block(block1) .style(Style::default().fg(GHOST_WHITE)); // Beta Rewards Program + let beta_legend = " Edit Discord Username "; + let beta_key = " [Ctrl+B] "; let block2 = Block::default() .title(" Beta Rewards Program ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -219,50 +211,41 @@ impl Component for Options { .borders(Borders::ALL) .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let beta_rewards = Table::new( - vec![ - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from( - Line::from(vec![Span::styled( - " Discord Username: ", - Style::default().fg(LIGHT_PERIWINKLE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![Span::styled( - format!(" {} ", self.discord_username), - Style::default().fg(VIVID_SKY_BLUE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![ - Span::styled( - " Edit Discord Username ", - Style::default().fg(VERY_LIGHT_AZURE), - ), - Span::styled(" [Ctrl+B] ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Right), - ), - ]), - ], + vec![Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Discord Username: ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![Span::styled( + format!(" {} ", self.discord_username), + Style::default().fg(VIVID_SKY_BLUE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(beta_legend, Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(beta_key, Style::default().fg(GHOST_WHITE)), + ]) + 
 .alignment(Alignment::Right), + ), + ])], &[ Constraint::Length(18), - Constraint::Percentage(25), Constraint::Fill(1), + Constraint::Length((beta_legend.len() + beta_key.len()) as u16), ], ) .block(block2) .style(Style::default().fg(GHOST_WHITE)); // Access Logs + let logs_legend = " Access Logs "; + let logs_key = " [Ctrl+L] "; let block3 = Block::default() .title(" Access Logs ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -270,35 +253,33 @@ impl Component for Options { .borders(Borders::ALL) .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let logs_folder = Table::new( - vec![ - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from( - Line::from(vec![Span::styled( - " Open the Logs folder on this device ", - Style::default().fg(LIGHT_PERIWINKLE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![ - Span::styled(" Access Logs ", Style::default().fg(VERY_LIGHT_AZURE)), - Span::styled(" [Ctrl+L] ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Right), - ), - ]), + vec![Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Open the Logs folder on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(logs_legend, Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(logs_key, Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ])], + &[ + Constraint::Fill(1), + Constraint::Length((logs_legend.len() + logs_key.len()) as u16), ], - &[Constraint::Percentage(50), Constraint::Percentage(50)], ) .block(block3) .style(Style::default().fg(GHOST_WHITE)); // Reset All Nodes + let reset_legend = " Begin Reset "; + let reset_key = " [Ctrl+R] "; let block4 = Block::default() .title(" Reset All Nodes ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -306,38 +287,68 @@ impl Component for Options { .borders(Borders::ALL) .border_style(Style::default().fg(EUCALYPTUS)); let reset_nodes = Table::new( - vec![ - Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from( - Line::from(vec![Span::styled( - " Remove and Reset all Nodes on this device ", - Style::default().fg(LIGHT_PERIWINKLE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![ - Span::styled(" Begin Reset ", Style::default().fg(EUCALYPTUS)), - Span::styled(" [Ctrl+R] ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Right), - ), - ]), + vec![Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Remove and Reset all Nodes on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(reset_legend, Style::default().fg(EUCALYPTUS)), + Span::styled(reset_key, Style::default().fg(GHOST_WHITE)), + ]) 
.alignment(Alignment::Right), + ), + ])], &[ Constraint::Length(18), - Constraint::Percentage(25), Constraint::Fill(1), + Constraint::Length((beta_legend.len() + beta_key.len()) as u16), ], ) .block(block2) .style(Style::default().fg(GHOST_WHITE)); // Access Logs + let logs_legend = " Access Logs "; + let logs_key = " [Ctrl+L] "; let block3 = Block::default() .title(" Access Logs ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -270,35 +253,33 @@ impl Component for Options { .borders(Borders::ALL) .border_style(Style::default().fg(VERY_LIGHT_AZURE)); let logs_folder = Table::new( - vec![ - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from( - Line::from(vec![Span::styled( - " Open the Logs folder on this device ", - Style::default().fg(LIGHT_PERIWINKLE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![ - Span::styled(" Access Logs ", Style::default().fg(VERY_LIGHT_AZURE)), - Span::styled(" [Ctrl+L] ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Right), - ), - ]), + vec![Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Open the Logs folder on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(logs_legend, Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(logs_key, Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ])], + &[ + Constraint::Fill(1), + Constraint::Length((logs_legend.len() + logs_key.len()) as u16), ], - &[Constraint::Percentage(50), Constraint::Percentage(50)], ) .block(block3) .style(Style::default().fg(GHOST_WHITE)); // Reset All Nodes + let reset_legend = " Begin Reset "; + let reset_key = " [Ctrl+ R] "; let block4 = Block::default() .title(" Reset All Nodes ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -306,38 +287,68 @@ impl Component for Options { .borders(Borders::ALL) .border_style(Style::default().fg(EUCALYPTUS)); let reset_nodes = Table::new( - vec![ - Row::new(vec![ - Cell::from(Span::raw(" ")), // Empty row for padding - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from( - Line::from(vec![Span::styled( - " Remove and Reset all Nodes on this device ", - Style::default().fg(LIGHT_PERIWINKLE), - )]) - .alignment(Alignment::Left), - ), - Cell::from( - Line::from(vec![ - Span::styled(" Begin Reset ", Style::default().fg(EUCALYPTUS)), - Span::styled(" [Ctrl+R] ", Style::default().fg(GHOST_WHITE)), - ]) - .alignment(Alignment::Right), - ), - ]), + vec![Row::new(vec![ + Cell::from( + Line::from(vec![Span::styled( + " Remove and Reset all Nodes on this device ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(reset_legend, Style::default().fg(EUCALYPTUS)), + Span::styled(reset_key, Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ])], + &[ + Constraint::Fill(1), + Constraint::Length((reset_legend.len() + reset_key.len()) as u16), ], - &[Constraint::Percentage(50), Constraint::Percentage(50)], ) .block(block4) .style(Style::default().fg(GHOST_WHITE)); + // Quit + let quit_legend = " Quit "; + let quit_key = " [Q] "; + let block5 = Block::default() + .style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(VIVID_SKY_BLUE)); + let quit = Table::new( + vec![Row::new(vec![ + Cell::from( + 
Line::from(vec![Span::styled( + " Close Launchpad (your nodes will keep running in the background) ", + Style::default().fg(LIGHT_PERIWINKLE), + )]) + .alignment(Alignment::Left), + ), + Cell::from( + Line::from(vec![ + Span::styled(quit_legend, Style::default().fg(VIVID_SKY_BLUE)), + Span::styled(quit_key, Style::default().fg(GHOST_WHITE)), + ]) + .alignment(Alignment::Right), + ), + ])], + &[ + Constraint::Fill(1), + Constraint::Length((quit_legend.len() + quit_key.len()) as u16), + ], + ) + .block(block5) + .style(Style::default().fg(GHOST_WHITE)); + // Render the tables in their respective sections f.render_widget(storage_drivename, layout[1]); f.render_widget(beta_rewards, layout[2]); f.render_widget(logs_folder, layout[3]); f.render_widget(reset_nodes, layout[4]); + f.render_widget(quit, layout[5]); Ok(()) } From d7a61279b2e7806ebec0036f0beb294d1d8cbc14 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 10:30:15 +0200 Subject: [PATCH 018/255] feat(launchpad): we hide change port hotkey when not in custom ports --- node-launchpad/src/components/options.rs | 26 ++++++++---------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 2be6af49d5..1cdd4cf9a6 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -169,24 +169,14 @@ impl Component for Options { .alignment(Alignment::Left), ), Cell::from( - Line::from(vec![ - Span::styled( - port_legend, - if self.connection_mode == ConnectionMode::CustomPorts { - Style::default().fg(VERY_LIGHT_AZURE) - } else { - Style::default().fg(COOL_GREY) - }, - ), - Span::styled( - port_key, - if self.connection_mode == ConnectionMode::CustomPorts { - Style::default().fg(GHOST_WHITE) - } else { - Style::default().fg(COOL_GREY) - }, - ), - ]) + Line::from(if self.connection_mode == ConnectionMode::CustomPorts { + vec![ + Span::styled(port_legend, Style::default().fg(VERY_LIGHT_AZURE)), + Span::styled(port_key, Style::default().fg(GHOST_WHITE)), + ] + } else { + vec![] + }) .alignment(Alignment::Right), ), ]), From 5bf3b79f0015649446af2766f9f80b46451e2da6 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 10:37:52 +0200 Subject: [PATCH 019/255] feat(launchpad): forward to status screen after changing options --- node-launchpad/src/components/popup/beta_programme.rs | 2 +- node-launchpad/src/components/popup/change_drive.rs | 2 +- node-launchpad/src/components/popup/connection_mode.rs | 2 +- node-launchpad/src/components/popup/port_range.rs | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 7dc70f7576..8940438eec 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -69,7 +69,7 @@ impl BetaProgramme { vec![ Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), - Action::SwitchScene(Scene::Options), + Action::SwitchScene(Scene::Status), ] } KeyCode::Esc => { diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index 8efeeaed88..ad58e21dcc 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -451,7 +451,7 @@ impl 
Component for ChangeDrivePopup { self.drive_selection.mountpoint.clone(), self.drive_selection.name.clone(), )), - Action::SwitchScene(Scene::Options), + Action::SwitchScene(Scene::Status), ] } Err(e) => { diff --git a/node-launchpad/src/components/popup/connection_mode.rs b/node-launchpad/src/components/popup/connection_mode.rs index 34440af4fb..0cff9bfbb3 100644 --- a/node-launchpad/src/components/popup/connection_mode.rs +++ b/node-launchpad/src/components/popup/connection_mode.rs @@ -138,7 +138,7 @@ impl Component for ChangeConnectionModePopUp { ), }) } else { - Action::SwitchScene(Scene::Options) + Action::SwitchScene(Scene::Status) }, ] } else { diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs index dd6fa90b39..b3267969b2 100644 --- a/node-launchpad/src/components/popup/port_range.rs +++ b/node-launchpad/src/components/popup/port_range.rs @@ -393,9 +393,9 @@ impl Component for PortRangePopUp { } PortRangeState::ConfirmChange => match key.code { KeyCode::Enter => { - debug!("Got Enter, saving the ports and switching to Options Screen",); + debug!("Got Enter, saving the ports and switching to Status Screen",); self.state = PortRangeState::Selection; - vec![Action::SwitchScene(Scene::Options)] + vec![Action::SwitchScene(Scene::Status)] } _ => vec![], }, From 87a65df7054c6d0ee67eb4f0c408d10a00f2ebfa Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 11:49:32 +0200 Subject: [PATCH 020/255] fix(launchpad): default mountpoint not available on disks list --- node-launchpad/src/components/popup/change_drive.rs | 7 +++++++ node-launchpad/src/system.rs | 8 +++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs index ad58e21dcc..a2cfd0e024 100644 --- a/node-launchpad/src/components/popup/change_drive.rs +++ b/node-launchpad/src/components/popup/change_drive.rs @@ -508,6 +508,13 @@ impl Component for ChangeDrivePopup { let _ = self.update_drive_items(); None } + Action::StoreStorageDrive(mountpoint, _drive_name) => { + self.storage_mountpoint = mountpoint; + let _ = self.update_drive_items(); + self.select_drive(); + None + } + _ => None, }; Ok(send_back) diff --git a/node-launchpad/src/system.rs b/node-launchpad/src/system.rs index 7d57ae91e6..d1691e0d80 100644 --- a/node-launchpad/src/system.rs +++ b/node-launchpad/src/system.rs @@ -67,6 +67,11 @@ pub fn get_list_of_available_drives_and_available_space( let disks = Disks::new_with_refreshed_list(); let mut drives: Vec<(String, PathBuf, u64, bool)> = Vec::new(); + let default_mountpoint = match get_default_mount_point() { + Ok((_name, mountpoint)) => mountpoint, + Err(_) => PathBuf::new(), + }; + for disk in disks.list() { let disk_info = ( disk.name() @@ -76,7 +81,8 @@ pub fn get_list_of_available_drives_and_available_space( .to_string(), disk.mount_point().to_path_buf(), disk.available_space(), - has_read_write_access(disk.mount_point().to_path_buf()), + has_read_write_access(disk.mount_point().to_path_buf()) + || default_mountpoint == disk.mount_point().to_path_buf(), ); // We avoid adding the same disk multiple times if it's mounted in multiple places From 5278e816706a1ec55a4d604e6b28698d5717e0bd Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 16 Sep 2024 17:48:28 +0200 Subject: [PATCH 021/255] feat(launchpad): flexible status layout --- node-launchpad/src/components/footer.rs | 31 ++++++----- 
node-launchpad/src/components/status.rs | 72 +++++++++++++++---------- 2 files changed, 59 insertions(+), 44 deletions(-) diff --git a/node-launchpad/src/components/footer.rs b/node-launchpad/src/components/footer.rs index ff5a8eb045..c1d74db1a1 100644 --- a/node-launchpad/src/components/footer.rs +++ b/node-launchpad/src/components/footer.rs @@ -12,6 +12,7 @@ use ratatui::{prelude::*, widgets::*}; pub enum NodesToStart { Configured, NotConfigured, + Running, } #[derive(Default)] @@ -33,23 +34,26 @@ impl StatefulWidget for Footer { ) }; - let command1 = vec![ + let commands = vec![ Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE)), Span::styled("Manage Nodes", Style::default().fg(EUCALYPTUS)), - ]; - let command2 = vec![ + Span::styled(" ", Style::default()), Span::styled("[Ctrl+S] ", command_style), Span::styled("Start Nodes", text_style), - ]; - let command3 = vec![ + Span::styled(" ", Style::default()), Span::styled("[Ctrl+X] ", command_style), - Span::styled("Stop Nodes", text_style), + Span::styled( + "Stop Nodes", + if matches!(state, NodesToStart::Running) { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(COOL_GREY) + }, + ), ]; - let cell1 = Cell::from(Line::from(command1)); - let cell2 = Cell::from(Line::from(command2)); - let cell3 = Cell::from(Line::from(command3)); - let row = Row::new(vec![cell1, cell2, cell3]); + let cell1 = Cell::from(Line::from(commands)); + let row = Row::new(vec![cell1]); let table = Table::new(vec![row], vec![Constraint::Max(1)]) .block( @@ -58,12 +62,7 @@ impl StatefulWidget for Footer { .border_style(Style::default().fg(EUCALYPTUS)) .padding(Padding::horizontal(1)), ) - .widths(vec![ - Constraint::Percentage(25), - Constraint::Percentage(25), - Constraint::Percentage(25), - Constraint::Percentage(25), - ]); + .widths(vec![Constraint::Fill(1)]); StatefulWidget::render(table, area, buf, &mut TableState::default()); } diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index bb40abbf66..0087575289 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -551,7 +551,18 @@ impl Component for Status { layout[1], ); } else { - // Device Status as a table + // Device Status as a block with two tables so we can shrink the screen + // and preserve as much as we can information + + let combined_block = Block::default() + .title(" Device Status ") + .bold() + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .padding(Padding::horizontal(1)) + .style(Style::default().fg(VERY_LIGHT_AZURE)); + + f.render_widget(combined_block.clone(), layout[1]); let storage_allocated_row = Row::new(vec![ Cell::new("Storage Allocated".to_string()).fg(GHOST_WHITE), @@ -584,11 +595,16 @@ impl Component for Status { let connection_mode_row = Row::new(vec![ Cell::new("Connection".to_string()).fg(GHOST_WHITE), - Cell::new(connection_mode_string).fg(GHOST_WHITE), + Cell::new(connection_mode_string).fg(LIGHT_PERIWINKLE), ]); - // Combine "Nanos Earned" and "Discord Username" into a single row - let discord_username_placeholder = "Discord Username: "; // Used to calculate the width of the username column + let stats_rows = vec![storage_allocated_row, memory_use_row, connection_mode_row]; + let stats_width = [Constraint::Length(5)]; + let column_constraints = [Constraint::Length(23), Constraint::Fill(1)]; + let stats_table = Table::new(stats_rows, stats_width).widths(column_constraints); + + // Combine "Nanos Earned" and "Username" into a single row + 
let discord_username_placeholder = "Username: "; // Used to calculate the width of the username column let discord_username_title = Span::styled( discord_username_placeholder, Style::default().fg(VIVID_SKY_BLUE), @@ -618,13 +634,8 @@ impl Component for Status { ), ]); - let stats_rows = vec![ - storage_allocated_row, - memory_use_row, - connection_mode_row, - total_nanos_earned_and_discord_row, - ]; - let stats_width = [Constraint::Length(5)]; + let nanos_discord_rows = vec![total_nanos_earned_and_discord_row]; + let nanos_discord_width = [Constraint::Length(5)]; let column_constraints = [ Constraint::Length(23), Constraint::Fill(1), @@ -632,18 +643,19 @@ impl Component for Status { (discord_username_placeholder.len() + self.discord_username.len()) as u16, ), ]; - let stats_table = Table::new(stats_rows, stats_width) - .block( - Block::default() - .title(" Device Status ") - .bold() - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .padding(Padding::horizontal(1)) - .style(Style::default().fg(VERY_LIGHT_AZURE)), - ) - .widths(column_constraints); - f.render_widget(stats_table, layout[1]); + let nanos_discord_table = + Table::new(nanos_discord_rows, nanos_discord_width).widths(column_constraints); + + let inner_area = combined_block.inner(layout[1]); + let device_layout = Layout::new( + Direction::Vertical, + vec![Constraint::Length(5), Constraint::Length(1)], + ) + .split(inner_area); + + // Render both tables inside the combined block + f.render_widget(stats_table, device_layout[0]); + f.render_widget(nanos_discord_table, device_layout[1]); }; // ==== Node Status ===== @@ -705,10 +717,10 @@ impl Component for Status { ); } else { let node_widths = [ - Constraint::Max(15), - Constraint::Min(40), - Constraint::Max(10), - Constraint::Max(10), + Constraint::Length(11), + Constraint::Fill(1), + Constraint::Length(9), + Constraint::Length(8), ]; let table = Table::new(node_rows.clone(), node_widths) .column_spacing(2) @@ -735,7 +747,11 @@ impl Component for Status { let footer = Footer::default(); let footer_state = if !node_rows.is_empty() { - &mut NodesToStart::Configured + if !self.get_running_nodes().is_empty() { + &mut NodesToStart::Running + } else { + &mut NodesToStart::Configured + } } else { &mut NodesToStart::NotConfigured }; From 4bc247b742a6a7d5a28872bef4610fd261906199 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 16:01:34 +0200 Subject: [PATCH 022/255] fix(launchpad): beta programm can be opened from status --- node-launchpad/.config/config.json5 | 3 +++ node-launchpad/src/action.rs | 1 + node-launchpad/src/components/options.rs | 4 ++-- .../src/components/popup/beta_programme.rs | 15 +++++++++++---- node-launchpad/src/components/status.rs | 5 ++++- node-launchpad/src/mode.rs | 3 ++- 6 files changed, 23 insertions(+), 8 deletions(-) diff --git a/node-launchpad/.config/config.json5 b/node-launchpad/.config/config.json5 index 58db17d7bb..049f9de82b 100644 --- a/node-launchpad/.config/config.json5 +++ b/node-launchpad/.config/config.json5 @@ -14,6 +14,9 @@ "": {"StatusActions":"StopNodes"}, "": {"StatusActions":"StopNodes"}, "": {"StatusActions":"StopNodes"}, + "": {"StatusActions":"TriggerBetaProgramme"}, + "": {"StatusActions":"TriggerBetaProgramme"}, + "": {"StatusActions":"TriggerBetaProgramme"}, "up" : {"StatusActions":"PreviousTableItem"}, "down": {"StatusActions":"NextTableItem"}, diff --git a/node-launchpad/src/action.rs b/node-launchpad/src/action.rs index 19b8bc7125..37a0226323 100644 --- a/node-launchpad/src/action.rs +++ 
b/node-launchpad/src/action.rs @@ -58,6 +58,7 @@ pub enum StatusActions { NodesStatsObtained(NodeStats), TriggerManageNodes, + TriggerBetaProgramme, PreviousTableItem, NextTableItem, diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 1cdd4cf9a6..49ea3c89b0 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -350,7 +350,7 @@ impl Component for Options { | Scene::ChangeDrivePopUp | Scene::ChangeConnectionModePopUp | Scene::ChangePortsPopUp { .. } - | Scene::BetaProgrammePopUp + | Scene::OptionsBetaProgrammePopUp | Scene::ResetNodesPopUp => { self.active = true; // make sure we're in navigation mode @@ -382,7 +382,7 @@ impl Component for Options { self.port_to = Some(to); } OptionsActions::TriggerBetaProgramme => { - return Ok(Some(Action::SwitchScene(Scene::BetaProgrammePopUp))); + return Ok(Some(Action::SwitchScene(Scene::OptionsBetaProgrammePopUp))); } OptionsActions::UpdateBetaProgrammeUsername(username) => { self.discord_username = username; diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 8940438eec..de3a4b3dcb 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -29,6 +29,7 @@ pub struct BetaProgramme { discord_input_filed: Input, // cache the old value incase user presses Esc. old_value: String, + back_to: Scene, } enum BetaProgrammeState { @@ -50,6 +51,7 @@ impl BetaProgramme { state, discord_input_filed: Input::default().with_value(username), old_value: Default::default(), + back_to: Scene::Status, } } @@ -69,7 +71,7 @@ impl BetaProgramme { vec![ Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), - Action::SwitchScene(Scene::Status), + Action::SwitchScene(self.back_to), ] } KeyCode::Esc => { @@ -82,7 +84,7 @@ impl BetaProgramme { .discord_input_filed .clone() .with_value(self.old_value.clone()); - vec![Action::SwitchScene(Scene::Options)] + vec![Action::SwitchScene(self.back_to)] } KeyCode::Char(' ') => vec![], KeyCode::Backspace => { @@ -135,7 +137,7 @@ impl Component for BetaProgramme { debug!("RejectTCs msg closed. Switching to Status scene."); self.state = BetaProgrammeState::ShowTCs; } - vec![Action::SwitchScene(Scene::Status)] + vec![Action::SwitchScene(self.back_to)] } BetaProgrammeState::AcceptTCsAndEnterDiscordId => self.capture_inputs(key), }; @@ -145,9 +147,14 @@ impl Component for BetaProgramme { fn update(&mut self, action: Action) -> Result> { let send_back = match action { Action::SwitchScene(scene) => match scene { - Scene::BetaProgrammePopUp => { + Scene::StatusBetaProgrammePopUp | Scene::OptionsBetaProgrammePopUp => { self.active = true; self.old_value = self.discord_input_filed.value().to_string(); + if scene == Scene::StatusBetaProgrammePopUp { + self.back_to = Scene::Status; + } else if scene == Scene::OptionsBetaProgrammePopUp { + self.back_to = Scene::Options; + } // Set to InputMode::Entry as we want to handle everything within our handle_key_events // so by default if this scene is active, we capture inputs. 
Some(Action::SwitchInputMode(InputMode::Entry)) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index bb40abbf66..c0b516b00d 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -274,7 +274,7 @@ impl Component for Status { self.try_update_node_stats(false)?; } Action::SwitchScene(scene) => match scene { - Scene::Status => { + Scene::Status | Scene::StatusBetaProgrammePopUp => { self.active = true; // make sure we're in navigation mode return Ok(Some(Action::SwitchInputMode(InputMode::Navigation))); @@ -478,6 +478,9 @@ impl Component for Status { stop_nodes(running_nodes, action_sender); } + StatusActions::TriggerBetaProgramme => { + return Ok(Some(Action::SwitchScene(Scene::StatusBetaProgrammePopUp))); + } }, Action::OptionsActions(OptionsActions::ResetNodes) => { debug!("Got action to reset nodes"); diff --git a/node-launchpad/src/mode.rs b/node-launchpad/src/mode.rs index 2f0d356599..3f4871302e 100644 --- a/node-launchpad/src/mode.rs +++ b/node-launchpad/src/mode.rs @@ -21,7 +21,8 @@ pub enum Scene { ChangePortsPopUp { connection_mode_old_value: Option, }, - BetaProgrammePopUp, + StatusBetaProgrammePopUp, + OptionsBetaProgrammePopUp, ManageNodesPopUp, ResetNodesPopUp, } From 2f5587fcecc22b77cc9f720de4249f808acd470b Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 12 Sep 2024 16:03:14 +0200 Subject: [PATCH 023/255] fix(launchpad): after setting up the name forward to status --- node-launchpad/src/components/popup/beta_programme.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index de3a4b3dcb..ab1e9ee739 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -71,7 +71,7 @@ impl BetaProgramme { vec![ Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), - Action::SwitchScene(self.back_to), + Action::SwitchScene(Scene::Status), ] } KeyCode::Esc => { From 1287ef23687004e4e948173d9aca007f6f836067 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 16 Sep 2024 09:25:31 +0200 Subject: [PATCH 024/255] fix(launchpad): remove underline color to avoid flickering --- node-launchpad/src/components/popup/beta_programme.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index ab1e9ee739..f6a4f8463f 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -407,11 +407,7 @@ impl Component for BetaProgramme { ); let input = Paragraph::new(Span::styled( format!("{}{} ", spaces, self.discord_input_filed.value()), - Style::default() - .fg(VIVID_SKY_BLUE) - .bg(INDIGO) - .underlined() - .underline_color(VIVID_SKY_BLUE), + Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); From a36130ab5bbd8b39211937bca9f0a468aa098969 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 16 Sep 2024 10:48:51 +0200 Subject: [PATCH 025/255] fix(launchpad): discord username can be empty --- .../src/components/popup/beta_programme.rs | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git 
a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index f6a4f8463f..8f0a547fe9 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -60,10 +60,6 @@ impl BetaProgramme { KeyCode::Enter => { let username = self.discord_input_filed.value().to_string(); - if username.is_empty() { - debug!("Got Enter, but username is empty, ignoring."); - return vec![]; - } debug!( "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", ); @@ -268,14 +264,10 @@ impl Component for BetaProgramme { )]); f.render_widget(button_no, buttons_layer[0]); - let button_yes_style = if self.discord_input_filed.value().is_empty() { - Style::default().fg(LIGHT_PERIWINKLE) - } else { - Style::default().fg(EUCALYPTUS) - }; + let button_yes = Line::from(vec![Span::styled( "Save Username [Enter]", - button_yes_style, + Style::default().fg(EUCALYPTUS), )]); f.render_widget(button_yes, buttons_layer[1]); } @@ -443,15 +435,10 @@ impl Component for BetaProgramme { " No, Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); - let button_yes_style = if self.discord_input_filed.value().is_empty() { - Style::default().fg(LIGHT_PERIWINKLE) - } else { - Style::default().fg(EUCALYPTUS) - }; f.render_widget(button_no, buttons_layer[0]); let button_yes = Line::from(vec![Span::styled( "Submit Username [Enter]", - button_yes_style, + Style::default().fg(EUCALYPTUS), )]); f.render_widget(button_yes, buttons_layer[1]); } From 035a151715fc9cf649921fe4a8b4488c1603221c Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 17 Sep 2024 10:47:18 +0200 Subject: [PATCH 026/255] test(autonomi): connect to peers from env --- Cargo.lock | 1 + autonomi/Cargo.toml | 1 + autonomi/tests/common/mod.rs | 13 +++++++++++++ autonomi/tests/file.rs | 2 +- autonomi/tests/put.rs | 4 +++- autonomi/tests/register.rs | 4 +++- sn_peers_acquisition/src/error.rs | 2 +- sn_peers_acquisition/src/lib.rs | 9 ++------- 8 files changed, 25 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09d780b39c..297edfbcc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -443,6 +443,7 @@ dependencies = [ "self_encryption", "serde", "sn_client", + "sn_peers_acquisition", "sn_protocol", "sn_registers", "sn_transfers", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index ff395c3661..96834b3035 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -23,6 +23,7 @@ rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } sn_client = { path = "../sn_client" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.0" } sn_protocol = { version = "0.17.5", path = "../sn_protocol" } sn_registers = { path = "../sn_registers", version = "0.3.18" } sn_transfers = { path = "../sn_transfers", version = "0.19.0" } diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs index 010543b566..c01ae23187 100644 --- a/autonomi/tests/common/mod.rs +++ b/autonomi/tests/common/mod.rs @@ -1,8 +1,10 @@ #![allow(dead_code)] use bytes::Bytes; +use libp2p::Multiaddr; use rand::Rng; use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; +use sn_peers_acquisition::parse_peer_addr; use sn_transfers::{get_faucet_data_dir, HotWallet}; /// When launching a testnet locally, we can use the faucet wallet. 
@@ -24,3 +26,14 @@ pub fn enable_logging() {
         .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
         .try_init();
 }
+
+/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs.
+///
+/// An empty `Vec` will be returned if the env var is not set.
+pub fn peers_from_env() -> Result<Vec<Multiaddr>, libp2p::multiaddr::Error> {
+    let Ok(peers_str) = std::env::var("SAFE_PEERS") else {
+        return Ok(vec![]);
+    };
+
+    peers_str.split(',').map(parse_peer_addr).collect()
+}
diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs
index 0e221ae6c7..63c88172f6 100644
--- a/autonomi/tests/file.rs
+++ b/autonomi/tests/file.rs
@@ -10,7 +10,7 @@ mod common;
 async fn file() -> Result<(), Box<dyn std::error::Error>> {
     common::enable_logging();
 
-    let mut client = Client::connect(&[]).await?;
+    let mut client = Client::connect(&common::peers_from_env()?).await?;
     let mut wallet = common::load_hot_wallet_from_faucet();
 
     // let data = common::gen_random_data(1024 * 1024 * 1000);
diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs
index 5c2111f36b..49eb263130 100644
--- a/autonomi/tests/put.rs
+++ b/autonomi/tests/put.rs
@@ -10,7 +10,9 @@ async fn put() {
     common::enable_logging();
 
-    let mut client = Client::connect(&[]).await.unwrap();
+    let mut client = Client::connect(&common::peers_from_env().unwrap())
+        .await
+        .unwrap();
     let mut wallet = common::load_hot_wallet_from_faucet();
 
     let data = common::gen_random_data(1024 * 1024 * 10);
diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs
index 79cd00368d..c51873b966 100644
--- a/autonomi/tests/register.rs
+++ b/autonomi/tests/register.rs
@@ -12,7 +12,9 @@ async fn register() {
     common::enable_logging();
 
-    let mut client = Client::connect(&[]).await.unwrap();
+    let mut client = Client::connect(&common::peers_from_env().unwrap())
+        .await
+        .unwrap();
     let mut wallet = common::load_hot_wallet_from_faucet();
 
     // Owner key of the register.
diff --git a/sn_peers_acquisition/src/error.rs b/sn_peers_acquisition/src/error.rs
index 1e9f4b7da2..d5df7c969b 100644
--- a/sn_peers_acquisition/src/error.rs
+++ b/sn_peers_acquisition/src/error.rs
@@ -5,7 +5,7 @@ pub type Result<T> = std::result::Result<T, Error>;
 #[derive(Debug, Error)]
 pub enum Error {
     #[error("Could not parse the supplied multiaddr or socket address")]
-    InvalidPeerAddr,
+    InvalidPeerAddr(#[from] libp2p::multiaddr::Error),
     #[error("Could not obtain network contacts from {0} after {1} retries")]
     FailedToObtainPeersFromUrl(String, usize),
     #[error("No valid multaddr was present in the contacts file at {0}")]
diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs
index 10a7b1a775..db467d6249 100644
--- a/sn_peers_acquisition/src/lib.rs
+++ b/sn_peers_acquisition/src/lib.rs
@@ -159,7 +159,7 @@ impl PeersArgs {
 }
 
 /// Parse strings like `1.2.3.4:1234` and `/ip4/1.2.3.4/tcp/1234` into a multiaddr.
-pub fn parse_peer_addr(addr: &str) -> Result<Multiaddr> {
+pub fn parse_peer_addr(addr: &str) -> std::result::Result<Multiaddr, libp2p::multiaddr::Error> {
     // Parse valid IPv4 socket address, e.g. `1.2.3.4:1234`.
     if let Ok(addr) = addr.parse::<SocketAddrV4>() {
         let start_addr = Multiaddr::from(*addr.ip());
@@ -180,12 +180,7 @@ pub fn parse_peer_addr(addr: &str) -> Result<Multiaddr> {
     }
 
     // Parse any valid multiaddr string
-    if let Ok(addr) = addr.parse::<Multiaddr>() {
-        debug!("Parsing a full multiaddr: {:?}", addr);
-        return Ok(addr);
-    }
-
-    Err(Error::InvalidPeerAddr)
+    addr.parse::<Multiaddr>()
 }
 
 /// Get and parse a list of peers from a URL. The URL should contain one multiaddr per line. 
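
The `SAFE_PEERS` plumbing above lets the integration tests target an already-running
network rather than an implicit local one. A minimal usage sketch (a hypothetical
test, not part of this patch; it assumes a local testnet whose multiaddrs are
exported via `SAFE_PEERS`):

```rust
// Sketch only. Run with e.g.:
//   SAFE_PEERS="/ip4/127.0.0.1/udp/38921/quic-v1/p2p/12D3Koo..." cargo test
use autonomi::Client;

mod common; // the test helper module added in this patch

#[tokio::test]
async fn connect_to_env_peers() -> Result<(), Box<dyn std::error::Error>> {
    // Empty list when SAFE_PEERS is unset, so local discovery still applies.
    let peers = common::peers_from_env()?;
    let _client = Client::connect(&peers).await?;
    Ok(())
}
```
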
From 52c6a35696465be0c372f5d6ee5ca8d68c70f881 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 17 Sep 2024 12:18:16 +0200 Subject: [PATCH 027/255] fix(launchpad): some error handling when sn node manager fails --- node-launchpad/src/components/options.rs | 6 ++-- node-launchpad/src/node_mgmt.rs | 38 ++++++++++++++++++++---- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 1cdd4cf9a6..36a3f666e9 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -269,7 +269,7 @@ impl Component for Options { // Reset All Nodes let reset_legend = " Begin Reset "; - let reset_key = " [Ctrl+ R] "; + let reset_key = " [Ctrl+R] "; let block4 = Block::default() .title(" Reset All Nodes ") .title_style(Style::default().bold().fg(GHOST_WHITE)) @@ -302,8 +302,8 @@ impl Component for Options { .style(Style::default().fg(GHOST_WHITE)); // Quit - let quit_legend = " Quit "; - let quit_key = " [Q] "; + let quit_legend = "Quit "; + let quit_key = "[Q] "; let block5 = Block::default() .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 88c48dd566..bc628d5635 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -17,7 +17,7 @@ use sn_releases::{self, ReleaseType, SafeReleaseRepoActions}; pub const PORT_MAX: u32 = 65535; pub const PORT_MIN: u32 = 1024; -const PORT_ASSIGNMENT_MAX_RETRIES: u32 = 5; +const NODE_ADD_MAX_RETRIES: u32 = 5; /// Stop the specified services pub fn stop_nodes(services: Vec, action_sender: UnboundedSender) { @@ -331,7 +331,7 @@ async fn add_nodes( ) { let mut retry_count = 0; - while nodes_to_add > 0 && retry_count < PORT_ASSIGNMENT_MAX_RETRIES { + while nodes_to_add > 0 && retry_count < NODE_ADD_MAX_RETRIES { // Find the next available port while used_ports.contains(current_port) && *current_port <= max_port { *current_port += 1; @@ -395,16 +395,42 @@ async fn add_nodes( "Port {} is being used, retrying with a different port. 
Attempt {}/{}", current_port, retry_count + 1, - PORT_ASSIGNMENT_MAX_RETRIES + NODE_ADD_MAX_RETRIES ); - *current_port += 1; - retry_count += 1; + } else if err + .to_string() + .contains("Failed to add one or more services") + && retry_count >= NODE_ADD_MAX_RETRIES + { + if let Err(err) = action_sender.send(Action::StatusActions( + StatusActions::ErrorScalingUpNodes { + raw_error: "When trying to add a node, we failed.\n\n\ + Maybe you ran out of disk space?\n\n\ + Maybe you need to change the port range\n\n" + .to_string(), + }, + )) { + error!("Error while sending action: {err:?}"); + } } else { error!("Range of ports to be used {:?}", *current_port..max_port); error!("Error while adding node on port {}: {err:?}", current_port); - retry_count += 1; } + // In case of error, we increase the port and the retry count + *current_port += 1; + retry_count += 1; } } } + if retry_count >= NODE_ADD_MAX_RETRIES { + if let Err(err) = action_sender.send(Action::StatusActions(StatusActions::ErrorScalingUpNodes { + raw_error: format!( + "When trying to assign an available port to run a node, we reached the maximum amount of retries ({}).", + NODE_ADD_MAX_RETRIES + ), + })) + { + error!("Error while sending action: {err:?}"); + } + } } From 3b1aaa8edd685f503fda69abf4baf76ae8c12855 Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 17 Sep 2024 20:53:48 +0800 Subject: [PATCH 028/255] feat(client): carry out quote self validation --- sn_networking/src/lib.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index a877b206f4..cab6f9f650 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -366,7 +366,7 @@ impl Network { let mut all_costs = vec![]; let mut all_quotes = vec![]; for response in responses.into_values().flatten() { - debug!( + info!( "StoreCostReq for {record_address:?} received response: {:?}", response ); @@ -376,6 +376,14 @@ impl Network { payment_address, peer_address, }) => { + // Check the quote itself is valid. 
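+                    // The expected cost is recomputed locally from the peer's
+                    // reported quoting metrics; a mismatch means the quote does
+                    // not follow the shared pricing function, so it is skipped.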
+                    if quote.cost.as_nano()
+                        != calculate_cost_for_records(quote.quoting_metrics.close_records_stored)
+                    {
+                        warn!("Received invalid quote from {peer_address:?}, {quote:?}");
+                        continue;
+                    }
+
                     all_costs.push((peer_address.clone(), payment_address, quote.clone()));
                     all_quotes.push((peer_address, quote));
                 }

From caca69007c3a58fa9409263a00d59b7efb03d822 Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Tue, 17 Sep 2024 13:17:54 +0200
Subject: [PATCH 029/255] fix(launchpad): avoid overflow when computing needed
 space

---
 .../src/components/popup/change_drive.rs |  3 ++-
 node-launchpad/src/node_mgmt.rs          | 15 ++++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/node-launchpad/src/components/popup/change_drive.rs b/node-launchpad/src/components/popup/change_drive.rs
index a2cfd0e024..c73dbffee7 100644
--- a/node-launchpad/src/components/popup/change_drive.rs
+++ b/node-launchpad/src/components/popup/change_drive.rs
@@ -128,7 +128,8 @@ impl ChangeDrivePopup {
             .iter()
             .map(|(drive_name, mountpoint, space, available)| {
                 let size_str = format!("{:.2} GB", *space as f64 / 1e9);
-                let has_enough_space = *space >= (GB_PER_NODE * GB * self.nodes_to_start) as u64;
+                let has_enough_space = *space as u128
+                    >= (GB_PER_NODE as u128 * GB as u128 * self.nodes_to_start as u128);
                 DriveItem {
                     name: drive_name.to_string(),
                     mountpoint: mountpoint.clone(),
diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs
index bc628d5635..fea2eeff0b 100644
--- a/node-launchpad/src/node_mgmt.rs
+++ b/node-launchpad/src/node_mgmt.rs
@@ -406,7 +406,7 @@ async fn add_nodes(
                     StatusActions::ErrorScalingUpNodes {
                         raw_error: "When trying to add a node, we failed.\n\n\
                         Maybe you ran out of disk space?\n\n\
-                        Maybe you need to change the port range\n\n"
+                        Maybe you need to change the port range?\n\n"
                             .to_string(),
                     },
                 )) {
@@ -423,12 +423,13 @@ async fn add_nodes(
         }
     }
     if retry_count >= NODE_ADD_MAX_RETRIES {
-        if let Err(err) = action_sender.send(Action::StatusActions(StatusActions::ErrorScalingUpNodes {
-            raw_error: format!(
-                "When trying to assign an available port to run a node, we reached the maximum amount of retries ({}).",
-                NODE_ADD_MAX_RETRIES
-            ),
-        }))
+        if let Err(err) =
+            action_sender.send(Action::StatusActions(StatusActions::ErrorScalingUpNodes {
+                raw_error: format!(
+                    "When trying to run a node, we reached the maximum amount of retries ({}).",
+                    NODE_ADD_MAX_RETRIES
+                ),
+            }))
         {
             error!("Error while sending action: {err:?}");
         }

From 4abf02c000942fd365e1e57a54f4d8ede61d023f Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Tue, 17 Sep 2024 16:29:07 +0200
Subject: [PATCH 030/255] fix(launchpad): spacing on error titles

---
 node-launchpad/src/components/status.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index bb40abbf66..dfc0512240 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -367,7 +367,7 @@ impl Component for Status {
                 StatusActions::ErrorLoadingNodeRegistry { raw_error }
                 | StatusActions::ErrorGettingNodeRegistryPath { raw_error } => {
                     self.error_popup = Some(ErrorPopup::new(
-                        " Error ".to_string(),
+                        "Error".to_string(),
                         "Error getting node registry path".to_string(),
                         raw_error,
                     ));
@@ -379,7 +379,7 @@ impl Component for Status {
                 }
                 StatusActions::ErrorScalingUpNodes { raw_error } => {
                     self.error_popup = Some(ErrorPopup::new(
-                        " Error ".to_string(),
+                        "Error".to_string(),
                         "Error adding new 
nodes".to_string(), raw_error, )); @@ -391,7 +391,7 @@ impl Component for Status { } StatusActions::ErrorStoppingNodes { raw_error } => { self.error_popup = Some(ErrorPopup::new( - " Error ".to_string(), + "Error".to_string(), "Error stopping nodes".to_string(), raw_error, )); @@ -403,7 +403,7 @@ impl Component for Status { } StatusActions::ErrorResettingNodes { raw_error } => { self.error_popup = Some(ErrorPopup::new( - " Error ".to_string(), + "Error".to_string(), "Error resetting nodes".to_string(), raw_error, )); From 99f23e84d6724cc9f0bf0441175aae2b3b6dfa29 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 17 Sep 2024 17:12:19 +0200 Subject: [PATCH 031/255] feat(launchpad): discord name set or not set in device status --- node-launchpad/src/components/status.rs | 230 +++++++++++------------- 1 file changed, 104 insertions(+), 126 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 0087575289..9d5efe48a0 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -524,140 +524,118 @@ impl Component for Status { // ==== Device Status ===== - if self.discord_username.is_empty() { - let line1 = Line::from(vec![Span::styled( - "Add this device to the Beta Rewards Program", - Style::default().fg(VERY_LIGHT_AZURE), - )]); - let line2 = Line::from(vec![ - Span::styled("Press ", Style::default().fg(VERY_LIGHT_AZURE)), - Span::styled("[Ctrl+B]", Style::default().fg(GHOST_WHITE).bold()), - Span::styled(" to add your ", Style::default().fg(VERY_LIGHT_AZURE)), - Span::styled( - "Discord Username", - Style::default().fg(VERY_LIGHT_AZURE).bold(), - ), - ]); - f.render_widget( - Paragraph::new(vec![Line::raw(""), Line::raw(""), line1, line2]).block( - Block::default() - .title(" Device Status ") - .bold() - .title_style(Style::new().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .padding(Padding::horizontal(1)) - .border_style(Style::new().fg(VERY_LIGHT_AZURE)), - ), - layout[1], - ); + // Device Status as a block with two tables so we can shrink the screen + // and preserve as much as we can information + + let combined_block = Block::default() + .title(" Device Status ") + .bold() + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .padding(Padding::horizontal(1)) + .style(Style::default().fg(VERY_LIGHT_AZURE)); + + f.render_widget(combined_block.clone(), layout[1]); + + let storage_allocated_row = Row::new(vec![ + Cell::new("Storage Allocated".to_string()).fg(GHOST_WHITE), + Cell::new(format!("{} GB", self.nodes_to_start * GB_PER_NODE)).fg(GHOST_WHITE), + ]); + let memory_use_val = if self.node_stats.memory_usage_mb as f64 / 1024_f64 > 1.0 { + format!( + "{:.2} GB", + self.node_stats.memory_usage_mb as f64 / 1024_f64 + ) } else { - // Device Status as a block with two tables so we can shrink the screen - // and preserve as much as we can information - - let combined_block = Block::default() - .title(" Device Status ") - .bold() - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .padding(Padding::horizontal(1)) - .style(Style::default().fg(VERY_LIGHT_AZURE)); - - f.render_widget(combined_block.clone(), layout[1]); - - let storage_allocated_row = Row::new(vec![ - Cell::new("Storage Allocated".to_string()).fg(GHOST_WHITE), - Cell::new(format!("{} GB", self.nodes_to_start * GB_PER_NODE)).fg(GHOST_WHITE), - ]); - let memory_use_val = if self.node_stats.memory_usage_mb as f64 / 1024_f64 > 1.0 { - format!( - "{:.2} GB", - 
self.node_stats.memory_usage_mb as f64 / 1024_f64 - ) - } else { - format!("{} MB", self.node_stats.memory_usage_mb) - }; - - let memory_use_row = Row::new(vec![ - Cell::new("Memory Use".to_string()).fg(GHOST_WHITE), - Cell::new(memory_use_val).fg(GHOST_WHITE), - ]); - - let connection_mode_string = match self.connection_mode { - ConnectionMode::HomeNetwork => "Home Network", - ConnectionMode::UPnP => "UPnP", - ConnectionMode::CustomPorts => &format!( - "Custom Ports {}-{}", - self.port_from.unwrap_or(PORT_MIN), - self.port_to.unwrap_or(PORT_MIN + PORT_ALLOCATION) - ), - ConnectionMode::Automatic => "Automatic", - }; + format!("{} MB", self.node_stats.memory_usage_mb) + }; - let connection_mode_row = Row::new(vec![ - Cell::new("Connection".to_string()).fg(GHOST_WHITE), - Cell::new(connection_mode_string).fg(LIGHT_PERIWINKLE), - ]); + let memory_use_row = Row::new(vec![ + Cell::new("Memory Use".to_string()).fg(GHOST_WHITE), + Cell::new(memory_use_val).fg(GHOST_WHITE), + ]); + + let connection_mode_string = match self.connection_mode { + ConnectionMode::HomeNetwork => "Home Network", + ConnectionMode::UPnP => "UPnP", + ConnectionMode::CustomPorts => &format!( + "Custom Ports {}-{}", + self.port_from.unwrap_or(PORT_MIN), + self.port_to.unwrap_or(PORT_MIN + PORT_ALLOCATION) + ), + ConnectionMode::Automatic => "Automatic", + }; - let stats_rows = vec![storage_allocated_row, memory_use_row, connection_mode_row]; - let stats_width = [Constraint::Length(5)]; - let column_constraints = [Constraint::Length(23), Constraint::Fill(1)]; - let stats_table = Table::new(stats_rows, stats_width).widths(column_constraints); + let connection_mode_row = Row::new(vec![ + Cell::new("Connection".to_string()).fg(GHOST_WHITE), + Cell::new(connection_mode_string).fg(LIGHT_PERIWINKLE), + ]); + + let stats_rows = vec![storage_allocated_row, memory_use_row, connection_mode_row]; + let stats_width = [Constraint::Length(5)]; + let column_constraints = [Constraint::Length(23), Constraint::Fill(1)]; + let stats_table = Table::new(stats_rows, stats_width).widths(column_constraints); + + // Combine "Nanos Earned" and "Username" into a single row + let discord_username_placeholder = "Username: "; // Used to calculate the width of the username column + let discord_username_no_username = "[Ctrl+B] to set"; + let discord_username_title = Span::styled( + discord_username_placeholder, + Style::default().fg(VIVID_SKY_BLUE), + ); - // Combine "Nanos Earned" and "Username" into a single row - let discord_username_placeholder = "Username: "; // Used to calculate the width of the username column - let discord_username_title = Span::styled( - discord_username_placeholder, + let discord_username = if !self.discord_username.is_empty() { + Span::styled( + self.discord_username.clone(), Style::default().fg(VIVID_SKY_BLUE), - ); - - let discord_username = if !self.discord_username.is_empty() { - Span::styled( - self.discord_username.clone(), - Style::default().fg(VIVID_SKY_BLUE), - ) - .bold() - } else { - Span::styled( - "[Ctrl+B] to set".to_string(), - Style::default().fg(GHOST_WHITE), - ) - }; - - let total_nanos_earned_and_discord_row = Row::new(vec![ - Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), - Cell::new(self.node_stats.forwarded_rewards.to_string()) - .fg(VIVID_SKY_BLUE) - .bold(), - Cell::new( - Line::from(vec![discord_username_title, discord_username]) - .alignment(Alignment::Right), - ), - ]); - - let nanos_discord_rows = vec![total_nanos_earned_and_discord_row]; - let nanos_discord_width = [Constraint::Length(5)]; 
- let column_constraints = [ - Constraint::Length(23), - Constraint::Fill(1), - Constraint::Length( - (discord_username_placeholder.len() + self.discord_username.len()) as u16, - ), - ]; - let nanos_discord_table = - Table::new(nanos_discord_rows, nanos_discord_width).widths(column_constraints); - - let inner_area = combined_block.inner(layout[1]); - let device_layout = Layout::new( - Direction::Vertical, - vec![Constraint::Length(5), Constraint::Length(1)], ) - .split(inner_area); - - // Render both tables inside the combined block - f.render_widget(stats_table, device_layout[0]); - f.render_widget(nanos_discord_table, device_layout[1]); + .bold() + } else { + Span::styled( + discord_username_no_username, + Style::default().fg(GHOST_WHITE), + ) }; + let total_nanos_earned_and_discord_row = Row::new(vec![ + Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), + Cell::new(self.node_stats.forwarded_rewards.to_string()) + .fg(VIVID_SKY_BLUE) + .bold(), + Cell::new( + Line::from(vec![discord_username_title, discord_username]) + .alignment(Alignment::Right), + ), + ]); + + let nanos_discord_rows = vec![total_nanos_earned_and_discord_row]; + let nanos_discord_width = [Constraint::Length(5)]; + let column_constraints = [ + Constraint::Length(23), + Constraint::Fill(1), + Constraint::Length( + discord_username_placeholder.len() as u16 + + if !self.discord_username.is_empty() { + self.discord_username.len() as u16 + } else { + discord_username_no_username.len() as u16 + }, + ), + ]; + let nanos_discord_table = + Table::new(nanos_discord_rows, nanos_discord_width).widths(column_constraints); + + let inner_area = combined_block.inner(layout[1]); + let device_layout = Layout::new( + Direction::Vertical, + vec![Constraint::Length(5), Constraint::Length(1)], + ) + .split(inner_area); + + // Render both tables inside the combined block + f.render_widget(stats_table, device_layout[0]); + f.render_widget(nanos_discord_table, device_layout[1]); + // ==== Node Status ===== let node_rows: Vec<_> = self From 3e793aa857f934efa3a76136a9ce2593abb73e70 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 2 Sep 2024 13:09:56 +0900 Subject: [PATCH 032/255] feat(aut): add basic vault api for storing data to Scratchpad --- Cargo.lock | 1 + autonomi/Cargo.toml | 4 +- autonomi/src/client/data.rs | 2 + autonomi/src/client/files.rs | 15 ++- autonomi/src/client/mod.rs | 12 +- autonomi/src/client/vault.rs | 181 ++++++++++++++++++++++++++ autonomi/tests/file.rs | 44 +++++++ sn_networking/src/record_store.rs | 35 ++--- sn_node/src/put_validation.rs | 2 +- sn_protocol/src/error.rs | 3 + sn_protocol/src/lib.rs | 5 + sn_protocol/src/storage/scratchpad.rs | 76 ++++++++--- 12 files changed, 342 insertions(+), 38 deletions(-) create mode 100644 autonomi/src/client/vault.rs diff --git a/Cargo.lock b/Cargo.lock index 297edfbcc6..de693647e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -437,6 +437,7 @@ dependencies = [ "bip39", "blsttc", "bytes", + "eyre", "libp2p", "rand 0.8.5", "rmp-serde", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 96834b3035..065a52f9f0 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -5,8 +5,9 @@ edition = "2021" [features] default = ["data"] -full = ["data", "files", "fs", "registers", "transfers"] +full = ["data", "files", "fs", "registers", "transfers", "vault"] data = ["transfers"] +vault = ["data"] files = ["transfers", "data"] fs = [] local = ["sn_client/local-discovery"] @@ -34,6 +35,7 @@ walkdir = "2.5.0" xor_name = "5.0.0" [dev-dependencies] +eyre = "0.6.5" 
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 
 [lints]
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 0fca7f398a..8f54c35387 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -32,6 +32,8 @@ pub enum PutError {
     SelfEncryption(#[from] crate::self_encryption::Error),
     #[error("Error serializing data.")]
     Serialization,
+    #[error("Error getting Vault XorName data.")]
+    VaultXorName,
     #[error("A network error occurred.")]
     Network(#[from] NetworkError),
     #[error("A wallet error occurred.")]
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index 21fd2d8d65..457eba8fa5 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -64,9 +64,13 @@ impl Client {
         }
 
         let root = Root { map };
-        let root_serialized = rmp_serde::to_vec(&root)?;
+        let root_serialized = Bytes::from(rmp_serde::to_vec(&root)?);
 
-        let xor_name = self.put(Bytes::from(root_serialized), wallet).await?;
+        #[cfg(feature = "vault")]
+        self.write_bytes_to_vault_if_defined(root_serialized.clone(), wallet)
+            .await?;
+
+        let xor_name = self.put(root_serialized, wallet).await?;
 
         Ok((root, xor_name))
     }
@@ -74,7 +78,12 @@ impl Client {
     /// Fetch a directory from the network.
     pub async fn fetch_root(&mut self, address: XorName) -> Result {
         let data = self.get(address).await?;
-        let root: Root = rmp_serde::from_slice(&data[..])?;
+
+        Self::deserialise_root(data)
+    }
+
+    pub fn deserialise_root(data: Bytes) -> Result {
+        let root: Root = rmp_serde::from_slice(&data[..])?;
 
         Ok(root)
     }
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index d8e80620b0..b50d7e7ce8 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -1,5 +1,7 @@
 use std::{collections::HashSet, time::Duration};
 
+#[cfg(feature = "vault")]
+use bls::SecretKey;
 use libp2p::{identity::Keypair, Multiaddr};
 use sn_client::networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent};
 use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE};
@@ -17,6 +19,8 @@ mod registers;
 #[cfg(feature = "transfers")]
 #[cfg_attr(docsrs, doc(cfg(feature = "transfers")))]
 mod transfers;
+#[cfg(feature = "vault")]
+mod vault;
 
 /// Time before considering the connection timed out.
 pub const CONNECT_TIMEOUT_SECS: u64 = 20;
...
 #[derive(Clone)]
 pub struct Client {
     pub(crate) network: Network,
+    #[cfg(feature = "vault")]
+    vault_secret_key: Option<SecretKey>,
 }
 
 /// Error returned by [`Client::connect`]. 
@@ -88,7 +94,11 @@ impl Client { receiver.await.expect("sender should not close")?; - Ok(Self { network }) + Ok(Self { + network, + #[cfg(feature = "vault")] + vault_secret_key: None, + }) } } diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs new file mode 100644 index 0000000000..5d27aa4cf9 --- /dev/null +++ b/autonomi/src/client/vault.rs @@ -0,0 +1,181 @@ +use std::collections::HashSet; + +use crate::Client; +use bls::SecretKey; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use sn_client::{ + networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}, + transfers::HotWallet, +}; +use sn_protocol::storage::{RetryStrategy, Scratchpad, ScratchpadAddress}; +use sn_protocol::{ + storage::{try_deserialize_record, try_serialize_record, RecordKind}, + NetworkAddress, +}; +use tracing::info; + +use super::data::PutError; + +#[derive(Debug, thiserror::Error)] +pub enum VaultError { + #[error("Could not generate Vault secret key from entropy: {0:?}")] + Bls(#[from] bls::Error), + #[error("No Vault has been defined. Use `client.with_vault_entropy` to define one.")] + NoVaultPacketDefined, + #[error("Scratchpad found at {0:?} was not a valid record.")] + CouldNotDeserializeVaultScratchPad(ScratchpadAddress), + #[error("Protocol: {0}")] + Protocol(#[from] sn_protocol::Error), + #[error("Network: {0}")] + Network(#[from] NetworkError), +} + +impl Client { + /// Add an vault secret key to the client + /// + /// The secret key is derived from the supplied entropy bytes. + pub fn with_vault_entropy(mut self, bytes: Bytes) -> Result { + // simple hash as XORNAME_LEN == SK_LENs + let xorname = xor_name::XorName::from_content(&bytes); + // before generating the sk from these bytes. + self.vault_secret_key = Some(SecretKey::from_bytes(xorname.0)?); + + Ok(self) + } + + /// Retrieves and returns a decrypted vault if one exists. + pub async fn fetch_and_decrypt_vault(&self) -> Result, VaultError> { + let Some(vault_secret_key) = self.vault_secret_key.as_ref() else { + return Err(VaultError::NoVaultPacketDefined); + }; + + let pad = self.get_vault_from_network().await?; + + Ok(pad.decrypt_data(vault_secret_key)?) + } + + /// Gets the vault Scratchpad from a provided client public key + async fn get_vault_from_network(&self) -> Result { + // let vault = self.vault.as_ref()?; + let Some(vault_secret_key) = self.vault_secret_key.as_ref() else { + return Err(VaultError::NoVaultPacketDefined); + }; + + let client_pk = vault_secret_key.public_key(); + + let scratch_address = ScratchpadAddress::new(client_pk); + let network_address = NetworkAddress::from_scratchpad_address(scratch_address); + let scratch_key = network_address.to_record_key(); + + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }; + + let record = self + .network + .get_record_from_network(scratch_key, &get_cfg) + .await?; + + let pad = try_deserialize_record::(&record) + .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?; + + Ok(pad) + } + + /// Put data into the client's VaultPacket + /// + /// Returns Ok(None) early if no vault packet is defined. + /// + /// Pays for a new VaultPacket if none yet created for the client. Returns the current version + /// of the data on success. 
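+    ///
+    /// The returned counter comes from `Scratchpad::update_and_sign`, which
+    /// bumps a monotonic version on each write; nodes use it to reject stale
+    /// scratchpad updates.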
+ pub async fn write_bytes_to_vault_if_defined( + &mut self, + data: Bytes, + wallet: &mut HotWallet, + ) -> Result, PutError> { + // Exit early if no vault packet defined + let Some(client_sk) = self.vault_secret_key.as_ref() else { + return Ok(None); + }; + + let client_pk = client_sk.public_key(); + + let pad_res = self.get_vault_from_network().await; + + let mut is_new = true; + let mut scratch = if let Ok(existing_data) = pad_res { + tracing::info!("Scratchpad already exists, returning existing data"); + + info!( + "scratch already exists, is version {:?}", + existing_data.count() + ); + + is_new = false; + existing_data + } else { + tracing::trace!("new scratchpad creation"); + Scratchpad::new(client_pk) + }; + + let next_count = scratch.update_and_sign(data, client_sk); + let scratch_address = scratch.network_address(); + let scratch_key = scratch_address.to_record_key(); + + let record = if is_new { + self.pay( + [&scratch_address].iter().filter_map(|f| f.as_xorname()), + wallet, + ) + .await?; + + let (payment, _payee) = self.get_recent_payment_for_addr( + &scratch_address.as_xorname().ok_or(PutError::VaultXorName)?, + wallet, + )?; + + Record { + key: scratch_key, + value: try_serialize_record(&(payment, scratch), RecordKind::ScratchpadWithPayment) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: scratch_key, + value: try_serialize_record(&scratch, RecordKind::Scratchpad) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: None, + verification: Some(( + VerificationKind::Network, + GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }, + )), + }; + + self.network.put_record(record, &put_cfg).await?; + + Ok(Some(next_count)) + } +} diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 63c88172f6..180fd49644 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -1,6 +1,8 @@ use std::time::Duration; use autonomi::Client; +use bytes::Bytes; +use eyre::{bail, Result}; use tokio::time::sleep; mod common; @@ -30,3 +32,45 @@ async fn file() -> Result<(), Box> { Ok(()) } + +// files and vault feats +#[cfg(all(feature = "files", feature = "vault"))] +#[tokio::test] +async fn file_into_vault() -> Result<()> { + common::enable_logging(); + + let mut client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + let mut wallet = common::load_hot_wallet_from_faucet(); + + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + sleep(Duration::from_secs(2)).await; + + let root_fetched = client.fetch_root(addr).await?; + + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); + + // now assert over the stored account packet + let new_client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + if let Some(ap) = new_client.fetch_and_decrypt_vault().await? 
+        let ap_root_fetched = Client::deserialise_root(ap)?;
+
+        assert_eq!(
+            root.map, ap_root_fetched.map,
+            "root fetched should match root put"
+        );
+    } else {
+        bail!("No account packet found");
+    }
+
+    Ok(())
+}
diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index 26b967a239..55183866b8 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -955,7 +955,7 @@ mod tests {
 
     use super::*;
     use bls::SecretKey;
-    use sn_protocol::storage::{Scratchpad, ScratchpadAddress};
+    use sn_protocol::storage::{try_deserialize_record, Scratchpad};
     use xor_name::XorName;
 
     use bytes::Bytes;
@@ -1220,7 +1220,7 @@ mod tests {
     }
 
     #[tokio::test]
-    async fn can_store_and_retrieve_scratchpad() {
+    async fn can_store_and_retrieve_scratchpad() -> eyre::Result<()> {
         let temp_dir = std::env::temp_dir();
         let store_config = NodeRecordStoreConfig {
             storage_dir: temp_dir,
@@ -1238,22 +1238,21 @@ mod tests {
         );
 
         // Create a scratchpad
-        let scratchpad_data = Bytes::from_static(b"Test scratchpad data");
-
+        let unencrypted_scratchpad_data = Bytes::from_static(b"Test scratchpad data");
         let owner_sk = SecretKey::random();
         let owner_pk = owner_sk.public_key();
 
-        let mut signing_bytes = 0_u64.to_be_bytes().to_vec();
-        signing_bytes.extend(XorName::from_content(&scratchpad_data).to_vec()); // add the count
+        let mut scratchpad = Scratchpad::new(owner_pk);
+
+        let _next_version =
+            scratchpad.update_and_sign(unencrypted_scratchpad_data.clone(), &owner_sk);
 
-        let sig = owner_sk.sign(&signing_bytes);
-        let scratchpad = Scratchpad::new(owner_pk, scratchpad_data.clone(), 0, sig);
         let scratchpad_address = *scratchpad.address();
 
         // Create a record from the scratchpad
         let record = Record {
             key: NetworkAddress::ScratchpadAddress(scratchpad_address).to_record_key(),
-            value: scratchpad_data.to_vec(),
+            value: try_serialize_record(&scratchpad, RecordKind::Scratchpad)?.to_vec(),
             expires: None,
             publisher: None,
         };
@@ -1277,24 +1276,30 @@ mod tests {
         assert!(stored_record.is_some(), "Scratchpad should be stored");
 
         if let Some(stored) = stored_record {
+            let scratchpad = try_deserialize_record::<Scratchpad>(&stored)?;
+
+            let stored_address = scratchpad.address();
             assert_eq!(
-                stored.value, scratchpad_data,
-                "Stored scratchpad data should match original"
+                stored_address, &scratchpad_address,
+                "Stored scratchpad address should match original"
             );
 
-            let stored_address = ScratchpadAddress::new(owner_pk);
+            let decrypted_data = scratchpad.decrypt_data(&owner_sk)?;
+
             assert_eq!(
-                stored_address, scratchpad_address,
-                "Stored scratchpad address should match original"
+                decrypted_data,
+                Some(unencrypted_scratchpad_data),
+                "Stored scratchpad data should match original"
             );
         }
 
-        // Clean up
         store.remove(&record.key);
         assert!(
             store.get(&record.key).is_none(),
             "Scratchpad should be removed after cleanup"
         );
+
+        Ok(())
     }
 
     #[tokio::test]
     async fn pruning_on_full() -> Result<()> {
diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs
index 8e13e698aa..8839c8d631 100644
--- a/sn_node/src/put_validation.rs
+++ b/sn_node/src/put_validation.rs
@@ -410,7 +410,7 @@ impl Node {
 
         // check that the Scratchpad is present locally and that we don't have a newer version
         if let Some(local_pad) = self.network().get_local_record(&scratchpad_key).await? {
            let local_pad = try_deserialize_record::<Scratchpad>(&local_pad)?;
-            if local_pad.counter >= scratchpad.counter {
+            if local_pad.count() >= scratchpad.count() {
                 warn!("Rejecting Scratchpad PUT with counter less than or equal to the current counter");
                 return Err(Error::IgnoringOutdatedScratchpadPut);
             }
diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs
index c89ff9cba3..f73c356b53 100644
--- a/sn_protocol/src/error.rs
+++ b/sn_protocol/src/error.rs
@@ -48,6 +48,9 @@ pub enum Error {
     /// The provided String can't be deserialized as a RegisterAddress
     #[error("Failed to deserialize hex ScratchpadAddress")]
     ScratchpadHexDeserializeFailed,
+    /// The provided SecretKey failed to decrypt the data
+    #[error("Failed to derive CipherText from encrypted_data")]
+    ScratchpadCipherTextFailed,
 
     // ---------- payment errors
     #[error("There was an error getting the storecost from kademlia store")]
diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs
index 1704df2038..a5aa364e25 100644
--- a/sn_protocol/src/lib.rs
+++ b/sn_protocol/src/lib.rs
@@ -97,6 +97,10 @@ impl NetworkAddress {
     pub fn from_spend_address(cash_note_address: SpendAddress) -> Self {
         NetworkAddress::SpendAddress(cash_note_address)
     }
+    /// Return a `NetworkAddress` representation of the `ScratchpadAddress`.
+    pub fn from_scratchpad_address(address: ScratchpadAddress) -> Self {
+        NetworkAddress::ScratchpadAddress(address)
+    }
 
     /// Return a `NetworkAddress` representation of the `RegisterAddress`.
     pub fn from_register_address(register_address: RegisterAddress) -> Self {
@@ -145,6 +149,7 @@ impl NetworkAddress {
             NetworkAddress::SpendAddress(cash_note_address) => Some(*cash_note_address.xorname()),
             NetworkAddress::ChunkAddress(chunk_address) => Some(*chunk_address.xorname()),
             NetworkAddress::RegisterAddress(register_address) => Some(register_address.xorname()),
+            NetworkAddress::ScratchpadAddress(address) => Some(address.xorname()),
             _ => None,
         }
     }
diff --git a/sn_protocol/src/storage/scratchpad.rs b/sn_protocol/src/storage/scratchpad.rs
index a1f792da60..ea38d2e686 100644
--- a/sn_protocol/src/storage/scratchpad.rs
+++ b/sn_protocol/src/storage/scratchpad.rs
@@ -7,8 +7,9 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 use super::ScratchpadAddress;
+use crate::error::{Error, Result};
 use crate::NetworkAddress;
-use bls::{PublicKey, Signature};
+use bls::{Ciphertext, PublicKey, SecretKey, Signature};
 use bytes::Bytes;
 use serde::{Deserialize, Serialize};
 
@@ -21,39 +22,70 @@ use xor_name::XorName;
 pub struct Scratchpad {
     /// Network address. Omitted when serialising and
     /// calculated from the `encrypted_data` when deserialising.
-    pub address: ScratchpadAddress,
 
+    address: ScratchpadAddress,
     /// Contained data. This should be encrypted
     #[debug(skip)]
-    pub encrypted_data: Bytes,
 
+    encrypted_data: Bytes,
     /// Monotonically increasing counter to track the number of times this has been updated.
-    pub counter: u64,
 
+    counter: u64,
     /// Signature over `Vec<u8>`.extend(Xorname::from_content(encrypted_data).to_vec()) from the owning key.
-    pub signature: Signature,
+    /// Required for scratchpad to be valid.
+    signature: Option<Signature>,
 }
 
 impl Scratchpad {
     /// Creates a new instance of `Scratchpad`.
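+    /// The new scratchpad starts empty at counter 0 and is unsigned until
+    /// `update_and_sign` is called.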
-    pub fn new(
-        owner: PublicKey,
-        encrypted_data: Bytes,
-        counter: u64,
-        signature: Signature,
-    ) -> Self {
+    pub fn new(owner: PublicKey) -> Self {
         Self {
             address: ScratchpadAddress::new(owner),
-            encrypted_data,
-            counter,
-            signature,
+            encrypted_data: Bytes::new(),
+            counter: 0,
+            signature: None,
         }
     }
 
+    /// Return the current count
+    pub fn count(&self) -> u64 {
+        self.counter
+    }
+
+    /// Increments the counter value.
+    pub fn increment(&mut self) -> u64 {
+        self.counter += 1;
+
+        self.counter
+    }
+
+    /// Encrypts the provided data and updates the signature with the provided
+    /// secret key, returning the next counter value.
+    pub fn update_and_sign(&mut self, unencrypted_data: Bytes, sk: &SecretKey) -> u64 {
+        let next_count = self.increment();
+
+        let pk = self.owner();
+
+        self.encrypted_data = Bytes::from(pk.encrypt(unencrypted_data).to_bytes());
+
+        let encrypted_data_xorname = self.encrypted_data_hash().to_vec();
+
+        let mut bytes_to_sign = self.counter.to_be_bytes().to_vec();
+        bytes_to_sign.extend(encrypted_data_xorname);
+
+        self.signature = Some(sk.sign(&bytes_to_sign));
+        next_count
+    }
+
     /// Verifies the signature and content of the scratchpad are valid for the
     /// owner's public key.
     pub fn is_valid(&self) -> bool {
-        let mut signing_bytes = self.counter.to_be_bytes().to_vec();
-        signing_bytes.extend(self.encrypted_data_hash().to_vec()); // add the count
+        if let Some(signature) = &self.signature {
+            let mut signing_bytes = self.counter.to_be_bytes().to_vec();
+            signing_bytes.extend(self.encrypted_data_hash().to_vec()); // add the count
 
-        self.owner().verify(&self.signature, &signing_bytes)
+            self.owner().verify(signature, &signing_bytes)
+        } else {
+            false
+        }
     }
 
     /// Returns the encrypted_data.
@@ -61,6 +93,16 @@ impl Scratchpad {
         &self.encrypted_data
     }
 
+    /// Returns the encrypted_data, decrypted via the passed SecretKey
+    pub fn decrypt_data(&self, sk: &SecretKey) -> Result<Option<Bytes>> {
+        Ok(sk
+            .decrypt(
+                &Ciphertext::from_bytes(&self.encrypted_data)
+                    .map_err(|_| Error::ScratchpadCipherTextFailed)?,
+            )
+            .map(Bytes::from))
+    }
+
     /// Returns the encrypted_data hash
     pub fn encrypted_data_hash(&self) -> XorName {
         XorName::from_content(&self.encrypted_data)

From 0f809b0bc0608b7b79c0daf73dd3aa3fb5c8bf1c Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Wed, 18 Sep 2024 10:51:25 +0900
Subject: [PATCH 033/255] chore(lp): remove some dead code

---
 node-launchpad/src/components/status.rs | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 4406453bb8..d06e777953 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -227,22 +227,6 @@ impl Status {
         };
         self.node_table_state.select(Some(i));
     }
-
-    #[expect(dead_code)]
-    fn unselect_table_item(&mut self) {
-        self.node_table_state.select(None);
-    }
-
-    #[expect(dead_code)]
-    fn get_service_name_of_selected_table_item(&self) -> Option<String> {
-        let Some(service_idx) = self.node_table_state.selected() else {
-            warn!("No item selected from table, not removing anything");
-            return None;
-        };
-        self.node_services
-            .get(service_idx)
-            .map(|data| data.service_name.clone())
-    }
 }
 
 impl Component for Status {

From 1f6f3583d3d410163a7cc458ee85eca3a8db4afa Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Wed, 18 Sep 2024 14:38:33 +0200
Subject: [PATCH 034/255] fix(autonomi): verify register on fetch

Registers are fetched and contain CRDT operations. These operations should be
valid, so we check them as soon as they are fetched; a sketch of the added
check follows.
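In essence, the added check is (a sketch mirroring the registers.rs hunk below):

    let register: SignedRegister =
        try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?;
    register
        .verify()
        .map_err(|_| RegisterError::FailedVerification)?;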
Later, when reading or updating a register, we assert that it is valid.
---
 autonomi/src/client/registers.rs | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 8a8657c5b3..6aa77cf74b 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -28,6 +28,8 @@ pub enum RegisterError {
     Network(#[from] NetworkError),
     #[error("Serialization error")]
     Serialization,
+    #[error("Register could not be verified (corrupt)")]
+    FailedVerification,
     #[error("Payment failure occurred during register creation.")]
     Pay(#[from] PayError),
     #[error("Failed to retrieve wallet payment")]
@@ -54,7 +56,8 @@ impl Register {
     pub fn values(&self) -> Vec<Bytes> {
         self.inner
             .clone()
-            .base_register()
+            .register()
+            .expect("register to be valid")
             .read()
             .into_iter()
             .map(|(_hash, value)| value.into())
@@ -146,6 +149,11 @@
         let register: SignedRegister =
             try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?;
 
+        // Make sure the fetched record contains valid CRDT operations
+        register
+            .verify()
+            .map_err(|_| RegisterError::FailedVerification)?;
+
         Ok(Register { inner: register })
     }
 
@@ -158,7 +166,11 @@
     ) -> Result<(), RegisterError> {
         // Fetch the current register
         let mut signed_register = register.inner;
-        let mut register = signed_register.base_register().clone();
+        let mut register = signed_register
+            .clone()
+            .register()
+            .expect("register to be valid")
+            .clone();
 
         // Get all current branches
         let children: BTreeSet<EntryHash> = register.read().into_iter().map(|(e, _)| e).collect();

From 2895fc9b52727318384aab61dfebe76b391ab2da Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Wed, 18 Sep 2024 10:54:45 +0900
Subject: [PATCH 035/255] feat: remove version string restriction as unused

---
 sn_peers_acquisition/src/lib.rs |  9 ++------
 sn_protocol/src/version.rs      | 41 ++++-----------------------------
 2 files changed, 6 insertions(+), 44 deletions(-)

diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs
index db467d6249..719a9ad0d4 100644
--- a/sn_peers_acquisition/src/lib.rs
+++ b/sn_peers_acquisition/src/lib.rs
@@ -15,8 +15,6 @@ use lazy_static::lazy_static;
 use libp2p::{multiaddr::Protocol, Multiaddr};
 use rand::{seq::SliceRandom, thread_rng};
 use reqwest::Client;
-#[cfg(feature = "network-contacts")]
-use sn_protocol::version::get_network_version;
 use std::time::Duration;
 use tracing::*;
 use url::Url;
@@ -24,11 +22,8 @@ use url::Url;
 #[cfg(feature = "network-contacts")]
 lazy_static! {
     // URL containing the multi-addresses of the bootstrap nodes.
-    pub static ref NETWORK_CONTACTS_URL: String = {
-        let version = get_network_version();
-        let version_prefix = if !version.is_empty() { format!("{version}-") } else { version.to_string() };
-        format!("https://sn-testnet.s3.eu-west-2.amazonaws.com/{version_prefix}network-contacts")
-    };
+    pub static ref NETWORK_CONTACTS_URL: String =
+        "https://sn-testnet.s3.eu-west-2.amazonaws.com/network-contacts".to_string();
 }
 
 // The maximum number of retries to be performed while trying to get peers from a URL.
diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs
index b507c2d725..ee88185752 100644
--- a/sn_protocol/src/version.rs
+++ b/sn_protocol/src/version.rs
@@ -13,8 +13,7 @@ lazy_static! {
     /// The node version used during Identify Behaviour.
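+    /// After this change it has the shape `safe/node/<version>/<key>`, e.g. `safe/node/0.1/1` (illustrative values).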
pub static ref IDENTIFY_NODE_VERSION_STR: String = format!( - "safe{}/node/{}/{}", - write_network_version_with_slash(), + "safe/node/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -22,8 +21,7 @@ lazy_static! { /// The client version used during Identify Behaviour. pub static ref IDENTIFY_CLIENT_VERSION_STR: String = format!( - "safe{}/client/{}/{}", - write_network_version_with_slash(), + "safe/client/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -31,8 +29,7 @@ lazy_static! { /// The req/response protocol version pub static ref REQ_RESPONSE_VERSION_STR: String = format!( - "/safe{}/node/{}/{}", - write_network_version_with_slash(), + "/safe/node/{}/{}", get_truncate_version_str(), get_key_version_str(), ); @@ -40,42 +37,12 @@ lazy_static! { /// The identify protocol version pub static ref IDENTIFY_PROTOCOL_STR: String = format!( - "safe{}/{}/{}", - write_network_version_with_slash(), + "safe/{}/{}", get_truncate_version_str(), get_key_version_str(), ); } -/// Get the network version string. -/// If the network version mode env variable is set to `restricted`, then the git branch is used as the version. -/// Else any non empty string is used as the version string. -/// If the env variable is empty or not set, then we do not apply any network versioning. -pub fn get_network_version() -> &'static str { - // Set this env variable to provide custom network versioning. If it is set to 'restricted', then the git branch name - // is used as the version string. Else we directly use the passed in string as the version. - match option_env!("NETWORK_VERSION_MODE") { - Some(value) => { - if value == "restricted" { - sn_build_info::git_branch() - } else { - value - } - } - _ => "", - } -} - -/// Helper to write the network version with `/` appended if it is not empty -fn write_network_version_with_slash() -> String { - let version = get_network_version(); - if version.is_empty() { - version.to_string() - } else { - format!("/{version}") - } -} - // Protocol support shall be downward compatible for patch only version update. // i.e. versions of `A.B.X` or `A.B.X-alpha.Y` shall be considered as a same protocol of `A.B` fn get_truncate_version_str() -> String { From a8514d78adf5f59124244b0ffcc3ea67e7206d3a Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 18 Sep 2024 12:53:02 +0900 Subject: [PATCH 036/255] chore(proto): remove OldKeyPrint variants Only used for one test arbitrarily testing itself... 
---
 sn_protocol/src/lib.rs | 54 +-----------------------------------------
 1 file changed, 1 insertion(+), 53 deletions(-)

diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs
index a5aa364e25..4d3b92628d 100644
--- a/sn_protocol/src/lib.rs
+++ b/sn_protocol/src/lib.rs
@@ -394,62 +394,10 @@ impl<'a> std::fmt::Debug for PrettyPrintRecordKey<'a> {
 
 #[cfg(test)]
 mod tests {
-    use crate::{NetworkAddress, PrettyPrintRecordKey};
+    use crate::NetworkAddress;
     use bls::rand::thread_rng;
-    use bytes::Bytes;
-    use libp2p::kad::{KBucketKey, RecordKey};
     use sn_transfers::SpendAddress;
 
-    // A struct that implements hex representation of RecordKey using `bytes::Bytes`
-    struct OldRecordKeyPrint(RecordKey);
-
-    // old impl using Bytes
-    impl std::fmt::Display for OldRecordKeyPrint {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            let b: Vec<u8> = self.0.as_ref().to_vec();
-            let record_key_b = Bytes::from(b);
-            let record_key_str = &format!("{record_key_b:64x}")[0..6]; // only the first 6 chars are logged
-            write!(
-                f,
-                "{record_key_str}({:?})",
-                OldKBucketKeyPrint(NetworkAddress::from_record_key(&self.0).as_kbucket_key())
-            )
-        }
-    }
-
-    impl std::fmt::Debug for OldRecordKeyPrint {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            write!(f, "{self}")
-        }
-    }
-
-    // A struct that implements hex representation of KBucketKey using `bytes::Bytes`
-    pub struct OldKBucketKeyPrint(KBucketKey<Vec<u8>>);
-
-    // old impl using Bytes
-    impl std::fmt::Display for OldKBucketKeyPrint {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            let kbucket_key_b = Bytes::from(self.0.hashed_bytes().to_vec());
-            write!(f, "{kbucket_key_b:64x}")
-        }
-    }
-
-    impl std::fmt::Debug for OldKBucketKeyPrint {
-        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-            write!(f, "{self}")
-        }
-    }
-
-    #[test]
-    fn verify_custom_hex_representation() {
-        let random = xor_name::XorName::random(&mut thread_rng());
-        let key = RecordKey::new(&random.0);
-        let pretty_key = PrettyPrintRecordKey::from(&key).into_owned();
-        let old_record_key = OldRecordKeyPrint(key);
-
-        assert_eq!(format!("{pretty_key:?}"), format!("{old_record_key:?}"));
-    }
-
     #[test]
     fn verify_spend_addr_is_actionable() {
         let xorname = xor_name::XorName::random(&mut thread_rng());

From 43ef9fe714e63a664a97b9b5877048fcf6ad40cb Mon Sep 17 00:00:00 2001
From: Ermine Jose
Date: Thu, 19 Sep 2024 14:25:37 +0530
Subject: [PATCH 037/255] fix: enable e2e wan nightly tests

---
 .github/workflows/nightly_wan.yml | 1161 ++++++++++++++---------------
 1 file changed, 565 insertions(+), 596 deletions(-)

diff --git a/.github/workflows/nightly_wan.yml b/.github/workflows/nightly_wan.yml
index 78c1ff756f..0ee6bc1ad3 100644
--- a/.github/workflows/nightly_wan.yml
+++ b/.github/workflows/nightly_wan.yml
@@ -1,13 +1,16 @@
 name: Nightly -- Full WAN Network Tests
 
 on:
-#  todo: this is totally broken atm. Fix and re-enable.
-#  schedule:
-#    - cron: "0 0 * * *"
+  schedule:
+    - cron: "0 0 * * *"
+# enable the block below for testing purposes.
+  # pull_request:
+  #   branches: ["*"]
   workflow_dispatch:
 
 env:
   CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI.
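+  # Name of the ephemeral WAN testnet this workflow creates and tears down below.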
+ NETWORK_NAME: DEV-01 WORKFLOW_URL: https://github.com/maidsafe/stableset_net/actions/runs jobs: e2e: @@ -29,93 +32,95 @@ jobs: run: cargo build --release --bin safe timeout-minutes: 30 - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main + - name: setup testnet-deploy + uses: maidsafe/sn-testnet-control-action/init-testnet-deploy@main with: - action: create - re-attempts: 3 - rust-log: debug ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} aws-region: eu-west-2 do-token: ${{ secrets.SN_TESTNET_DO_PAT }} ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 + + - name: launch ${{ env.NETWORK_NAME }} + uses: maidsafe/sn-testnet-control-action/launch-network@main + with: + ansible-forks: ${{ env.ANSIBLE_FORKS }} + environment-type: development + node-vm-count: 10 node-count: 20 - vm-count: 1 + uploader-vm-count: 0 + bootstrap-node-vm-count: 0 + log-format: json + network-name: ${{ env.NETWORK_NAME }} provider: digital-ocean - testnet-name: NightlyE2E - # if we were to run on a PR, use the below - # safe-network-user: ${{ github.actor }}" - # safe-network-branch: ${{ github.event.pull_request.head.ref }} - # Specify custom branch to prevent the deployer from fetching the latest release. - # The latest release contains the `network-contacts` feature turned on. - safe-network-user: maidsafe safe-network-branch: main + safe-network-user: maidsafe - name: Check env variables shell: bash run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" + echo "Peer is $SAFE_PEERS" + echo "Deployment inventory is $SN_INVENTORY" + - name: start faucet + uses: maidsafe/sn-testnet-control-action/start-faucet@main + with: + network-name: ${{ env.NETWORK_NAME }} + - name: Obtain the funds from the faucet run: | + set -e + # read the inventory file - inventory_path=/home/runner/.local/share/safe/testnet-deploy/NightlyE2E-inventory.json + inventory_path=/home/runner/.local/share/safe/testnet-deploy/${{ env.NETWORK_NAME }}-inventory.json echo "Inventory Path: $inventory_path" faucet_address=$(jq -r '.faucet_address' $inventory_path) cargo run --bin safe --release -- wallet get-faucet ${faucet_address} env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to carry out chunk actions - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir files upload "./resources" --retry-strategy quick env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to create a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir register create -n baobao env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to get a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao + run: | + set -e + cargo run --bin safe --release -- --log-output-dest=data-dir register get -n baobao env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to edit a register - run: cargo run --bin safe --release -- --log-output-dest=data-dir register edit -n baobao wood + run: | + set -e + cargo run --bin safe --release -- 
--log-output-dest=data-dir register edit -n baobao wood env: SN_LOG: "all" timeout-minutes: 2 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyE2E - safe-network-user: maidsafe - safe-network-branch: main - + + # - name: Fetch network logs + # uses: maidsafe/sn-testnet-control-action/fetch-logs@main + # with: + # re-attempts: 3 + # rust-log: debug + # provider: digital-ocean + # network-name: ${{ env.NETWORK_NAME }} + - name: Upload local logs if: always() uses: actions/upload-artifact@v4 @@ -126,554 +131,518 @@ jobs: ~/.local/share/safe/*/*/*.log* ~/.local/share/safe/client/logs/*/*.log* - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyE2E - safe-network-user: maidsafe - safe-network-branch: main - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly E2E Test Run Failed" - - spend_test: - name: Spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run - timeout-minutes: 40 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - - name: execute the sequential transfers test - run: cargo test --release -p sn_node --test sequential_transfers -- 
--nocapture --test-threads=1 - env: - SN_LOG: "all" - timeout-minutes: 45 - - - name: execute the storage payment tests - run: cargo test --release -p sn_node --test storage_payments -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - timeout-minutes: 45 - - - name: execute the double spend tests - run: cargo test --release -p sn_node --test double_spend -- --nocapture --test-threads=1 - timeout-minutes: 45 - - - - name: execute the spend simulation tests - run: cargo test --release -p sn_node --test spend_simulation -- --nocapture --test-threads=1 - timeout-minutes: 45 - - - name: Small wait to allow reward receipt - run: sleep 30 - timeout-minutes: 1 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlySpendTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlySpendTest - safe-network-user: maidsafe - safe-network-branch: main - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Spend Test Run Failed" - - churn: - name: Network churning tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - wan_logs_path: /home/runner/sn-testnet-deploy/logs - local_safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - name: install ripgrep - run: sudo apt-get -y install ripgrep - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build churn tests - run: cargo test --release -p sn_node --test data_with_churn --no-run - timeout-minutes: 30 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - 
rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --test data_with_churn -- --nocapture - env: - # TEST_DURATION_MINS: 60 - # TEST_CHURN_CYCLES: 6 - # SN_LOG: "all" - # todo: lower time for testing - TEST_DURATION_MINS: 10 - TEST_CHURN_CYCLES: 2 - SN_LOG: "all" - timeout-minutes: 90 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlyChurnTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network + - name: destroy network if: always() - uses: maidsafe/sn-testnet-action@main + uses: maidsafe/sn-testnet-control-action/destroy-network@main with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 + network-name: ${{ env.NETWORK_NAME }} provider: digital-ocean - testnet-name: NightlyChurnTest - safe-network-user: maidsafe - safe-network-branch: main - - # TODO: re-enable the below scripts once we have proper way to restart nodes. - # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same - # log location and the logs are over written. Hence the scripts might give false outputs. 
- - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run : | - # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # echo "Node dir count is $node_count" - # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - - # # if [ $restart_count -lt $node_count ]; then - # # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # # exit 1 - # # fi - - # - name: Verify data replication using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of replication - # # TODO: make this use an env var, or relate to testnet size - # run : | - # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Carried out $fetching_attempt_count fetching attempts" - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # if [ $fetching_attempt_count -lt $node_count ]; then - # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" - # exit 1 - # fi - - # Only error out after uploading the logs - - name: Don't log raw data - if: always() && matrix.os != 'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - - # sanity check - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for local safe path" - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! 
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for wan logs path" - fi - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Churn Test Run Failed" - - verify_data_location_routing_table: - name: Verify data location and Routing Table - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - wan_logs_path: /home/runner/sn-testnet-deploy/logs - local_safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - name: install ripgrep - run: sudo apt-get -y install ripgrep - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: Build data location and routing table tests - run: cargo test --release -p sn_node --test verify_data_location --test verify_routing_table --no-run - timeout-minutes: 30 - - - name: Start a WAN network - uses: maidsafe/sn-testnet-action@main - with: - action: create - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - security-group-id: sg-0d47df5b3f0d01e2a - subnet-id: subnet-018f2ab26755df7f9 - node-count: 20 - vm-count: 1 - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - safe-network-branch: main - - - name: Check env variables - shell: bash - run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" - - - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - timeout-minutes: 5 - - - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --test verify_data_location -- --nocapture + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 env: - SN_LOG: "all" - timeout-minutes: 90 - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - timeout-minutes: 5 - - - name: Fetch network logs - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: logs - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - 
safe-network-branch: main - - - name: Upload local logs - if: always() - uses: actions/upload-artifact@v4 - with: - name: local_logs_NightlyDataLocationTest - path: | - ~/.local/share/safe/node/*/logs/*.log* - ~/.local/share/safe/*/*/*.log* - ~/.local/share/safe/client/logs/*/*.log* - - - name: Stop the WAN network - if: always() - uses: maidsafe/sn-testnet-action@main - with: - action: destroy - re-attempts: 3 - rust-log: debug - ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} - aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} - aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} - aws-region: eu-west-2 - do-token: ${{ secrets.SN_TESTNET_DO_PAT }} - ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} - node-count: 20 - vm-count: 1 - provider: digital-ocean - testnet-name: NightlyDataLocationTest - safe-network-user: maidsafe - safe-network-branch: main - - # TODO: re-enable the below scripts once we have proper way to restart nodes. - # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same - # log location and the logs are over written. Hence the scripts might give false outputs. - - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run : | - # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) - # echo "Node dir count is $node_count" - # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - - # # if [ $restart_count -lt $node_count ]; then - # # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # # exit 1 - # # fi - - # Only error out after uploading the logs - - name: Don't log raw data - if: always() && matrix.os != 'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi - - # sanity check - if ! 
rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' - then - echo "Sanity check pass for local safe path" - fi - # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log - #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log - if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' - then - echo echo "Sanity check pass for wan logs path" - fi - - # - name: post notification to slack on failure - # if: ${{ failure() }} - # uses: bryannice/gitactions-slack-notification@2.0.0 - # env: - # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} - # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" - # SLACK_TITLE: "Nightly Data Location Test Run Failed" + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Nightly E2E Test Run Failed" + + # spend_test: + # name: Spend tests against network + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run + # timeout-minutes: 40 + + # - name: setup testnet-deploy + # uses: maidsafe/sn-testnet-control-action/init-testnet-deploy@main + # with: + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + + # - name: launch ${{ env.NETWORK_NAME }} + # uses: maidsafe/sn-testnet-control-action/launch-network@main + # with: + # ansible-forks: ${{ env.ANSIBLE_FORKS }} + # beta-encryption-key: ${{ env.DEFAULT_PAYMENT_FORWARD_SK }} + # environment-type: development + # faucet-version: ${{ env.FAUCET_VERSION }} + # log-format: json + # network-name: ${{ env.NETWORK_NAME }} + # network-contacts-file-name: ${{ env.NETWORK_CONTACTS_FILE_NAME }} + # provider: digital-ocean + # safe-network-branch: main + # safe-network-user: maidsafe + + # - name: Check env variables + # shell: bash + # run: | + # echo "Peer is $SAFE_PEERS" + # echo "Deployment inventory is $SN_INVENTORY" + + # - name: execute the sequential transfers test + # run: cargo test --release -p sn_node --test sequential_transfers -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # timeout-minutes: 45 + + # - name: execute the storage payment tests + # run: cargo test --release -p sn_node --test storage_payments -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # timeout-minutes: 45 + + # - name: execute the double spend tests + # run: cargo test --release -p sn_node --test double_spend -- --nocapture --test-threads=1 + # timeout-minutes: 45 + + + # - name: execute the spend simulation tests + # run: cargo test --release -p sn_node --test spend_simulation -- --nocapture --test-threads=1 + # timeout-minutes: 45 + + # - 
name: Small wait to allow reward receipt + # run: sleep 30 + # timeout-minutes: 1 + + # - name: Fetch network logs + # uses: ermineJose/sn-testnet-control-action/fetch-logs@feat-add_fetch-logs-action + # with: + # re-attempts: 3 + # rust-log: debug + # provider: digital-ocean + # testnet-name: ${{ env.NETWORK_NAME }} + + # - name: Upload local logs + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: local_logs_NightlySpendTest + # path: | + # ~/.local/share/safe/node/*/logs/*.log* + # ~/.local/share/safe/*/*/*.log* + # ~/.local/share/safe/client/logs/*/*.log* + + # - name: destroy network + # uses: maidsafe/sn-testnet-control-action/destroy-network@main + # with: + # network-name: ${{ env.NETWORK_NAME }} + # provider: digital-ocean + + # - name: post notification to slack on failure + # if: ${{ failure() }} + # uses: bryannice/gitactions-slack-notification@2.0.0 + # env: + # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + # SLACK_TITLE: "Nightly Spend Test Run Failed" + + # churn: + # name: Network churning tests + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # wan_logs_path: /home/runner/sn-testnet-deploy/logs + # local_safe_path: /home/runner/.local/share/safe + # # - os: windows-latest + # # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # # - os: macos-latest + # # node_data_path: /Users/runner/Library/Application Support/safe/node + # # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 + # + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # + # - name: install ripgrep + # run: sudo apt-get -y install ripgrep + # + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + # + # - name: Build churn tests + # run: cargo test --release -p sn_node --test data_with_churn --no-run + # timeout-minutes: 30 + # + # - name: Start a WAN network + # uses: maidsafe/sn-testnet-action@main + # with: + # action: create + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # security-group-id: sg-0d47df5b3f0d01e2a + # subnet-id: subnet-018f2ab26755df7f9 + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: Check env variables + # shell: bash + # run: | + # echo "Peer is $SAFE_PEERS" + # echo "Deployment inventory is $SN_INVENTORY" + # + # - name: Chunks data integrity during nodes churn + # run: cargo test --release -p sn_node --test data_with_churn -- --nocapture + # env: + # # TEST_DURATION_MINS: 60 + # # TEST_CHURN_CYCLES: 6 + # # SN_LOG: "all" + # # todo: lower time for testing + # TEST_DURATION_MINS: 10 + # TEST_CHURN_CYCLES: 2 + # SN_LOG: "all" + # timeout-minutes: 90 + # + # - name: Fetch network logs + # if: always() + # uses: maidsafe/sn-testnet-action@main + # with: + # action: logs + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} 
+ # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # - name: Upload local logs + # if: always() + # uses: actions/upload-artifact@v4 + # with: + # name: local_logs_NightlyChurnTest + # path: | + # ~/.local/share/safe/node/*/logs/*.log* + # ~/.local/share/safe/*/*/*.log* + # ~/.local/share/safe/client/logs/*/*.log* + # + # - name: Stop the WAN network + # if: always() + # uses: maidsafe/sn-testnet-action@main + # with: + # action: destroy + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} + # node-count: 20 + # vm-count: 1 + # provider: digital-ocean + # testnet-name: NightlyChurnTest + # safe-network-user: maidsafe + # safe-network-branch: main + # + # # TODO: re-enable the below scripts once we have proper way to restart nodes. + # # Currently on remote network (not local), the nodes do not handle restart RPC cmd well. They reuse the same + # # log location and the logs are over written. Hence the scripts might give false outputs. + # + # # - name: Verify restart of nodes using rg + # # shell: bash + # # timeout-minutes: 1 + # # # get the counts, then the specific line, and then the digit count only + # # # then check we have an expected level of restarts + # # # TODO: make this use an env var, or relate to testnet size + # # run : | + # # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "Restart $restart_count nodes" + # # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "PeerRemovedFromRoutingTable $peer_removed times" + # # if [ $peer_removed -lt $restart_count ]; then + # # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + # # exit 1 + # # fi + # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) + # # echo "Node dir count is $node_count" + # # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here + # + # # # if [ $restart_count -lt $node_count ]; then + # # # echo "Restart count of: $restart_count is less than the node count of: $node_count" + # # # exit 1 + # # # fi + # + # # - name: Verify data replication using rg + # # shell: bash + # # timeout-minutes: 1 + # # # get the counts, then the specific line, and then the digit count only + # # # then check we have an expected level of replication + # # # TODO: make this use an env var, or relate to testnet size + # # run : | + # # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.wan_logs_path }}" -c --stats | \ + # # rg "(\d+) matches" | rg "\d+" -o) + # # echo "Carried out $fetching_attempt_count fetching attempts" + # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d 
| awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l) + # # if [ $fetching_attempt_count -lt $node_count ]; then + # # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" + # # exit 1 + # # fi + # + # # Only error out after uploading the logs + # - name: Don't log raw data + # if: always() && matrix.os != 'windows-latest' # causes error + # shell: bash + # timeout-minutes: 10 + # run: | + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + # + # # sanity check + # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo "Sanity check pass for local safe path" + # fi + # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log + # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log + # if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }' + # then + # echo "Sanity check pass for wan logs path" + # fi + # + # # - name: post notification to slack on failure + # # if: ${{ failure() }} + # # uses: bryannice/gitactions-slack-notification@2.0.0 + # # env: + # # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + # # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + # # SLACK_TITLE: "Nightly Churn Test Run Failed" + # + # verify_data_location_routing_table: + # name: Verify data location and Routing Table + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # wan_logs_path: /home/runner/sn-testnet-deploy/logs + # local_safe_path: /home/runner/.local/share/safe + # # - os: windows-latest + # # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # # - os: macos-latest + # # node_data_path: /Users/runner/Library/Application Support/safe/node + # # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 + # + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # + # - name: install ripgrep + # run: sudo apt-get -y install ripgrep + # + # - uses: Swatinem/rust-cache@v2 + # continue-on-error: true + # + # - name: Build data location and routing table tests + # run: cargo test --release -p sn_node --test verify_data_location --test verify_routing_table --no-run + # timeout-minutes: 30 + # + # - name: Start a WAN network + # uses: maidsafe/sn-testnet-action@main + # with: + # action: create + # re-attempts: 3 + # rust-log: debug + # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }} + # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }} + # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }} + # aws-region: eu-west-2 + # do-token: ${{ secrets.SN_TESTNET_DO_PAT }} + # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }} 
+ # security-group-id: sg-0d47df5b3f0d01e2a
+ # subnet-id: subnet-018f2ab26755df7f9
+ # node-count: 20
+ # vm-count: 1
+ # testnet-name: NightlyDataLocationTest
+ # safe-network-user: maidsafe
+ # safe-network-branch: main
+ #
+ # - name: Check env variables
+ # shell: bash
+ # run: |
+ # echo "Peer is $SAFE_PEERS"
+ # echo "Deployment inventory is $SN_INVENTORY"
+ #
+ # - name: Verify the Routing table of the nodes
+ # run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture
+ # timeout-minutes: 5
+ #
+ # - name: Verify the location of the data on the network
+ # run: cargo test --release -p sn_node --test verify_data_location -- --nocapture
+ # env:
+ # SN_LOG: "all"
+ # timeout-minutes: 90
+ #
+ # - name: Verify the routing tables of the nodes
+ # run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture
+ # timeout-minutes: 5
+ #
+ # - name: Fetch network logs
+ # if: always()
+ # uses: maidsafe/sn-testnet-action@main
+ # with:
+ # action: logs
+ # re-attempts: 3
+ # rust-log: debug
+ # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }}
+ # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }}
+ # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }}
+ # aws-region: eu-west-2
+ # do-token: ${{ secrets.SN_TESTNET_DO_PAT }}
+ # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }}
+ # node-count: 20
+ # vm-count: 1
+ # provider: digital-ocean
+ # testnet-name: NightlyDataLocationTest
+ # safe-network-user: maidsafe
+ # safe-network-branch: main
+ #
+ # - name: Upload local logs
+ # if: always()
+ # uses: actions/upload-artifact@v4
+ # with:
+ # name: local_logs_NightlyDataLocationTest
+ # path: |
+ # ~/.local/share/safe/node/*/logs/*.log*
+ # ~/.local/share/safe/*/*/*.log*
+ # ~/.local/share/safe/client/logs/*/*.log*
+ #
+ # - name: Stop the WAN network
+ # if: always()
+ # uses: maidsafe/sn-testnet-action@main
+ # with:
+ # action: destroy
+ # re-attempts: 3
+ # rust-log: debug
+ # ansible-vault-password: ${{ secrets.SN_TESTNET_ANSIBLE_VAULT_PASSWORD }}
+ # aws-access-key-id: ${{ secrets.SN_TESTNET_AWS_ACCESS_KEY_ID }}
+ # aws-access-key-secret: ${{ secrets.SN_TESTNET_AWS_SECRET_ACCESS_KEY }}
+ # aws-region: eu-west-2
+ # do-token: ${{ secrets.SN_TESTNET_DO_PAT }}
+ # ssh-secret-key: ${{ secrets.SN_TESTNET_SSH_KEY }}
+ # node-count: 20
+ # vm-count: 1
+ # provider: digital-ocean
+ # testnet-name: NightlyDataLocationTest
+ # safe-network-user: maidsafe
+ # safe-network-branch: main
+ #
+ # # TODO: re-enable the below scripts once we have a proper way to restart nodes.
+ # # Currently, on a remote network (not local), the nodes do not handle the restart RPC cmd well. They reuse the same
+ # # log location and the logs are overwritten. Hence the scripts might give false outputs.
+ #
+ # # - name: Verify restart of nodes using rg
+ # # shell: bash
+ # # timeout-minutes: 1
+ # # # get the counts, then the specific line, and then the digit count only
+ # # # then check we have an expected level of restarts
+ # # # TODO: make this use an env var, or relate to testnet size
+ # # run: |
+ # # restart_count=$(rg "Node is restarting in" "${{ matrix.wan_logs_path }}" -c --stats | \
+ # # rg "(\d+) matches" | rg "\d+" -o)
+ # # echo "Restarted $restart_count nodes"
+ # # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.wan_logs_path }}" -c --stats | \
+ # # rg "(\d+) matches" | rg "\d+" -o)
+ # # echo "PeerRemovedFromRoutingTable $peer_removed times"
+ # # if [ $peer_removed -lt $restart_count ]; then
+ # # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count"
+ # # exit 1
+ # # fi
+ # # node_count=$(find "${{ matrix.wan_logs_path }}" -type d | awk -F/ 'NF==9' | grep -E "/12D3KooW" | wc -l)
+ # # echo "Node dir count is $node_count"
+ # # # TODO: re-enable this once the testnet dir creation is tidied up to avoid a large count here
+ #
+ # # # if [ $restart_count -lt $node_count ]; then
+ # # # echo "Restart count of: $restart_count is less than the node count of: $node_count"
+ # # # exit 1
+ # # # fi
+ #
+ # # Only error out after uploading the logs
+ # - name: Don't log raw data
+ # if: always() && matrix.os != 'windows-latest' # causes error
+ # shell: bash
+ # timeout-minutes: 10
+ # run: |
+ # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 15000 { print; exit 1 }'
+ # then
+ # echo "We are logging extremely large data"
+ # exit 1
+ # fi
+ # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log
+ # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log
+ # if ! rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 15000 { print; exit 1 }'
+ # then
+ # echo "We are logging extremely large data"
+ # exit 1
+ # fi
+ #
+ # # sanity check
+ # if ! rg '^' "${{ matrix.local_safe_path }}"/client/logs | awk 'length($0) > 1000 { print; exit 1 }'
+ # then
+ # echo "Sanity check pass for local safe path"
+ # fi
+ # # node dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/safenode1/safenode.log
+ # #faucet dir structure: ~/sn-testnet-deploy/logs/NightlyChurnTest/NightlyChurnTest-genesis/faucet/logs/faucet.log
+ # if !
rg '^' "${{ matrix.wan_logs_path }}"/*/*/*/ | awk 'length($0) > 1000 { print; exit 1 }'
+ # then
+ # echo "Sanity check pass for wan logs path"
+ # fi
+ #
+ # # - name: post notification to slack on failure
+ # # if: ${{ failure() }}
+ # # uses: bryannice/gitactions-slack-notification@2.0.0
+ # # env:
+ # # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }}
+ # # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
+ # # SLACK_TITLE: "Nightly Data Location Test Run Failed"

From e3e1d3006ec13a961e54bca0ec37757b685c46c3 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Thu, 19 Sep 2024 20:49:39 +0530
Subject: [PATCH 038/255] feat(manager): provide option to specify node ip

---
 node-launchpad/src/node_mgmt.rs | 2 +
 sn_node_manager/src/add_services/config.rs | 6 +
 sn_node_manager/src/add_services/mod.rs | 2 +
 sn_node_manager/src/add_services/tests.rs | 176 +++++++++++++++++++
 sn_node_manager/src/bin/cli/main.rs | 7 +
 sn_node_manager/src/cmd/node.rs | 4 +
 sn_node_manager/src/lib.rs | 189 +++++++++++++++++++++
 sn_node_manager/src/local.rs | 1 +
 sn_node_manager/src/rpc.rs | 3 +
 sn_service_management/src/node.rs | 15 +-
 10 files changed, 404 insertions(+), 1 deletion(-)

diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs
index fea2eeff0b..1b591e5a95 100644
--- a/node-launchpad/src/node_mgmt.rs
+++ b/node-launchpad/src/node_mgmt.rs
@@ -296,6 +296,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) {
 None,
 None,
 None,
+ None,
 None, // We don't care about the port, as we are scaling down
 config.owner.clone(),
 config.peers_args.clone(),
@@ -366,6 +367,7 @@ async fn add_nodes(
 None,
 None,
 None,
+ None,
 port_range,
 config.owner.clone(),
 config.peers_args.clone(),
diff --git a/sn_node_manager/src/add_services/config.rs b/sn_node_manager/src/add_services/config.rs
index 83b34a17be..ba0873d82c 100644
--- a/sn_node_manager/src/add_services/config.rs
+++ b/sn_node_manager/src/add_services/config.rs
@@ -79,6 +79,7 @@ pub struct InstallNodeServiceCtxBuilder {
 pub log_format: Option<LogFormat>,
 pub name: String,
 pub metrics_port: Option<u16>,
+ pub node_ip: Option<Ipv4Addr>,
 pub node_port: Option<u16>,
 pub owner: Option<String>,
 pub rpc_socket_addr: SocketAddr,
@@ -115,6 +116,10 @@ impl InstallNodeServiceCtxBuilder {
 if self.upnp {
 args.push(OsString::from("--upnp"));
 }
+ if let Some(node_ip) = self.node_ip {
+ args.push(OsString::from("--ip"));
+ args.push(OsString::from(node_ip.to_string()));
+ }
 if let Some(node_port) = self.node_port {
 args.push(OsString::from("--port"));
 args.push(OsString::from(node_port.to_string()));
@@ -166,6 +171,7 @@ pub struct AddNodeServiceOptions {
 pub log_format: Option<LogFormat>,
 pub metrics_port: Option<PortRange>,
 pub owner: Option<String>,
+ pub node_ip: Option<Ipv4Addr>,
 pub node_port: Option<PortRange>,
 pub rpc_address: Option<Ipv4Addr>,
 pub rpc_port: Option<PortRange>,
diff --git a/sn_node_manager/src/add_services/mod.rs b/sn_node_manager/src/add_services/mod.rs
index a1657640ee..bb9b75541a 100644
--- a/sn_node_manager/src/add_services/mod.rs
+++ b/sn_node_manager/src/add_services/mod.rs
@@ -219,6 +219,7 @@ pub async fn add_node(
 log_format: options.log_format,
 metrics_port: metrics_free_port,
 name: service_name.clone(),
+ node_ip: options.node_ip,
 node_port,
 owner: options.owner.clone(),
 rpc_socket_addr,
@@ -250,6 +251,7 @@ pub async fn add_node(
 log_dir_path: service_log_dir_path.clone(),
 log_format: options.log_format,
 metrics_port: metrics_free_port,
+ node_ip: options.node_ip,
 node_port,
 number: node_number,
 reward_balance: None,
diff --git
a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index bf1649df4c..ed10be31cf 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -122,6 +122,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -155,6 +156,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -226,6 +228,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, pid: None, @@ -271,6 +274,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: Some(custom_rpc_address), rpc_port: None, @@ -339,6 +343,7 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -413,6 +418,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -450,6 +456,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( log_format: None, metrics_port: None, name: "safenode2".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), @@ -487,6 +494,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( log_dir_path: node_logs_dir.to_path_buf().join("safenode3"), metrics_port: None, name: "safenode3".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8085), @@ -521,6 +529,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -644,6 +653,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -677,6 +687,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -774,6 +785,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -807,6 +819,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() log_format: None, 
metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -877,6 +890,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -922,6 +936,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { log_format: None, metrics_port: None, name: "safenode2".to_string(), + node_ip: None, node_port: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), owner: None, @@ -956,6 +971,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -997,6 +1013,129 @@ async fn add_new_node_should_add_another_service() -> Result<()> { Ok(()) } +#[tokio::test] +async fn add_node_should_use_custom_ip() -> Result<()> { + let tmp_data_dir = assert_fs::TempDir::new()?; + let node_reg_path = tmp_data_dir.child("node_reg.json"); + + let mut mock_service_control = MockServiceControl::new(); + + let mut node_registry = NodeRegistry { + auditor: None, + faucet: None, + save_path: node_reg_path.to_path_buf(), + nat_status: None, + nodes: vec![], + bootstrap_peers: vec![], + environment_variables: None, + daemon: None, + }; + let latest_version = "0.96.4"; + let temp_dir = assert_fs::TempDir::new()?; + let node_data_dir = temp_dir.child("data"); + node_data_dir.create_dir_all()?; + let node_logs_dir = temp_dir.child("logs"); + node_logs_dir.create_dir_all()?; + let safenode_download_path = temp_dir.child(SAFENODE_FILE_NAME); + safenode_download_path.write_binary(b"fake safenode bin")?; + + let custom_ip = Ipv4Addr::new(192, 168, 1, 1); + + let mut seq = Sequence::new(); + + mock_service_control + .expect_get_available_port() + .times(1) + .returning(|| Ok(12001)) + .in_sequence(&mut seq); + + mock_service_control + .expect_install() + .times(1) + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:12001"), + OsString::from("--root-dir"), + OsString::from( + node_data_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--log-output-dest"), + OsString::from( + node_logs_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--ip"), + OsString::from(custom_ip.to_string()), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: node_data_dir + .to_path_buf() + .join("safenode1") + .join(SAFENODE_FILE_NAME), + username: Some(get_username()), + working_directory: None, + }), + eq(false), + ) + .returning(|_, _| Ok(())) + .in_sequence(&mut seq); + + add_node( + AddNodeServiceOptions { + auto_restart: false, + auto_set_nat_flags: false, + bootstrap_peers: vec![], + count: None, + delete_safenode_src: true, + enable_metrics_server: false, + env_variables: None, + genesis: false, + home_network: false, + local: false, + log_format: None, + metrics_port: None, + owner: None, + node_ip: Some(custom_ip), + node_port: None, + rpc_address: None, + rpc_port: None, + safenode_dir_path: temp_dir.to_path_buf(), + safenode_src_path: safenode_download_path.to_path_buf(), + service_data_dir_path: node_data_dir.to_path_buf(), + service_log_dir_path: node_logs_dir.to_path_buf(), + upnp: false, + user: Some(get_username()), + user_mode: false, + version: 
latest_version.to_string(), + }, + &mut node_registry, + &mock_service_control, + VerbosityLevel::Normal, + ) + .await?; + + safenode_download_path.assert(predicate::path::missing()); + node_data_dir.assert(predicate::path::is_dir()); + node_logs_dir.assert(predicate::path::is_dir()); + + assert_eq!(node_registry.nodes.len(), 1); + assert_eq!(node_registry.nodes[0].node_ip, Some(custom_ip)); + + Ok(()) +} + #[tokio::test] async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { let tmp_data_dir = assert_fs::TempDir::new()?; @@ -1044,6 +1183,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: Some(custom_port), owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -1078,6 +1218,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Single(custom_port)), rpc_address: None, rpc_port: None, @@ -1296,6 +1437,7 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Range(12000, 12002)), rpc_address: None, rpc_port: None, @@ -1346,6 +1488,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R log_format: None, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), metrics_port: None, + node_ip: None, node_port: Some(12000), number: 1, owner: None, @@ -1389,6 +1532,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Single(12000)), rpc_address: None, rpc_port: None, @@ -1437,6 +1581,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us log_format: None, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), metrics_port: None, + node_ip: None, node_port: Some(12000), number: 1, owner: None, @@ -1480,6 +1625,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Range(12000, 12002)), rpc_address: None, rpc_port: None, @@ -1546,6 +1692,7 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Range(12000, 12002)), rpc_address: None, rpc_port: None, @@ -1617,6 +1764,7 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: Some(PortRange::Single(12000)), rpc_address: None, rpc_port: None, @@ -1741,6 +1889,7 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -1953,6 +2102,7 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< log_format: None, metrics_port: Some(PortRange::Range(12000, 12002)), owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2000,6 +2150,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, 
metrics_port: Some(12000), + node_ip: None, node_port: None, number: 1, owner: None, @@ -2043,6 +2194,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use log_format: None, metrics_port: Some(PortRange::Single(12000)), owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2092,6 +2244,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: Some(12000), + node_ip: None, node_port: None, number: 1, owner: None, @@ -2135,6 +2288,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran log_format: None, metrics_port: Some(PortRange::Range(12000, 12002)), owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2331,6 +2485,7 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: Some(PortRange::Range(20000, 20002)), @@ -2389,6 +2544,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2432,6 +2588,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: Some(PortRange::Single(8081)), @@ -2481,6 +2638,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2524,6 +2682,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: Some(PortRange::Range(8081, 8082)), @@ -2597,6 +2756,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -2630,6 +2790,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2700,6 +2861,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -2733,6 +2895,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2803,6 +2966,7 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), @@ -2836,6 +3000,7 @@ async fn 
add_node_should_enable_home_network_if_nat_status_is_private() -> Resul log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -2910,6 +3075,7 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -3521,6 +3687,7 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -3555,6 +3722,7 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -3626,6 +3794,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -3660,6 +3829,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -3731,6 +3901,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -3765,6 +3936,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -3833,6 +4005,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { log_format: None, metrics_port: None, name: "safenode1".to_string(), + node_ip: None, node_port: None, owner: None, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), @@ -3867,6 +4040,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { log_format: None, metrics_port: None, owner: None, + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -3982,6 +4156,7 @@ async fn add_node_should_assign_an_owner() -> Result<()> { log_format: None, metrics_port: None, owner: Some("discord_username".to_string()), + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, @@ -4099,6 +4274,7 @@ async fn add_node_should_auto_restart() -> Result<()> { log_format: None, metrics_port: None, owner: Some("discord_username".to_string()), + node_ip: None, node_port: None, rpc_address: None, rpc_port: None, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 152eb880c1..7e89275279 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -146,6 +146,11 @@ pub enum SubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] metrics_port: Option, + /// Specify the IP address for the safenode service(s). + /// + /// If not set, we bind to all the available network interfaces. + #[clap(long)] + node_ip: Option, /// Specify a port for the safenode service(s). 
/// /// If not used, ports will be selected at random. @@ -1022,6 +1027,7 @@ async fn main() -> Result<()> { log_dir_path, log_format, metrics_port, + node_ip, node_port, owner, path, @@ -1045,6 +1051,7 @@ async fn main() -> Result<()> { log_dir_path, log_format, metrics_port, + node_ip, node_port, owner, peers, diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 3578b62f97..8a7ba87435 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -47,6 +47,7 @@ pub async fn add( log_dir_path: Option, log_format: Option, metrics_port: Option, + node_ip: Option, node_port: Option, owner: Option, peers_args: PeersArgs, @@ -146,6 +147,7 @@ pub async fn add( log_format, metrics_port, owner, + node_ip, node_port, rpc_address, rpc_port, @@ -604,6 +606,7 @@ pub async fn maintain_n_running_nodes( log_dir_path: Option, log_format: Option, metrics_port: Option, + node_ip: Option, node_port: Option, owner: Option, peers: PeersArgs, @@ -704,6 +707,7 @@ pub async fn maintain_n_running_nodes( log_dir_path.clone(), log_format, metrics_port.clone(), + node_ip, Some(PortRange::Single(port)), owner.clone(), peers.clone(), diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 5841c22cfd..36a452819a 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -774,6 +774,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -875,6 +876,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -941,6 +943,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1050,6 +1053,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1129,6 +1133,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1218,6 +1223,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1306,6 +1312,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1364,6 +1371,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1412,6 +1420,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1458,6 +1467,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1507,6 +1517,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1569,6 +1580,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, 
metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1694,6 +1706,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1781,6 +1794,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -1913,6 +1927,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2057,6 +2072,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2196,6 +2212,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2336,6 +2353,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2506,6 +2524,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2659,6 +2678,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: Some(LogFormat::Json), metrics_port: None, + node_ip: None, node_port: None, owner: None, number: 1, @@ -2815,6 +2835,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -2857,6 +2878,163 @@ mod tests { Ok(()) } + #[tokio::test] + async fn upgrade_should_retain_custom_node_ip() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control + .expect_uninstall() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--ip"), + OsString::from("192.168.1.1"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + 
eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: false, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + metrics_port: None, + number: 1, + node_ip: Some(Ipv4Addr::new(192, 168, 1, 1)), + node_port: None, + owner: None, + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + reward_balance: Some(NanoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: "safenode1".to_string(), + status: ServiceStatus::Running, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: false, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + assert_eq!( + service_manager.service.service_data.node_ip, + Some(Ipv4Addr::new(192, 168, 1, 1)) + ); + + Ok(()) + } + #[tokio::test] async fn upgrade_should_retain_custom_node_ports() -> Result<()> { let current_version = "0.1.0"; @@ -2969,6 +3147,7 @@ mod tests { log_format: None, metrics_port: None, number: 1, + node_ip: None, node_port: Some(12000), owner: None, peer_id: Some(PeerId::from_str( @@ -3121,6 +3300,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: Some(12000), + node_ip: None, node_port: None, number: 1, owner: None, @@ -3277,6 +3457,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: Some(12000), + node_ip: None, node_port: None, number: 1, owner: None, @@ -3433,6 +3614,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: Some("discord_username".to_string()), @@ -3589,6 +3771,7 @@ mod tests { log_dir_path: 
PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: Some("discord_username".to_string()), @@ -3744,6 +3927,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -3815,6 +3999,7 @@ mod tests { log_dir_path: log_dir.to_path_buf(), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -3871,6 +4056,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -3943,6 +4129,7 @@ mod tests { log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -4007,6 +4194,7 @@ mod tests { log_dir_path: log_dir.to_path_buf(), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, @@ -4069,6 +4257,7 @@ mod tests { log_dir_path: log_dir.to_path_buf(), log_format: None, metrics_port: None, + node_ip: None, node_port: None, number: 1, owner: None, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index aa468a5179..ec3a7ae34e 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -438,6 +438,7 @@ pub async fn run_node( log_dir_path: node_info.log_path, log_format: run_options.log_format, metrics_port: run_options.metrics_port, + node_ip: None, node_port: run_options.node_port, number: run_options.number, owner: run_options.owner, diff --git a/sn_node_manager/src/rpc.rs b/sn_node_manager/src/rpc.rs index 0d34d10c34..2c8f15a88b 100644 --- a/sn_node_manager/src/rpc.rs +++ b/sn_node_manager/src/rpc.rs @@ -74,6 +74,7 @@ pub async fn restart_node_service( metrics_port: None, owner: current_node_clone.owner.clone(), name: current_node_clone.service_name.clone(), + node_ip: current_node_clone.node_ip, node_port: current_node_clone.get_safenode_port(), rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path: current_node_clone.safenode_path.clone(), @@ -188,6 +189,7 @@ pub async fn restart_node_service( log_format: current_node_clone.log_format, name: new_service_name.clone(), metrics_port: None, + node_ip: current_node_clone.node_ip, node_port: None, owner: None, rpc_socket_addr: current_node_clone.rpc_socket_addr, @@ -211,6 +213,7 @@ pub async fn restart_node_service( log_dir_path, log_format: current_node_clone.log_format, metrics_port: None, + node_ip: current_node_clone.node_ip, node_port: None, number: new_node_number as u16, owner: None, diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index dcf18ee059..ffd6af0742 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -14,7 +14,13 @@ use service_manager::{ServiceInstallCtx, ServiceLabel}; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; use sn_transfers::NanoTokens; -use std::{ffi::OsString, net::SocketAddr, path::PathBuf, str::FromStr, time::Duration}; +use std::{ + ffi::OsString, + net::{Ipv4Addr, SocketAddr}, + path::PathBuf, + str::FromStr, + time::Duration, +}; pub struct NodeService<'a> { pub service_data: &'a mut NodeServiceData, @@ -82,6 +88,11 @@ impl<'a> ServiceStateActions for NodeService<'a> { args.push(OsString::from("--home-network")); } + if let Some(node_ip) = self.service_data.node_ip { + 
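// Forward the configured node IP to safenode via `--ip`; when unset, safenode binds to all available network interfaces.
+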
args.push(OsString::from("--ip")); + args.push(OsString::from(node_ip.to_string())); + } + if let Some(node_port) = self.service_data.node_port { args.push(OsString::from("--port")); args.push(OsString::from(node_port.to_string())); @@ -261,6 +272,8 @@ pub struct NodeServiceData { #[serde(default)] pub owner: Option, #[serde(default)] + pub node_ip: Option, + #[serde(default)] pub node_port: Option, pub number: u16, #[serde( From f257e0926594a0d61b2831ede9499edff24e6286 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 20 Sep 2024 16:27:04 +0200 Subject: [PATCH 039/255] chore(global): fix compile warnings on wasm32 --- sn_client/src/faucet.rs | 5 +++-- sn_peers_acquisition/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs index a294256945..3f97e386f5 100644 --- a/sn_client/src/faucet.rs +++ b/sn_client/src/faucet.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{wallet::send, Client, Error, Result}; -use sn_transfers::{load_genesis_wallet, HotWallet, NanoTokens, Transfer, FOUNDATION_PK}; +use sn_transfers::{load_genesis_wallet, HotWallet, NanoTokens, FOUNDATION_PK}; const INITIAL_FAUCET_BALANCE: NanoTokens = NanoTokens::from(900000000000000000); @@ -109,7 +109,8 @@ pub async fn fund_faucet_from_genesis_wallet( debug!("Writing cash note to: {foundation_transfer_path:?}"); - let transfer = Transfer::transfer_from_cash_note(&foundation_cashnote)?.to_hex()?; + let transfer = + sn_transfers::Transfer::transfer_from_cash_note(&foundation_cashnote)?.to_hex()?; if let Err(error) = std::fs::write(foundation_transfer_path, transfer) { error!("Could not write the foundation transfer to disk: {error}."); diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs index 719a9ad0d4..65967bebaa 100644 --- a/sn_peers_acquisition/src/lib.rs +++ b/sn_peers_acquisition/src/lib.rs @@ -239,6 +239,6 @@ pub async fn get_peers_from_url(url: Url) -> Result> { trace!( "Failed to get peers from URL, retrying {retries}/{MAX_RETRIES_ON_GET_PEERS_FROM_URL}" ); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(Duration::from_secs(1)).await; } } From d4fde0d1dbcffa8b626b88573943e6ca438b5277 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Fri, 20 Sep 2024 17:26:01 +0200 Subject: [PATCH 040/255] chore(launchpad): ratataui 0.28.1 --- Cargo.lock | 156 ++++++++++++++------ node-launchpad/Cargo.toml | 2 +- node-launchpad/src/app.rs | 10 +- node-launchpad/src/components/help.rs | 185 ++++++++++++++---------- node-launchpad/src/widgets/hyperlink.rs | 19 +-- 5 files changed, 224 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index de693647e0..f530984d3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -156,7 +156,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3bf628a79452df9614d933012dc500f8cb6ddad8c897ff8122ea1c0b187ff7" dependencies = [ "nom", - "ratatui", + "ratatui 0.26.3", "simdutf8", "smallvec", "thiserror", @@ -213,9 +213,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e1496f8fb1fbf272686b8d37f523dab3e4a7443300055e74cdaa449f3114356" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" @@ -225,9 +225,9 @@ checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] 
name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -877,9 +877,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -978,9 +978,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1205,6 +1205,20 @@ dependencies = [ "cfg-if", "itoa", "ryu", + "static_assertions", +] + +[[package]] +name = "compact_str" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6050c3a16ddab2e412160b31f2c871015704239bca62f72f6e5f0be631d3f644" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "rustversion", + "ryu", "serde", "static_assertions", ] @@ -1454,6 +1468,22 @@ dependencies = [ "winapi", ] +[[package]] +name = "crossterm" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" +dependencies = [ + "bitflags 2.6.0", + "crossterm_winapi", + "mio 1.0.2", + "parking_lot", + "rustix", + "signal-hook", + "signal-hook-mio", + "winapi", +] + [[package]] name = "crossterm_winapi" version = "0.9.1" @@ -3401,7 +3431,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", ] [[package]] @@ -3438,9 +3468,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3617,6 +3647,16 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "instability" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" +dependencies = [ + "quote", + "syn 2.0.77", +] + [[package]] name = "instant" version = "0.1.13" @@ -4372,9 +4412,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ "libc", ] @@ -4461,6 +4501,7 @@ checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", + "log", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -4723,7 +4764,7 @@ dependencies = [ 
"clap", "color-eyre", "config", - "crossterm", + "crossterm 0.27.0", "derive_deref", "directories", "dirs-next", @@ -4737,7 +4778,7 @@ dependencies = [ "log", "pretty_assertions", "prometheus-parse", - "ratatui", + "ratatui 0.28.1", "reqwest 0.12.7", "serde", "serde_json", @@ -5226,9 +5267,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c73c26c01b8c87956cea613c907c9d6ecffd8d18a2a5908e5de0adfaa185cea" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -5237,9 +5278,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664d22978e2815783adbdd2c588b455b1bd625299ce36b2a99881ac9627e6d8d" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -5247,9 +5288,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2d5487022d5d33f4c30d91c22afa240ce2a644e87fe08caad974d4eab6badbe" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", @@ -5260,9 +5301,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.12" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0091754bbd0ea592c4deb3a122ce8ecbb0753b738aa82bc055fcc2eccc8d8174" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", @@ -5506,9 +5547,9 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -6021,12 +6062,10 @@ checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef" dependencies = [ "bitflags 2.6.0", "cassowary", - "compact_str", - "crossterm", + "compact_str 0.7.1", "itertools 0.12.1", "lru", "paste", - "serde", "stability", "strum", "unicode-segmentation", @@ -6034,6 +6073,28 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "ratatui" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d" +dependencies = [ + "bitflags 2.6.0", + "cassowary", + "compact_str 0.8.0", + "crossterm 0.28.1", + "instability", + "itertools 0.13.0", + "lru", + "paste", + "serde", + "strum", + "strum_macros", + "unicode-segmentation", + "unicode-truncate", + "unicode-width", +] + [[package]] name = "rayon" version = "1.10.0" @@ -6218,7 +6279,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", "windows-registry", ] @@ -6897,6 +6958,7 @@ checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", "mio 0.8.11", + "mio 1.0.2", "signal-hook", ] @@ -7641,9 +7703,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = 
"12.11.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1db5ac243c7d7f8439eb3b8f0357888b37cf3732957e91383b0ad61756374e" +checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" dependencies = [ "debugid", "memmap2", @@ -7653,9 +7715,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.11.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea26e430c27d4a8a5dea4c4b81440606c7c1a415bd611451ef6af8c81416afc3" +checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -8092,9 +8154,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -8385,7 +8447,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3e785f863a3af4c800a2a669d0b64c879b538738e352607e2624d03f868dc01" dependencies = [ - "crossterm", + "crossterm 0.27.0", "unicode-width", ] @@ -8476,9 +8538,9 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-truncate" @@ -8493,15 +8555,15 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "universal-hash" @@ -8842,9 +8904,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -9271,9 +9333,9 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yasna" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 8c92a39f62..bb4b90abaa 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -41,7 +41,7 @@ libc = "0.2.148" log = "0.4.20" pretty_assertions = "1.4.0" prometheus-parse = "0.2.5" -ratatui = { version = "0.26.0", features = ["serde", "macros", "unstable-widget-ref"] } +ratatui = { version = 
"0.28.1", features = ["serde", "macros", "unstable-widget-ref"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } diff --git a/node-launchpad/src/app.rs b/node-launchpad/src/app.rs index f456fdb6b9..e8531ab825 100644 --- a/node-launchpad/src/app.rs +++ b/node-launchpad/src/app.rs @@ -167,7 +167,9 @@ impl App { for component in self.components.iter_mut() { component.register_action_handler(action_tx.clone())?; component.register_config_handler(self.config.clone())?; - component.init(tui.size()?)?; + let size = tui.size()?; + let rect = Rect::new(0, 0, size.width, size.height); + component.init(rect)?; } loop { @@ -223,7 +225,7 @@ impl App { tui.resize(Rect::new(0, 0, w, h))?; tui.draw(|f| { for component in self.components.iter_mut() { - let r = component.draw(f, f.size()); + let r = component.draw(f, f.area()); if let Err(e) = r { action_tx .send(Action::Error(format!("Failed to draw: {:?}", e))) @@ -236,10 +238,10 @@ impl App { tui.draw(|f| { f.render_widget( Block::new().style(Style::new().bg(SPACE_CADET)), - f.size(), + f.area(), ); for component in self.components.iter_mut() { - let r = component.draw(f, f.size()); + let r = component.draw(f, f.area()); if let Err(e) = r { action_tx .send(Action::Error(format!("Failed to draw: {:?}", e))) diff --git a/node-launchpad/src/components/help.rs b/node-launchpad/src/components/help.rs index 7bb94db50d..9270616d27 100644 --- a/node-launchpad/src/components/help.rs +++ b/node-launchpad/src/components/help.rs @@ -3,8 +3,8 @@ use color_eyre::eyre::Result; use ratatui::{ layout::{Constraint, Direction, Layout, Rect}, style::{Style, Stylize}, - text::{Line, Span}, - widgets::{Block, Borders, Cell, Padding, Row, Table}, + text::Span, + widgets::{Block, Borders, Padding}, Frame, }; use tokio::sync::mpsc::UnboundedSender; @@ -14,10 +14,9 @@ use crate::{ action::Action, components::header::Header, mode::{InputMode, Scene}, - style::{COOL_GREY, GHOST_WHITE}, + style::{COOL_GREY, GHOST_WHITE, VIVID_SKY_BLUE}, widgets::hyperlink::Hyperlink, }; -use ansi_to_tui::IntoText; #[derive(Clone)] pub struct Help { @@ -56,86 +55,114 @@ impl Component for Help { // ---- Get Help & Support ---- // Links + // Create a new layout as a table, so we can render hyperlinks + let columns_layout = Layout::default() + .direction(Direction::Horizontal) + .constraints(vec![Constraint::Percentage(50), Constraint::Percentage(50)]) + .split(layout[1]); + + let padded_area_left = Rect { + x: columns_layout[0].x + 2, + y: columns_layout[0].y + 2, + width: columns_layout[0].width - 2, + height: columns_layout[0].height - 2, + }; + + let left_column = Layout::default() + .direction(Direction::Vertical) + .constraints(vec![ + Constraint::Max(1), + Constraint::Max(2), + Constraint::Max(1), + Constraint::Max(2), + ]) + .split(padded_area_left); + + let padded_area_right = Rect { + x: columns_layout[1].x + 2, + y: columns_layout[1].y + 2, + width: columns_layout[1].width - 2, + height: columns_layout[1].height - 2, + }; + let right_column = Layout::default() + .direction(Direction::Vertical) + .constraints(vec![ + Constraint::Max(1), + Constraint::Max(2), + Constraint::Max(1), + Constraint::Max(2), + ]) + .split(padded_area_right); + let quickstart_guide_link = Hyperlink::new( - "docs.autonomi.com/getstarted", + Span::styled( + "docs.autonomi.com/getstarted", + Style::default().fg(VIVID_SKY_BLUE).underlined(), + ), "https://docs.autonomi.com/getstarted", ); - let beta_rewards_link = Hyperlink::new("autonomi.com/beta", 
"https://autonomi.com/beta"); - let get_direct_support_link = - Hyperlink::new("autonomi.com/support", "https://autonomi.com/support"); - let download_latest_link = - Hyperlink::new("autonomi.com/downloads", "https://autonomi.com/downloads"); - - // Content - let rows_help_and_support = vec![ - Row::new(vec![ - Cell::from(Line::from(vec![Span::styled( - "See the quick start guides:", - Style::default().fg(GHOST_WHITE), - )])), - Cell::from(Line::from(vec![Span::styled( - "To join the Beta Rewards Program:", - Style::default().fg(GHOST_WHITE), - )])), - ]), - Row::new(vec![ - Cell::from( - quickstart_guide_link - .to_string() - .into_text() - .unwrap() - .clone(), - ), - Cell::from(beta_rewards_link.to_string().into_text().unwrap().clone()), - ]), - Row::new(vec![ - // Empty row for padding - Cell::from(Span::raw(" ")), - Cell::from(Span::raw(" ")), - ]), - Row::new(vec![ - Cell::from(Line::from(vec![Span::styled( - "Get Direct Support:", - Style::default().fg(GHOST_WHITE), - )])), - Cell::from(Line::from(vec![Span::styled( - "Download the latest launchpad:", - Style::default().fg(GHOST_WHITE), - )])), - ]), - Row::new(vec![ - Cell::from( - get_direct_support_link - .to_string() - .into_text() - .unwrap() - .clone(), - ), - Cell::from( - download_latest_link - .to_string() - .into_text() - .unwrap() - .clone(), - ), - ]), - ]; - - let table_help_and_support = Table::new( - rows_help_and_support, - vec![Constraint::Percentage(50), Constraint::Percentage(50)], - ) - .block( - Block::new() - .borders(Borders::ALL) - .border_style(Style::default().fg(COOL_GREY)) - .padding(Padding::uniform(1)) - .title(" Get Help & Support ") - .bold() - .title_style(Style::default().bold().fg(GHOST_WHITE)), + let beta_rewards_link = Hyperlink::new( + Span::styled( + "autonomi.com/beta", + Style::default().fg(VIVID_SKY_BLUE).underlined(), + ), + "https://autonomi.com/beta", + ); + let get_direct_support_link = Hyperlink::new( + Span::styled( + "autonomi.com/support", + Style::default().fg(VIVID_SKY_BLUE).underlined(), + ), + "https://autonomi.com/support", + ); + let download_latest_link = Hyperlink::new( + Span::styled( + "autonomi.com/downloads", + Style::default().fg(VIVID_SKY_BLUE).underlined(), + ), + "https://autonomi.com/downloads", + ); + + let block = Block::new() + .borders(Borders::ALL) + .border_style(Style::default().fg(COOL_GREY)) + .padding(Padding::uniform(1)) + .title(" Get Help & Support ") + .bold() + .title_style(Style::default().bold().fg(GHOST_WHITE)); + + // Render hyperlinks in the new area + f.render_widget( + Span::styled( + "See the quick start guides:", + Style::default().fg(GHOST_WHITE), + ), + left_column[0], + ); + f.render_widget_ref(quickstart_guide_link, left_column[1]); + f.render_widget( + Span::styled("Get Direct Support:", Style::default().fg(GHOST_WHITE)), + left_column[2], + ); + f.render_widget_ref(get_direct_support_link, left_column[3]); + f.render_widget( + Span::styled( + "To join the Beta Rewards Program:", + Style::default().fg(GHOST_WHITE), + ), + right_column[0], + ); + f.render_widget_ref(beta_rewards_link, right_column[1]); + f.render_widget( + Span::styled( + "Download the latest launchpad:", + Style::default().fg(GHOST_WHITE), + ), + right_column[2], ); + f.render_widget_ref(download_latest_link, right_column[3]); - f.render_widget(table_help_and_support, layout[1]); + f.render_widget(block, layout[1]); Ok(()) } diff --git a/node-launchpad/src/widgets/hyperlink.rs b/node-launchpad/src/widgets/hyperlink.rs index 0798811ae0..2d78ed312e 100644 --- 
a/node-launchpad/src/widgets/hyperlink.rs
+++ b/node-launchpad/src/widgets/hyperlink.rs
@@ -8,7 +8,6 @@
 
 use itertools::Itertools;
 use ratatui::{prelude::*, widgets::WidgetRef};
-use std::fmt;
 
 /// A hyperlink widget that renders a hyperlink in the terminal using [OSC 8].
 ///
@@ -27,20 +26,6 @@ impl<'content> Hyperlink<'content> {
     }
 }
 
-// Displays the hyperlink in the terminal using OSC 8.
-// Underline solid \x1b[4m
-// Foreground color 45 \x1b[38;5;45m
-impl fmt::Display for Hyperlink<'_> {
-    //TODO: Parameterize the color, underline, bold, etc. Use ratatui::Style.
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(
-            f,
-            "\x1b[4m\x1b[38;5;45m\x1B]8;;{}\x07{}\x1B]8;;\x07\x1b[0m",
-            self.url, self.text
-        )
-    }
-}
-
 impl WidgetRef for Hyperlink<'_> {
     fn render_ref(&self, area: Rect, buffer: &mut Buffer) {
         self.text.render_ref(area, buffer);
@@ -60,8 +45,8 @@ impl WidgetRef for Hyperlink<'_> {
             let text = two_chars.collect::<String>();
             let hyperlink = format!("\x1B]8;;{}\x07{}\x1B]8;;\x07", self.url, text);
             buffer
-                .get_mut(area.x + i as u16 * 2, area.y)
-                .set_symbol(hyperlink.as_str());
+                .cell_mut(Position::new(area.x + i as u16 * 2, area.y))
+                .map(|cell| cell.set_symbol(hyperlink.as_str()));
         }
     }
 }

From e80e19710da137cd8654f6d77ed6911dcbf80b75 Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Fri, 20 Sep 2024 17:33:11 +0200
Subject: [PATCH 041/255] chore(launchpad): ansi-to-tui removed

---
 Cargo.lock                | 66 ++--------------------------------------
 node-launchpad/Cargo.toml |  1 -
 2 files changed, 2 insertions(+), 65 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f530984d3d..dd2a0f5f61 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -149,19 +149,6 @@ version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
 
-[[package]]
-name = "ansi-to-tui"
-version = "4.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3bf628a79452df9614d933012dc500f8cb6ddad8c897ff8122ea1c0b187ff7"
-dependencies = [
- "nom",
- "ratatui 0.26.3",
- "simdutf8",
- "smallvec",
- "thiserror",
-]
-
 [[package]]
 name = "anstream"
 version = "0.6.15"
@@ -1195,19 +1182,6 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
-[[package]]
-name = "compact_str"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f"
-dependencies = [
- "castaway",
- "cfg-if",
- "itoa",
- "ryu",
- "static_assertions",
-]
-
 [[package]]
 name = "compact_str"
 version = "0.8.0"
@@ -4757,7 +4731,6 @@ dependencies = [
 name = "node-launchpad"
 version = "0.3.14"
 dependencies = [
- "ansi-to-tui",
 "atty",
 "better-panic",
 "chrono",
@@ -4778,7 +4751,7 @@ dependencies = [
 "log",
 "pretty_assertions",
 "prometheus-parse",
- "ratatui 0.28.1",
+ "ratatui",
 "reqwest 0.12.7",
 "serde",
 "serde_json",
@@ -6054,25 +6027,6 @@ dependencies = [
 "rand_core 0.6.4",
 ]
 
-[[package]]
-name = "ratatui"
-version = "0.26.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef"
-dependencies = [
- "bitflags 2.6.0",
- "cassowary",
- "compact_str 0.7.1",
- "itertools 0.12.1",
- "lru",
- "paste",
- "stability",
- "strum",
- "unicode-segmentation",
- "unicode-truncate",
- "unicode-width",
-]
-
 [[package]]
 name = "ratatui"
 version = "0.28.1"
@@ -6081,7 +6035,7 @@ checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d"
 dependencies = 
[ "bitflags 2.6.0", "cassowary", - "compact_str 0.8.0", + "compact_str", "crossterm 0.28.1", "instability", "itertools 0.13.0", @@ -6990,12 +6944,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simdutf8" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" - [[package]] name = "slab" version = "0.4.9" @@ -7630,16 +7578,6 @@ dependencies = [ "der 0.7.9", ] -[[package]] -name = "stability" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" -dependencies = [ - "quote", - "syn 2.0.77", -] - [[package]] name = "stable_deref_trait" version = "1.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index bb4b90abaa..9cd19d659c 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -62,7 +62,6 @@ tracing-error = "0.2.0" tracing-subscriber = { version = "0.3.17", features = ["env-filter", "serde"] } tui-input = "0.8.0" which = "6.0.1" -ansi-to-tui = "4.1.0" faccess = "0.2.4" [build-dependencies] From 78ed99cfe4b1133f97ed425a59a6775a5e945210 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 24 Sep 2024 13:52:32 +0200 Subject: [PATCH 042/255] feat(autonomi): remove dep on sn_client --- Cargo.lock | 4 +- autonomi/Cargo.toml | 6 +- autonomi/src/client/data.rs | 26 +++----- autonomi/src/client/mod.rs | 2 +- autonomi/src/client/registers.rs | 16 ++--- autonomi/src/client/transfers.rs | 20 +++--- autonomi/src/client/vault.rs | 6 +- autonomi/src/lib.rs | 1 - autonomi/src/secrets.rs | 42 ------------ autonomi/src/wallet/mod.rs | 2 +- autonomi/tests/common/mod.rs | 108 ++++++++++++++++++++++++++++++- 11 files changed, 142 insertions(+), 91 deletions(-) delete mode 100644 autonomi/src/secrets.rs diff --git a/Cargo.lock b/Cargo.lock index f296410140..ac806fd4ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -430,7 +430,9 @@ dependencies = [ "rmp-serde", "self_encryption", "serde", - "sn_client", + "sn_bls_ckd", + "sn_curv", + "sn_networking", "sn_peers_acquisition", "sn_protocol", "sn_registers", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index c8da7598df..286776e496 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -16,7 +16,7 @@ data = ["transfers"] vault = ["data"] files = ["transfers", "data"] fs = [] -local = ["sn_client/local-discovery"] +local = ["sn_networking/local-discovery"] registers = ["transfers"] transfers = [] @@ -24,12 +24,14 @@ transfers = [] bip39 = "2.0.0" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } +curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = ["num-bigint"] } +eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_client = { path = "../sn_client", version = "0.110.1" } +sn_networking = { path = "../sn_networking", version = "0.18.2" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" } sn_protocol = { version = "0.17.9", path = "../sn_protocol" } sn_registers = { path = "../sn_registers", version = "0.3.19" } diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 8f54c35387..5e226d19e0 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -8,11 +8,7 @@ use libp2p::{ PeerId, }; 
use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
-use sn_client::{
-    networking::{GetRecordCfg, NetworkError, PutRecordCfg},
-    transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote},
-    StoragePaymentResult,
-};
+use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg};
 use sn_protocol::{
     storage::{
         try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind,
@@ -20,6 +16,7 @@ use sn_protocol::{
     NetworkAddress,
 };
 use sn_transfers::Payment;
+use sn_transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote};
 use tokio::task::{JoinError, JoinSet};
 use xor_name::XorName;
@@ -46,7 +43,7 @@ pub enum PutError {
 #[derive(Debug, thiserror::Error)]
 pub enum PayError {
     #[error("Could not get store costs: {0:?}")]
-    CouldNotGetStoreCosts(sn_client::networking::NetworkError),
+    CouldNotGetStoreCosts(sn_networking::NetworkError),
     #[error("Could not simultaneously fetch store costs: {0:?}")]
     JoinError(JoinError),
     #[error("Hot wallet error")]
@@ -63,9 +60,9 @@ pub enum GetError {
     #[error("Failed to decrypt data.")]
     Decryption(crate::self_encryption::Error),
     #[error("General networking error: {0:?}")]
-    Network(#[from] sn_client::networking::NetworkError),
+    Network(#[from] sn_networking::NetworkError),
     #[error("General protocol error: {0:?}")]
-    Protocol(#[from] sn_client::protocol::Error),
+    Protocol(#[from] sn_protocol::Error),
 }
 
 impl Client {
@@ -117,8 +114,7 @@ impl Client {
             xor_names.push(*chunk.name());
         }
 
-        let StoragePaymentResult { skipped_chunks, .. } =
-            self.pay(xor_names.into_iter(), wallet).await?;
+        let (.., skipped_chunks) = self.pay(xor_names.into_iter(), wallet).await?;
 
         // TODO: Upload in parallel
         if !skipped_chunks.contains(map.name()) {
@@ -176,11 +172,12 @@ impl Client {
         }
     }
 
+    /// Returns the storage cost, royalty fees, and skipped chunks. In that order as tuple.
     pub(crate) async fn pay(
         &mut self,
         content_addrs: impl Iterator<Item = XorName>,
         wallet: &mut HotWallet,
-    ) -> Result<StoragePaymentResult, PayError> {
+    ) -> Result<(NanoTokens, NanoTokens, Vec<XorName>), PayError> {
         let mut tasks = JoinSet::new();
         for content_addr in content_addrs {
             let network = self.network.clone();
@@ -229,12 +226,9 @@ impl Client {
         } else {
             self.pay_for_records(&cost_map, wallet).await?
}; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) + Ok((storage_cost, royalty_fees, skipped_chunks)) } async fn pay_for_records( diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index b50d7e7ce8..cb50baa33a 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -3,7 +3,7 @@ use std::{collections::HashSet, time::Duration}; #[cfg(feature = "vault")] use bls::SecretKey; use libp2p::{identity::Keypair, Multiaddr}; -use sn_client::networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; +use sn_networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; use tokio::{sync::mpsc::Receiver, time::interval}; diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 6aa77cf74b..70f6a514a4 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -5,19 +5,19 @@ use crate::Client; use bls::SecretKey; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; -use sn_client::networking::GetRecordCfg; -use sn_client::networking::NetworkError; -use sn_client::networking::PutRecordCfg; -use sn_client::registers::EntryHash; -use sn_client::registers::Permissions; -use sn_client::registers::Register as ClientRegister; -use sn_client::registers::SignedRegister; -use sn_client::transfers::HotWallet; +use sn_networking::GetRecordCfg; +use sn_networking::NetworkError; +use sn_networking::PutRecordCfg; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; use sn_protocol::storage::RegisterAddress; use sn_protocol::NetworkAddress; +use sn_registers::EntryHash; +use sn_registers::Permissions; +use sn_registers::Register as ClientRegister; +use sn_registers::SignedRegister; +use sn_transfers::HotWallet; use xor_name::XorName; use super::data::PayError; diff --git a/autonomi/src/client/transfers.rs b/autonomi/src/client/transfers.rs index 7e34b93209..7740a95a7c 100644 --- a/autonomi/src/client/transfers.rs +++ b/autonomi/src/client/transfers.rs @@ -1,6 +1,6 @@ use crate::wallet::MemWallet; use crate::Client; -use sn_client::transfers::{MainPubkey, NanoTokens}; +use sn_transfers::{MainPubkey, NanoTokens}; use sn_transfers::{SpendReason, Transfer}; use sn_transfers::UniquePubkey; @@ -23,7 +23,7 @@ pub enum TransferError { #[error("Wallet error: {0:?}")] WalletError(#[from] crate::wallet::error::WalletError), #[error("Network error: {0:?}")] - NetworkError(#[from] sn_client::networking::NetworkError), + NetworkError(#[from] sn_networking::NetworkError), } #[derive(Debug, thiserror::Error)] @@ -39,17 +39,15 @@ use libp2p::{ kad::{Quorum, Record}, PeerId, }; -use sn_client::{ - networking::{ - GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, - }, - transfers::{HotWallet, SignedSpend}, +use sn_networking::{ + GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, }; use sn_protocol::{ storage::{try_serialize_record, RecordKind, RetryStrategy, SpendAddress}, NetworkAddress, PrettyPrintRecordKey, }; use sn_transfers::Payment; +use sn_transfers::{HotWallet, SignedSpend}; use xor_name::XorName; use crate::VERIFY_STORE; @@ -109,12 +107,12 @@ impl Client { let mut double_spent_keys = BTreeSet::new(); for (spend_key, spend_attempt_result) in join_all(tasks).await { match spend_attempt_result { - 
Err(sn_client::networking::NetworkError::GetRecordError(
+            Err(sn_networking::NetworkError::GetRecordError(
                 GetRecordError::RecordDoesNotMatch(_),
             ))
-            | Err(sn_client::networking::NetworkError::GetRecordError(
-                GetRecordError::SplitRecord { .. },
-            )) => {
+            | Err(sn_networking::NetworkError::GetRecordError(GetRecordError::SplitRecord {
+                ..
+            })) => {
                 tracing::warn!(
                     "Double spend detected while trying to spend: {:?}",
                     spend_key
diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs
index 5d27aa4cf9..3b64ec67c6 100644
--- a/autonomi/src/client/vault.rs
+++ b/autonomi/src/client/vault.rs
@@ -4,15 +4,13 @@ use crate::Client;
 use bls::SecretKey;
 use bytes::Bytes;
 use libp2p::kad::{Quorum, Record};
-use sn_client::{
-    networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind},
-    transfers::HotWallet,
-};
+use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind};
 use sn_protocol::storage::{RetryStrategy, Scratchpad, ScratchpadAddress};
 use sn_protocol::{
     storage::{try_deserialize_record, try_serialize_record, RecordKind},
     NetworkAddress,
 };
+use sn_transfers::HotWallet;
 use tracing::info;
 
 use super::data::PutError;
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index 3314d8a1b3..faed1a2adc 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -46,7 +46,6 @@ pub use libp2p::Multiaddr;
 pub use client::{Client, ConnectError, CONNECT_TIMEOUT_SECS};
 
 mod client;
-mod secrets;
 #[cfg(feature = "data")]
 mod self_encryption;
 #[cfg(feature = "transfers")]
diff --git a/autonomi/src/secrets.rs b/autonomi/src/secrets.rs
deleted file mode 100644
index bdbe9ea800..0000000000
--- a/autonomi/src/secrets.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use sn_client::acc_packet::user_secret::account_wallet_secret_key;
-use sn_client::transfers::MainSecretKey;
-
-#[derive(Debug, thiserror::Error)]
-pub enum SecretsError {
-    /// Should never happen
-    #[error("Unexpected error")]
-    Unexpected,
-    /// Failed to parse entropy
-    #[error("Error parsing entropy for mnemonic phrase")]
-    FailedToParseEntropy,
-    /// Invalid mnemonic seed phrase
-    #[error("Invalid mnemonic seed phrase")]
-    InvalidMnemonicSeedPhrase,
-    /// Invalid key bytes
-    #[error("Invalid key bytes")]
-    InvalidKeyBytes,
-}
-
-impl From<sn_client::Error> for SecretsError {
-    fn from(value: sn_client::Error) -> Self {
-        match value {
-            sn_client::Error::FailedToParseEntropy => SecretsError::FailedToParseEntropy,
-            sn_client::Error::InvalidMnemonicSeedPhrase => SecretsError::InvalidMnemonicSeedPhrase,
-            sn_client::Error::InvalidKeyBytes => SecretsError::InvalidKeyBytes,
-            _ => SecretsError::Unexpected,
-        }
-    }
-}
-
-#[allow(dead_code)]
-fn generate_mnemonic() -> Result<bip39::Mnemonic, SecretsError> {
-    sn_client::acc_packet::user_secret::random_eip2333_mnemonic().map_err(SecretsError::from)
-}
-
-#[allow(dead_code)]
-fn main_sk_from_mnemonic(
-    mnemonic: bip39::Mnemonic,
-    derivation_passphrase: &str,
-) -> Result<MainSecretKey, SecretsError> {
-    account_wallet_secret_key(mnemonic, derivation_passphrase).map_err(SecretsError::from)
-}
diff --git a/autonomi/src/wallet/mod.rs b/autonomi/src/wallet/mod.rs
index 25ba99bc03..988451a4e7 100644
--- a/autonomi/src/wallet/mod.rs
+++ b/autonomi/src/wallet/mod.rs
@@ -1,11 +1,11 @@
 pub mod error;
 
 use crate::wallet::error::WalletError;
-use sn_client::transfers::{HotWallet, MainSecretKey};
 use sn_transfers::{
     CashNote, CashNoteRedemption, DerivationIndex, MainPubkey, NanoTokens, SignedSpend,
     SignedTransaction, SpendReason, Transfer, UniquePubkey, UnsignedTransaction,
 };
+use sn_transfers::{HotWallet, MainSecretKey};
 use std::collections::{BTreeMap, HashSet};
 use std::path::PathBuf;
diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs
index c01ae23187..4844f5c810 100644
--- a/autonomi/tests/common/mod.rs
+++ b/autonomi/tests/common/mod.rs
@@ -1,11 +1,20 @@
 #![allow(dead_code)]
 
+use std::path::Path;
+
+use bip39::Mnemonic;
+use bls::SecretKey;
 use bytes::Bytes;
+use curv::elliptic::curves::ECScalar as _;
 use libp2p::Multiaddr;
-use rand::Rng;
-use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic;
+use rand::{Rng, RngCore as _};
 use sn_peers_acquisition::parse_peer_addr;
-use sn_transfers::{get_faucet_data_dir, HotWallet};
+use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey};
+
+const MNEMONIC_FILENAME: &str = "account_secret";
+const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0";
+const ACCOUNT_WALLET_DERIVATION: &str = "m/2/0";
+const DEFAULT_WALLET_DERIVIATION_PASSPHRASE: &str = "default";
 
 /// When launching a testnet locally, we can use the faucet wallet.
 pub fn load_hot_wallet_from_faucet() -> HotWallet {
@@ -37,3 +46,96 @@ pub fn peers_from_env() -> Result<Vec<Multiaddr>, libp2p::multiaddr::Error> {
     peers_str.split(',').map(parse_peer_addr).collect()
 }
 
+/// Load a account from disk, with wallet, or create a new one using the mnemonic system
+fn load_account_wallet_or_create_with_mnemonic(
+    root_dir: &Path,
+    derivation_passphrase: Option<&str>,
+) -> Result<HotWallet, Box<dyn std::error::Error>> {
+    let wallet = HotWallet::load_from(root_dir);
+
+    match wallet {
+        Ok(wallet) => Ok(wallet),
+        Err(error) => {
+            tracing::warn!("Issue loading wallet, creating a new one: {error}");
+
+            let mnemonic = load_or_create_mnemonic(root_dir)?;
+            let wallet =
+                secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?;
+
+            Ok(HotWallet::create_from_key(root_dir, wallet, None)?)
+        }
+    }
+}
+
+fn load_or_create_mnemonic(root_dir: &Path) -> Result<Mnemonic, Box<dyn std::error::Error>> {
+    match read_mnemonic_from_disk(root_dir) {
+        Ok(mnemonic) => {
+            tracing::info!("Using existing mnemonic from {root_dir:?}");
+            Ok(mnemonic)
+        }
+        Err(error) => {
+            tracing::warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}");
+            let mnemonic = random_eip2333_mnemonic()?;
+            write_mnemonic_to_disk(root_dir, &mnemonic)?;
+            Ok(mnemonic)
+        }
+    }
+}
+
+fn secret_key_from_mnemonic(
+    mnemonic: Mnemonic,
+    derivation_passphrase: Option<String>,
+) -> Result<MainSecretKey, Box<dyn std::error::Error>> {
+    let passphrase =
+        derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned());
+    account_wallet_secret_key(mnemonic, &passphrase)
+}
+
+fn create_faucet_account_and_wallet() -> HotWallet {
+    let root_dir = get_faucet_data_dir();
+
{root_dir:#?}"); + load_account_wallet_or_create_with_mnemonic(&root_dir, None) + .expect("Faucet wallet shall be created successfully.") +} + +pub fn write_mnemonic_to_disk( + files_dir: &Path, + mnemonic: &bip39::Mnemonic, +) -> Result<(), Box> { + let filename = files_dir.join(MNEMONIC_FILENAME); + let content = mnemonic.to_string(); + std::fs::write(filename, content)?; + Ok(()) +} + +pub(super) fn read_mnemonic_from_disk( + files_dir: &Path, +) -> Result> { + let filename = files_dir.join(MNEMONIC_FILENAME); + let content = std::fs::read_to_string(filename)?; + let mnemonic = bip39::Mnemonic::parse_normalized(&content)?; + Ok(mnemonic) +} + +fn random_eip2333_mnemonic() -> Result> { + let mut entropy = [1u8; 32]; + let rng = &mut rand::rngs::OsRng; + rng.fill_bytes(&mut entropy); + let mnemonic = bip39::Mnemonic::from_entropy(&entropy)?; + Ok(mnemonic) +} + +/// Derive a wallet secret key from the mnemonic for the account. +fn account_wallet_secret_key( + mnemonic: bip39::Mnemonic, + passphrase: &str, +) -> Result> { + let seed = mnemonic.to_seed(passphrase); + + let root_sk = eip2333::derive_master_sk(&seed)?; + let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_WALLET_DERIVATION); + let key_bytes = derived_key.serialize(); + let sk = SecretKey::from_bytes(key_bytes.into())?; + Ok(MainSecretKey::new(sk)) +} From 7639624c6d47380fb905b0a3f9a75f93dc70468d Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 18 Sep 2024 23:27:47 +0100 Subject: [PATCH 043/255] chore: standardise versioning for binaries The RFC for the release process stipulated there should be four versioning arguments: * --version * --crate-version * --package-version * --network-version Here, `--crate-version` is the semantic version of the crate, `--package-version` is the release cycle version, e.g., `2024.09.1.1`, and `--network-version` prints the compatible network protocol. The `--version` argument will then print all of this information. The `--package-version` argument does not apply to the nightly release. The approach for printing the version information is to provide our own `version` flag, by using the `disable_version_flag` attribute with `clap`. If you want to use `clap`, all the information needs to be completely static and available at compile time. This is convoluted, especially for things like the network protocol version and the crate version, and it would have led to a lot of repetition. We can avoid the difficulty by providing the information dynamically, and we can use the `sn_build_info` crate to define the version string once. On binaries that used subcommands, for the versioning to work correctly, the subcommand needs to be made optional. The `get_bin_version` helper used by the node manager was updated parse the new versioning information from both stable and nightly releases. Now, the function can handle different `--version` output formats by extracting the version number for stable releases prefixed with a 'v' and the date for nightly releases. Since we are going to introduce a nightly build, this commit also introduces a nightly feature, which will control what versioning information is used. For the nightly, the binaries will be versioned with the date on which they are built. The `--package-version` argument does not apply to the nightly release; it is not necessary if all the binaries have the same version, which is the date. 
--- Cargo.lock | 10 +++ nat-detection/Cargo.toml | 5 ++ nat-detection/src/main.rs | 39 +++++++++- node-launchpad/Cargo.toml | 5 ++ node-launchpad/src/bin/tui/main.rs | 40 +++++++++- node-launchpad/src/utils.rs | 24 ------ sn_auditor/Cargo.toml | 3 + sn_auditor/src/main.rs | 50 ++++++++++++- sn_build_info/Cargo.toml | 8 ++ sn_build_info/build.rs | 44 +++++++++++ sn_build_info/src/lib.rs | 81 +++++++++++++++++++- sn_cli/Cargo.toml | 1 + sn_cli/src/bin/main.rs | 52 ++++++++++--- sn_cli/src/bin/subcommands/mod.rs | 21 +++++- sn_faucet/Cargo.toml | 1 + sn_faucet/src/main.rs | 99 +++++++++++++++++------- sn_node/Cargo.toml | 7 +- sn_node/src/bin/safenode/main.rs | 61 +++++++++++++-- sn_node_manager/Cargo.toml | 2 + sn_node_manager/src/bin/cli/main.rs | 100 +++++++++++++++++-------- sn_node_manager/src/bin/daemon/main.rs | 45 +++++++++-- sn_node_manager/src/helpers.rs | 33 +++++--- sn_node_manager/src/local.rs | 9 ++- sn_node_rpc_client/Cargo.toml | 4 + sn_node_rpc_client/src/main.rs | 42 +++++++++-- 25 files changed, 649 insertions(+), 137 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f296410140..1b7d8dab75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4621,7 +4621,9 @@ dependencies = [ "color-eyre", "futures", "libp2p", + "sn_build_info", "sn_networking", + "sn_protocol", "tokio", "tracing", "tracing-log 0.2.0", @@ -4758,7 +4760,9 @@ dependencies = [ "signal-hook", "sn-node-manager", "sn-releases", + "sn_build_info", "sn_peers_acquisition", + "sn_protocol", "sn_service_management", "strip-ansi-escapes", "strum", @@ -6986,6 +6990,7 @@ dependencies = [ "serde_json", "service-manager", "sn-releases", + "sn_build_info", "sn_logging", "sn_peers_acquisition", "sn_protocol", @@ -7034,9 +7039,11 @@ dependencies = [ "lazy_static", "serde", "serde_json", + "sn_build_info", "sn_client", "sn_logging", "sn_peers_acquisition", + "sn_protocol", "tiny_http", "tokio", "tracing", @@ -7059,6 +7066,8 @@ dependencies = [ name = "sn_build_info" version = "0.1.13" dependencies = [ + "chrono", + "tracing", "vergen", ] @@ -7368,6 +7377,7 @@ dependencies = [ "hex 0.4.3", "libp2p", "libp2p-identity", + "sn_build_info", "sn_client", "sn_logging", "sn_node", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 21e67b55e8..47f925dd70 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -13,6 +13,9 @@ version = "0.2.5" name = "nat-detection" path = "src/main.rs" +[features] +nightly = [] + [dependencies] clap = { version = "4.5.4", features = ["derive"] } clap-verbosity-flag = "2.2.0" @@ -28,7 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } +sn_build_info = { path = "../sn_build_info", version = "0.1.13" } sn_networking = { path = "../sn_networking", version = "0.18.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.9" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/nat-detection/src/main.rs b/nat-detection/src/main.rs index 645b181266..fccbe3ea4c 100644 --- a/nat-detection/src/main.rs +++ b/nat-detection/src/main.rs @@ -35,7 +35,7 @@ const RETRY_INTERVAL: Duration = Duration::from_secs(10); /// - 11: Public under UPnP /// - 12: Private or Unknown NAT #[derive(Debug, Parser)] -#[clap(version, author, verbatim_doc_comment)] +#[clap(disable_version_flag = true)] struct Opt { /// Port to listen on. 
/// @@ -60,15 +60,50 @@ struct Opt { #[command(flatten)] verbose: clap_verbosity_flag::Verbosity, + + /// Print the crate version + #[clap(long)] + crate_version: bool, + + /// Print the package version + #[clap(long)] + #[cfg(not(feature = "nightly"))] + package_version: bool, + + /// Print version information. + #[clap(long)] + version: bool, } #[tokio::main] async fn main() -> Result<()> { color_eyre::install()?; - // Process command line arguments. let opt = Opt::parse(); + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi NAT Detection", + env!("CARGO_PKG_VERSION"), + None + ) + ); + return Ok(()); + } + + if opt.crate_version { + println!("Crate version: {}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("Package version: {}", sn_build_info::package_version()); + return Ok(()); + } + let registry = tracing_subscriber::registry().with(tracing_subscriber::fmt::layer()); // Use `RUST_LOG` if set, else use the verbosity flag (where `-vvvv` is trace level). let _ = if std::env::var_os("RUST_LOG").is_some() { diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 7f2f493f51..f1b006bd67 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -14,6 +14,9 @@ build = "build.rs" name = "node-launchpad" path = "src/bin/tui/main.rs" +[features] +nightly = [] + [dependencies] atty = "0.2.14" better-panic = "0.3.0" @@ -48,8 +51,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" +sn_build_info = { path = "../sn_build_info", version = "0.1.13" } sn-node-manager = { version = "0.10.4", path = "../sn_node_manager" } sn_peers_acquisition = { version = "0.5.1", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn-releases = "~0.2.6" sn_service_management = { version = "0.3.12", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" diff --git a/node-launchpad/src/bin/tui/main.rs b/node-launchpad/src/bin/tui/main.rs index 2ceb235900..d3074018af 100644 --- a/node-launchpad/src/bin/tui/main.rs +++ b/node-launchpad/src/bin/tui/main.rs @@ -16,7 +16,7 @@ use color_eyre::eyre::Result; use node_launchpad::{ app::App, config::configure_winsw, - utils::{initialize_logging, initialize_panic_handler, version}, + utils::{initialize_logging, initialize_panic_handler}, }; #[cfg(target_os = "windows")] use sn_node_manager::config::is_running_as_root; @@ -25,7 +25,7 @@ use std::{env, path::PathBuf}; use tokio::task::LocalSet; #[derive(Parser, Debug)] -#[command(author, version = version(), about)] +#[command(disable_version_flag = true)] pub struct Cli { #[arg( short, @@ -53,12 +53,48 @@ pub struct Cli { #[command(flatten)] pub(crate) peers: PeersArgs, + + /// Print the crate version. + #[clap(long)] + crate_version: bool, + + /// Print the package version. + #[clap(long)] + #[cfg(not(feature = "nightly"))] + package_version: bool, + + /// Print the version. 
+    #[clap(long)]
+    version: bool,
 }
 
 async fn tokio_main() -> Result<()> {
     initialize_panic_handler()?;
     let args = Cli::parse();
 
+    if args.version {
+        println!(
+            "{}",
+            sn_build_info::version_string(
+                "Autonomi Node Launchpad",
+                env!("CARGO_PKG_VERSION"),
+                None
+            )
+        );
+        return Ok(());
+    }
+
+    if args.crate_version {
+        println!("{}", env!("CARGO_PKG_VERSION"));
+        return Ok(());
+    }
+
+    #[cfg(not(feature = "nightly"))]
+    if args.package_version {
+        println!("{}", sn_build_info::package_version());
+        return Ok(());
+    }
+
     info!("Starting app with args: {args:?}");
     let mut app = App::new(
         args.tick_rate,
diff --git a/node-launchpad/src/utils.rs b/node-launchpad/src/utils.rs
index ffb997246c..02b6b72fa1 100644
--- a/node-launchpad/src/utils.rs
+++ b/node-launchpad/src/utils.rs
@@ -14,15 +14,6 @@ use tracing_subscriber::{
     self, prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, Layer,
 };
 
-const VERSION_MESSAGE: &str = concat!(
-    env!("CARGO_PKG_VERSION"),
-    "-",
-    env!("VERGEN_GIT_DESCRIBE"),
-    " (",
-    env!("VERGEN_BUILD_DATE"),
-    ")"
-);
-
 pub fn initialize_panic_handler() -> Result<()> {
     let (panic_hook, eyre_hook) = color_eyre::config::HookBuilder::default()
         .panic_section(format!(
@@ -132,18 +123,3 @@ macro_rules! trace_dbg {
         trace_dbg!(level: tracing::Level::DEBUG, $ex)
     };
 }
-
-pub fn version() -> String {
-    let author = clap::crate_authors!();
-
-    let data_dir_path = get_launchpad_data_dir_path().unwrap().display().to_string();
-
-    format!(
-        "\
-{VERSION_MESSAGE}
-
-Authors: {author}
-
-Data directory: {data_dir_path}"
-    )
-}
diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml
index cf78b8877d..f396596218 100644
--- a/sn_auditor/Cargo.toml
+++ b/sn_auditor/Cargo.toml
@@ -16,6 +16,7 @@ local-discovery = [
     "sn_peers_acquisition/local-discovery",
 ]
 network-contacts = ["sn_peers_acquisition/network-contacts"]
+nightly = []
 open-metrics = ["sn_client/open-metrics"]
 websockets = ["sn_client/websockets"]
 svg-dag = ["graphviz-rust", "dag-collection"]
@@ -31,9 +32,11 @@ graphviz-rust = { version = "0.9.0", optional = true }
 lazy_static = "1.4.0"
 serde = { version = "1.0.133", features = ["derive", "rc"] }
 serde_json = "1.0.108"
+sn_build_info = { path = "../sn_build_info", version = "0.1.13" }
 sn_client = { path = "../sn_client", version = "0.110.1" }
 sn_logging = { path = "../sn_logging", version = "0.2.34" }
 sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.9" }
 tiny_http = { version = "0.12", features = ["ssl-rustls"] }
 tracing = { version = "~0.1.26" }
 tokio = { version = "1.32.0", features = [
diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs
index 2faf8551e1..1cbdaf2f58 100644
--- a/sn_auditor/src/main.rs
+++ b/sn_auditor/src/main.rs
@@ -19,6 +19,7 @@ use dag_db::SpendDagDb;
 use sn_client::Client;
 use sn_logging::{Level, LogBuilder, LogFormat, LogOutputDest};
 use sn_peers_acquisition::PeersArgs;
+use sn_protocol::version::IDENTIFY_PROTOCOL_STR;
 use std::collections::BTreeSet;
 use std::path::PathBuf;
 use tiny_http::{Response, Server};
@@ -27,7 +28,7 @@ use tiny_http::{Response, Server};
 const BETA_REWARDS_BACKUP_INTERVAL_SECS: u64 = 20 * 60;
 
 #[derive(Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(disable_version_flag = true)]
 struct Opt {
     #[command(flatten)]
     peers: PeersArgs,
@@ -70,14 +71,59 @@ struct Opt {
     /// discord usernames of the beta participants
     #[clap(short = 'k', long, value_name = "hex_secret_key")]
     beta_encryption_key: Option<String>,
+ + /// Print the crate version. + #[clap(long)] + pub crate_version: bool, + + /// Print the network protocol version. + #[clap(long)] + pub protocol_version: bool, + + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + pub package_version: bool, + + /// Print version information. + #[clap(long)] + version: bool, } #[tokio::main] async fn main() -> Result<()> { let opt = Opt::parse(); + + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi Auditor", + env!("CARGO_PKG_VERSION"), + Some(&IDENTIFY_PROTOCOL_STR) + ) + ); + return Ok(()); + } + + if opt.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + + if opt.protocol_version { + println!("{}", *IDENTIFY_PROTOCOL_STR); + return Ok(()); + } + let log_builder = logging_init(opt.log_output_dest, opt.log_format)?; let _log_handles = log_builder.initialize()?; - let beta_participants = load_and_update_beta_participants(opt.beta_participants)?; let maybe_sk = if let Some(sk_str) = opt.beta_encryption_key { diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index a14eed90dd..51142a3f5d 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -9,9 +9,17 @@ name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" version = "0.1.13" +build = "build.rs" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } +[features] +nightly = [] + [lints] workspace = true + +[dependencies] +chrono = "0.4" +tracing = { version = "~0.1.26" } diff --git a/sn_build_info/build.rs b/sn_build_info/build.rs index 392c55da4e..7ca807729d 100644 --- a/sn_build_info/build.rs +++ b/sn_build_info/build.rs @@ -5,6 +5,8 @@ // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
+use std::fs;
+use std::path::Path;
 use vergen::EmitBuilder;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
@@ -18,5 +20,47 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         .git_describe(true, false, None)
         .emit()?;
 
+    let release_info_path = Path::new("../release-cycle-info");
+    let contents =
+        fs::read_to_string(release_info_path).expect("Failed to read release-cycle-info");
+
+    let mut year = String::new();
+    let mut month = String::new();
+    let mut cycle = String::new();
+    let mut counter = String::new();
+
+    for line in contents.lines() {
+        if line.starts_with("release-year:") {
+            year = line
+                .split(':')
+                .nth(1)
+                .map(|s| s.trim().to_string())
+                .unwrap_or_default();
+        } else if line.starts_with("release-month:") {
+            month = line
+                .split(':')
+                .nth(1)
+                .map(|s| s.trim().to_string())
+                .unwrap_or_default();
+        } else if line.starts_with("release-cycle:") {
+            cycle = line
+                .split(':')
+                .nth(1)
+                .map(|s| s.trim().to_string())
+                .unwrap_or_default();
+        } else if line.starts_with("release-cycle-counter:") {
+            counter = line
+                .split(':')
+                .nth(1)
+                .map(|s| s.trim().to_string())
+                .unwrap_or_default();
+        }
+    }
+
+    println!("cargo:rustc-env=RELEASE_YEAR={year}");
+    println!("cargo:rustc-env=RELEASE_MONTH={month}");
+    println!("cargo:rustc-env=RELEASE_CYCLE={cycle}");
+    println!("cargo:rustc-env=RELEASE_CYCLE_COUNTER={counter}");
+
     Ok(())
 }
diff --git a/sn_build_info/src/lib.rs b/sn_build_info/src/lib.rs
index 6b858254ac..1e270f2a73 100644
--- a/sn_build_info/src/lib.rs
+++ b/sn_build_info/src/lib.rs
@@ -6,14 +6,15 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use chrono::Utc;
+use tracing::debug;
+
 /// Git information separated by slashes: `<branch> / <sha> / <build_date>`
 pub const fn git_info() -> &'static str {
     concat!(
-        env!("VERGEN_GIT_SHA"),
-        " / ",
         env!("VERGEN_GIT_BRANCH"),
         " / ",
-        env!("VERGEN_GIT_DESCRIBE"),
+        env!("VERGEN_GIT_SHA"),
         " / ",
         env!("VERGEN_BUILD_DATE")
     )
@@ -33,3 +34,77 @@
 pub const fn git_sha() -> &'static str {
     env!("VERGEN_GIT_SHA")
 }
+
+/// Nightly version format: YYYY.MM.DD
+pub fn nightly_version() -> String {
+    let now = Utc::now();
+    now.format("%Y.%m.%d").to_string()
+}
+
+/// Git information for nightly builds: `<date> / <branch> / <sha>`
+pub fn nightly_git_info() -> String {
+    format!("{} / {} / {}", nightly_version(), git_branch(), git_sha(),)
+}
+
+pub fn package_version() -> String {
+    format!(
+        "{}.{}.{}.{}",
+        env!("RELEASE_YEAR"),
+        env!("RELEASE_MONTH"),
+        env!("RELEASE_CYCLE"),
+        env!("RELEASE_CYCLE_COUNTER")
+    )
+}
+
+pub fn full_version_info(
+    app_name: &str,
+    crate_version: &str,
+    protocol_version: Option<&str>,
+) -> String {
+    let mut info = format!("{app_name} v{crate_version}");
+
+    if let Some(version) = protocol_version {
+        info.push_str(&format!("\nNetwork version: {version}"));
+    }
+
+    info.push_str(&format!(
+        "\nPackage version: {}\nGit info: {}",
+        package_version(),
+        git_info()
+    ));
+
+    info
+}
+
+pub fn full_nightly_version_info(app_name: &str, protocol_version: Option<&str>) -> String {
+    let mut info = format!("{app_name} -- Nightly Release {}", nightly_version(),);
+    if let Some(version) = protocol_version {
+        info.push_str(&format!("\nNetwork version: {version}"));
+    }
+    info.push_str(&format!("\nGit info: {} / {}", git_branch(), git_sha(),));
+    info
+}
+
+pub fn version_string(
+    app_name: &str,
+    crate_version: &str,
+    protocol_version: Option<&str>,
+) -> String {
+    if cfg!(feature = "nightly") {
full_nightly_version_info(app_name, protocol_version) + } else { + full_version_info(app_name, crate_version, protocol_version) + } +} + +pub fn log_version_info(crate_version: &str, protocol_version: &str) { + if cfg!(feature = "nightly") { + debug!("nightly build info: {}", nightly_git_info()); + debug!("network version: {protocol_version}"); + } else { + debug!("version: {crate_version}"); + debug!("network version: {protocol_version}"); + debug!("package version: {}", package_version()); + debug!("git info: {}", git_info()); + } +} diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index acdaccc9c8..66d29270c8 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -27,6 +27,7 @@ local-discovery = [ ] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] +nightly = [] open-metrics = ["sn_client/open-metrics"] [dependencies] diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs index 0ac03d458b..d4c8cac1d0 100644 --- a/sn_cli/src/bin/main.rs +++ b/sn_cli/src/bin/main.rs @@ -30,6 +30,7 @@ use sn_client::transfers::bls_secret_from_hex; use sn_client::{Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}; #[cfg(feature = "metrics")] use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat}; +use sn_protocol::version::IDENTIFY_PROTOCOL_STR; use std::{io, path::PathBuf, time::Duration}; use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; @@ -39,6 +40,35 @@ const CLIENT_KEY: &str = "clientkey"; async fn main() -> Result<()> { color_eyre::install()?; let opt = Opt::parse(); + + if opt.version { + println!( + "{}", + sn_build_info::version_string( + "Autonomi CLI", + env!("CARGO_PKG_VERSION"), + Some(&IDENTIFY_PROTOCOL_STR) + ) + ); + return Ok(()); + } + + if opt.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + if opt.protocol_version { + println!("{}", *IDENTIFY_PROTOCOL_STR); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if opt.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + let logging_targets = vec![ // TODO: Reset to nice and clean defaults once we have a better idea of what we want ("sn_networking".to_string(), Level::INFO), @@ -74,7 +104,7 @@ async fn main() -> Result<()> { let client_data_dir_path = get_client_data_dir_path()?; // Perform actions that do not require us connecting to the network and return early - if let SubCmd::Wallet(cmds) = &opt.cmd { + if let Some(SubCmd::Wallet(cmds)) = &opt.cmd { if let WalletCmds::Address { .. } | WalletCmds::Balance { .. } | WalletCmds::Create { .. } @@ -87,7 +117,7 @@ async fn main() -> Result<()> { } } - if let SubCmd::WatchOnlyWallet(cmds) = &opt.cmd { + if let Some(SubCmd::WatchOnlyWallet(cmds)) = &opt.cmd { if let WatchOnlyWalletCmds::Addresses | WatchOnlyWalletCmds::Balance { .. } | WatchOnlyWalletCmds::Deposit { .. } @@ -138,30 +168,32 @@ async fn main() -> Result<()> { }; progress_bar_handler.await?; - // default to verifying storage let should_verify_store = !opt.no_verify; // PowerShell seems having issue to showing the unwrapped error // Hence capture the result and print it out explicity. 
-    let cmd_str = format!("{:?}", opt.cmd);
 
     let result = match opt.cmd {
-        SubCmd::Wallet(cmds) => {
+        Some(SubCmd::Wallet(cmds)) => {
             wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await
         }
-        SubCmd::WatchOnlyWallet(cmds) => {
+        Some(SubCmd::WatchOnlyWallet(cmds)) => {
             wo_wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await
         }
-        SubCmd::Files(cmds) => {
+        Some(SubCmd::Files(cmds)) => {
             files_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await
         }
-        SubCmd::Folders(cmds) => {
+        Some(SubCmd::Folders(cmds)) => {
             folders_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await
         }
-        SubCmd::Register(cmds) => {
+        Some(SubCmd::Register(cmds)) => {
             register_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await
         }
+        None => {
+            println!("Use --help to see available commands");
+            return Ok(());
+        }
     };
 
-    println!("Completed with {result:?} of execute {cmd_str:?}");
+    println!("Completed with {result:?}");
 
     Ok(())
 }
diff --git a/sn_cli/src/bin/subcommands/mod.rs b/sn_cli/src/bin/subcommands/mod.rs
index 7a7ba11cad..575e90b3d3 100644
--- a/sn_cli/src/bin/subcommands/mod.rs
+++ b/sn_cli/src/bin/subcommands/mod.rs
@@ -21,7 +21,7 @@ use std::time::Duration;
 // Please do not remove the blank lines in these doc comments.
 // They are used for inserting line breaks when the help menu is rendered in the UI.
 #[derive(Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(disable_version_flag = true)]
 pub(crate) struct Opt {
     /// Specify the logging output destination.
     ///
@@ -49,7 +49,7 @@ pub(crate) struct Opt {
 
     /// Available sub commands.
     #[clap(subcommand)]
-    pub cmd: SubCmd,
+    pub cmd: Option<SubCmd>,
 
     /// The maximum duration to wait for a connection to the network before timing out.
     #[clap(long = "timeout", global = true, value_parser = |t: &str| -> Result<Duration> { Ok(t.parse().map(Duration::from_secs)?) })]
@@ -60,6 +60,23 @@
     /// Prevent verification of data storage on the network.
     /// This may increase operation speed, but offers no guarantees that operations were successful.
     #[clap(global = true, long = "no-verify", short = 'x')]
     pub no_verify: bool,
+
+    /// Print the crate version.
+    #[clap(long)]
+    pub crate_version: bool,
+
+    /// Print the network protocol version.
+    #[clap(long)]
+    pub protocol_version: bool,
+
+    /// Print the package version.
+    #[clap(long)]
+    #[cfg(not(feature = "nightly"))]
+    pub package_version: bool,
+
+    /// Print version information.
+    #[clap(long)]
+    pub version: bool,
 }
 
 #[derive(Subcommand, Debug)]
diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml
index 3a203b3d8d..a8348c2aac 100644
--- a/sn_faucet/Cargo.toml
+++ b/sn_faucet/Cargo.toml
@@ -15,6 +15,7 @@ default = ["gifting"]
 distribution = ["base64", "bitcoin", "minreq"]
 gifting = []
 initial-data = ["reqwest", "futures"]
+nightly = []
 
 [[bin]]
 path = "src/main.rs"
diff --git a/sn_faucet/src/main.rs b/sn_faucet/src/main.rs
index 833178a8f9..e01aecf426 100644
--- a/sn_faucet/src/main.rs
+++ b/sn_faucet/src/main.rs
@@ -22,15 +22,44 @@ use sn_client::{
 };
 use sn_logging::{Level, LogBuilder, LogOutputDest};
 use sn_peers_acquisition::PeersArgs;
+use sn_protocol::version::IDENTIFY_PROTOCOL_STR;
 use sn_transfers::{get_faucet_data_dir, HotWallet, MainPubkey, NanoTokens, Transfer};
 use std::{path::PathBuf, time::Duration};
 use tokio::{sync::broadcast::error::RecvError, task::JoinHandle};
-use tracing::{debug, error, info};
+use tracing::{error, info};
 
 #[tokio::main]
 async fn main() -> Result<()> {
     let opt = Opt::parse();
 
+    if opt.version {
+        println!(
+            "{}",
+            sn_build_info::version_string(
+                "Autonomi Test Faucet",
+                env!("CARGO_PKG_VERSION"),
+                Some(&IDENTIFY_PROTOCOL_STR.to_string())
+            )
+        );
+        return Ok(());
+    }
+
+    if opt.crate_version {
+        println!("Crate version: {}", env!("CARGO_PKG_VERSION"));
+        return Ok(());
+    }
+
+    if opt.protocol_version {
+        println!("Network version: {}", *IDENTIFY_PROTOCOL_STR);
+        return Ok(());
+    }
+
+    #[cfg(not(feature = "nightly"))]
+    if opt.package_version {
+        println!("Package version: {}", sn_build_info::package_version());
+        return Ok(());
+    }
+
     let bootstrap_peers = opt.peers.get_peers().await?;
     let bootstrap_peers = if bootstrap_peers.is_empty() {
         // empty vec is returned if `local-discovery` flag is provided
@@ -57,14 +86,8 @@ async fn main() -> Result<()> {
     log_builder.output_dest(opt.log_output_dest);
     let _log_handles = log_builder.initialize()?;
 
-    debug!(
-        "faucet built with git version: {}",
-        sn_build_info::git_info()
-    );
-    println!(
-        "faucet built with git version: {}",
-        sn_build_info::git_info()
-    );
+    sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR);
+
     info!("Instantiating a SAFE Test Faucet...");
 
     let secret_key = bls::SecretKey::random();
@@ -147,7 +170,7 @@ fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, 
 }
 
 #[derive(Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(disable_version_flag = true)]
 struct Opt {
     /// Specify the logging output destination.
     ///
@@ -167,7 +190,24 @@ struct Opt {
 
     /// Available sub commands.
     #[clap(subcommand)]
-    pub cmd: SubCmd,
+    pub cmd: Option<SubCmd>,
+
+    /// Print the crate version
+    #[clap(long)]
+    crate_version: bool,
+
+    /// Print the protocol version
+    #[clap(long)]
+    protocol_version: bool,
+
+    /// Print the package version
+    #[cfg(not(feature = "nightly"))]
+    #[clap(long)]
+    package_version: bool,
+
+    /// Print version information.
+    #[clap(long)]
+    version: bool,
 }
 
 #[derive(Subcommand, Debug, Clone)]
@@ -198,22 +238,29 @@ enum SubCmd {
     RestartServer,
 }
 
-async fn faucet_cmds(cmds: SubCmd, client: &Client, funded_wallet: HotWallet) -> Result<()> {
-    match cmds {
-        SubCmd::ClaimGenesis => {
-            claim_genesis(client, funded_wallet).await?;
-        }
-        SubCmd::Send { amount, to } => {
-            send_tokens(client, funded_wallet, &amount, &to).await?;
-        }
-        SubCmd::Server => {
-            // shouldn't return except on error
-            run_faucet_server(client).await?;
-        }
-        SubCmd::RestartServer => {
-            // shouldn't return except on error
-            restart_faucet_server(client).await?;
+async fn faucet_cmds(
+    cmds: Option<SubCmd>,
+    client: &Client,
+    funded_wallet: HotWallet,
+) -> Result<()> {
+    if let Some(cmds) = cmds {
+        match cmds {
+            SubCmd::ClaimGenesis => {
+                claim_genesis(client, funded_wallet).await?;
+            }
+            SubCmd::Send { amount, to } => {
+                send_tokens(client, funded_wallet, &amount, &to).await?;
+            }
+            SubCmd::Server => {
+                run_faucet_server(client).await?;
+            }
+            SubCmd::RestartServer => {
+                restart_faucet_server(client).await?;
+            }
         }
+    } else {
+        // Handle the case when no subcommand is provided
+        println!("No subcommand provided. Use --help for more information.");
     }
     Ok(())
 }
diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml
index b5bf576e7c..99c6d3f273 100644
--- a/sn_node/Cargo.toml
+++ b/sn_node/Cargo.toml
@@ -15,14 +15,15 @@ path = "src/bin/safenode/main.rs"
 
 [features]
 default = ["metrics", "upnp", "reward-forward", "open-metrics"]
+encrypt-records = ["sn_networking/encrypt-records"]
 local-discovery = ["sn_networking/local-discovery"]
-otlp = ["sn_logging/otlp"]
 metrics = ["sn_logging/process-metrics"]
 network-contacts = ["sn_peers_acquisition/network-contacts"]
+nightly = []
 open-metrics = ["sn_networking/open-metrics", "prometheus-client"]
-encrypt-records = ["sn_networking/encrypt-records"]
-upnp = ["sn_networking/upnp"]
+otlp = ["sn_logging/otlp"]
 reward-forward = ["sn_transfers/reward-forward"]
+upnp = ["sn_networking/upnp"]
 
 [dependencies]
 assert_fs = "1.0.0"
diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs
index cf30e04c65..c503504528 100644
--- a/sn_node/src/bin/safenode/main.rs
+++ b/sn_node/src/bin/safenode/main.rs
@@ -11,15 +11,17 @@ extern crate tracing;
 
 mod rpc_service;
 
-use clap::Parser;
-use eyre::{eyre, Result};
+use clap::{command, Parser};
+use color_eyre::{eyre::eyre, Result};
 use libp2p::{identity::Keypair, PeerId};
 #[cfg(feature = "metrics")]
 use sn_logging::metrics::init_metrics;
 use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle};
 use sn_node::{Marker, NodeBuilder, NodeEvent, NodeEventsReceiver};
 use sn_peers_acquisition::PeersArgs;
-use sn_protocol::{node::get_safenode_root_dir, node_rpc::NodeCtrl};
+use sn_protocol::{
+    node::get_safenode_root_dir, node_rpc::NodeCtrl, version::IDENTIFY_PROTOCOL_STR,
+};
 use std::{
     env,
     io::Write,
@@ -65,7 +67,7 @@ pub fn parse_log_output(val: &str) -> Result<LogOutputDest> {
 // Please do not remove the blank lines in these doc comments.
 // They are used for inserting line breaks when the help menu is rendered in the UI.
 #[derive(Parser, Debug)]
-#[clap(name = "safenode cli", version = env!("CARGO_PKG_VERSION"))]
+#[command(disable_version_flag = true)]
 struct Opt {
     /// Specify whether the node is operating from a home network and situated behind a NAT without port forwarding
     /// capabilities. Setting this to true, activates hole-punching to facilitate direct connections from other nodes.
@@ -177,12 +179,57 @@ struct Opt {
         required_if_eq("metrics_server_port", "0")
     )]
     enable_metrics_server: bool,
+
+    /// Print the crate version.
+    #[clap(long)]
+    crate_version: bool,
+
+    /// Print the network protocol version.
+    #[clap(long)]
+    protocol_version: bool,
+
+    /// Print the package version.
+    #[cfg(not(feature = "nightly"))]
+    #[clap(long)]
+    package_version: bool,
+
+    /// Print version information.
+    #[clap(long)]
+    version: bool,
 }
 
 fn main() -> Result<()> {
     color_eyre::install()?;
     let opt = Opt::parse();
 
+    if opt.version {
+        println!(
+            "{}",
+            sn_build_info::version_string(
+                "Autonomi Node",
+                env!("CARGO_PKG_VERSION"),
+                Some(&IDENTIFY_PROTOCOL_STR)
+            )
+        );
+        return Ok(());
+    }
+
+    if opt.crate_version {
+        println!("Crate version: {}", env!("CARGO_PKG_VERSION"));
+        return Ok(());
+    }
+
+    if opt.protocol_version {
+        println!("Network version: {}", *IDENTIFY_PROTOCOL_STR);
+        return Ok(());
+    }
+
+    #[cfg(not(feature = "nightly"))]
+    if opt.package_version {
+        println!("Package version: {}", sn_build_info::package_version());
+        return Ok(());
+    }
+
     let node_socket_addr = SocketAddr::new(opt.ip, opt.port);
     let (root_dir, keypair) = get_root_dir_and_keypair(&opt.root_dir)?;
 
@@ -197,10 +244,8 @@ fn main() -> Result<()> {
         env!("CARGO_PKG_VERSION")
     );
     info!("\n{}\n{}", msg, "=".repeat(msg.len()));
-    debug!(
-        "safenode built with git version: {}",
-        sn_build_info::git_info()
-    );
+
+    sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR);
 
     info!("Node started with initial_peers {bootstrap_peers:?}");
 
diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml
index 3e415a0d9c..6dfd50bd04 100644
--- a/sn_node_manager/Cargo.toml
+++ b/sn_node_manager/Cargo.toml
@@ -22,6 +22,7 @@ chaos = []
 default = ["quic"]
 local-discovery = []
 network-contacts = []
+nightly = []
 open-metrics = []
 otlp = []
 quic = []
@@ -44,6 +45,7 @@ semver = "1.0.20"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 service-manager = "0.7.0"
+sn_build_info = { path = "../sn_build_info", version = "0.1.13" }
 sn_logging = { path = "../sn_logging", version = "0.2.34" }
 sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" }
 sn_protocol = { path = "../sn_protocol", version = "0.17.9" }
diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs
index 7e89275279..b827e3f6a4 100644
--- a/sn_node_manager/src/bin/cli/main.rs
+++ b/sn_node_manager/src/bin/cli/main.rs
@@ -22,22 +22,35 @@ use tracing::Level;
 const DEFAULT_NODE_COUNT: u16 = 25;
 
 #[derive(Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(disable_version_flag = true)]
 pub(crate) struct Cmd {
     /// Available sub commands.
     #[clap(subcommand)]
-    pub cmd: SubCmd,
+    pub cmd: Option<SubCmd>,
 
-    #[clap(short, long, action = clap::ArgAction::Count, default_value_t = 2)]
-    verbose: u8,
+    /// Print the crate version.
+    #[clap(long)]
+    pub crate_version: bool,
 
     /// Output debug-level logging to stderr.
     #[clap(long, conflicts_with = "trace")]
     debug: bool,
 
+    /// Print the package version.
+    #[cfg(not(feature = "nightly"))]
+    #[clap(long)]
+    pub package_version: bool,
+
     /// Output trace-level logging to stderr.
     #[clap(long, conflicts_with = "debug")]
     trace: bool,
+
+    #[clap(short, long, action = clap::ArgAction::Count, default_value_t = 2)]
+    verbose: u8,
+
+    /// Print version information.
+ #[clap(long)] + version: bool, } #[derive(Subcommand, Debug)] @@ -997,6 +1010,26 @@ pub enum LocalSubCmd { async fn main() -> Result<()> { color_eyre::install()?; let args = Cmd::parse(); + + if args.version { + println!( + "{}", + sn_build_info::version_string("Autonomi Node Manager", env!("CARGO_PKG_VERSION"), None) + ); + return Ok(()); + } + + if args.crate_version { + println!("{}", env!("CARGO_PKG_VERSION")); + return Ok(()); + } + + #[cfg(not(feature = "nightly"))] + if args.package_version { + println!("{}", sn_build_info::package_version()); + return Ok(()); + } + let verbosity = VerbosityLevel::from(args.verbose); let _log_handle = if args.debug || args.trace { @@ -1015,7 +1048,7 @@ async fn main() -> Result<()> { tracing::info!("Executing cmd: {:?}", args.cmd); match args.cmd { - SubCmd::Add { + Some(SubCmd::Add { auto_restart, auto_set_nat_flags, count, @@ -1038,7 +1071,7 @@ async fn main() -> Result<()> { upnp, user, version, - } => { + }) => { let _ = cmd::node::add( auto_restart, auto_set_nat_flags, @@ -1067,7 +1100,7 @@ async fn main() -> Result<()> { .await?; Ok(()) } - SubCmd::Auditor(AuditorSubCmd::Add { + Some(SubCmd::Auditor(AuditorSubCmd::Add { beta_encryption_key, env_variables, log_dir_path, @@ -1075,7 +1108,7 @@ async fn main() -> Result<()> { peers, url, version, - }) => { + })) => { cmd::auditor::add( beta_encryption_key, env_variables, @@ -1088,32 +1121,32 @@ async fn main() -> Result<()> { ) .await } - SubCmd::Auditor(AuditorSubCmd::Start {}) => cmd::auditor::start(verbosity).await, - SubCmd::Auditor(AuditorSubCmd::Stop {}) => cmd::auditor::stop(verbosity).await, - SubCmd::Auditor(AuditorSubCmd::Upgrade { + Some(SubCmd::Auditor(AuditorSubCmd::Start {})) => cmd::auditor::start(verbosity).await, + Some(SubCmd::Auditor(AuditorSubCmd::Stop {})) => cmd::auditor::stop(verbosity).await, + Some(SubCmd::Auditor(AuditorSubCmd::Upgrade { do_not_start, force, env_variables, url, version, - }) => { + })) => { cmd::auditor::upgrade(do_not_start, force, env_variables, url, version, verbosity).await } - SubCmd::Balance { + Some(SubCmd::Balance { peer_id: peer_ids, service_name: service_names, - } => cmd::node::balance(peer_ids, service_names, verbosity).await, - SubCmd::Daemon(DaemonSubCmd::Add { + }) => cmd::node::balance(peer_ids, service_names, verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Add { address, env_variables, port, path, url, version, - }) => cmd::daemon::add(address, env_variables, port, path, url, version, verbosity).await, - SubCmd::Daemon(DaemonSubCmd::Start {}) => cmd::daemon::start(verbosity).await, - SubCmd::Daemon(DaemonSubCmd::Stop {}) => cmd::daemon::stop(verbosity).await, - SubCmd::Faucet(faucet_command) => match faucet_command { + })) => cmd::daemon::add(address, env_variables, port, path, url, version, verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Start {})) => cmd::daemon::start(verbosity).await, + Some(SubCmd::Daemon(DaemonSubCmd::Stop {})) => cmd::daemon::stop(verbosity).await, + Some(SubCmd::Faucet(faucet_command)) => match faucet_command { FaucetSubCmd::Add { env_variables, log_dir_path, @@ -1153,7 +1186,7 @@ async fn main() -> Result<()> { .await } }, - SubCmd::Local(local_command) => match local_command { + Some(SubCmd::Local(local_command)) => match local_command { LocalSubCmd::Join { build, count, @@ -1239,27 +1272,27 @@ async fn main() -> Result<()> { json, } => cmd::local::status(details, fail, json).await, }, - SubCmd::NatDetection(NatDetectionSubCmd::Run { + Some(SubCmd::NatDetection(NatDetectionSubCmd::Run { path, 
servers, url, version, - }) => { + })) => { cmd::nat_detection::run_nat_detection(servers, true, path, url, version, verbosity) .await } - SubCmd::Remove { + Some(SubCmd::Remove { keep_directories, peer_id: peer_ids, service_name: service_names, - } => cmd::node::remove(keep_directories, peer_ids, service_names, verbosity).await, - SubCmd::Reset { force } => cmd::node::reset(force, verbosity).await, - SubCmd::Start { + }) => cmd::node::remove(keep_directories, peer_ids, service_names, verbosity).await, + Some(SubCmd::Reset { force }) => cmd::node::reset(force, verbosity).await, + Some(SubCmd::Start { connection_timeout, interval, peer_id: peer_ids, service_name: service_names, - } => { + }) => { cmd::node::start( connection_timeout, interval, @@ -1269,16 +1302,16 @@ async fn main() -> Result<()> { ) .await } - SubCmd::Status { + Some(SubCmd::Status { details, fail, json, - } => cmd::node::status(details, fail, json).await, - SubCmd::Stop { + }) => cmd::node::status(details, fail, json).await, + Some(SubCmd::Stop { peer_id: peer_ids, service_name: service_names, - } => cmd::node::stop(peer_ids, service_names, verbosity).await, - SubCmd::Upgrade { + }) => cmd::node::stop(peer_ids, service_names, verbosity).await, + Some(SubCmd::Upgrade { connection_timeout, do_not_start, force, @@ -1289,7 +1322,7 @@ async fn main() -> Result<()> { env_variables: provided_env_variable, url, version, - } => { + }) => { cmd::node::upgrade( connection_timeout, do_not_start, @@ -1305,6 +1338,7 @@ async fn main() -> Result<()> { ) .await } + None => Ok(()), } } diff --git a/sn_node_manager/src/bin/daemon/main.rs b/sn_node_manager/src/bin/daemon/main.rs index 99925943be..5de75e2904 100644 --- a/sn_node_manager/src/bin/daemon/main.rs +++ b/sn_node_manager/src/bin/daemon/main.rs @@ -27,16 +27,26 @@ use tonic::{transport::Server, Code, Request, Response, Status}; use tracing::Level; #[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] +#[command(disable_version_flag = true)] struct Args { - /// Specify a port for the daemon to listen for RPCs. It defaults to 12500 if not set. - #[clap(long, default_value_t = DAEMON_DEFAULT_PORT)] - port: u16, /// Specify an Ipv4Addr for the daemon to listen on. This is useful if you want to manage the nodes remotely. /// /// If not set, the daemon listens locally for commands. #[clap(long, default_value_t = Ipv4Addr::new(127, 0, 0, 1))] address: Ipv4Addr, + /// Print the crate version. + #[clap(long)] + pub crate_version: bool, + /// Print the package version. + #[cfg(not(feature = "nightly"))] + #[clap(long)] + pub package_version: bool, + /// Specify a port for the daemon to listen for RPCs. It defaults to 12500 if not set. + #[clap(long, default_value_t = DAEMON_DEFAULT_PORT)] + port: u16, + /// Print version information. 
+    #[clap(long)]
+    version: bool,
 }
 
 struct SafeNodeManagerDaemon {}
@@ -128,12 +138,35 @@ impl SafeNodeManagerDaemon {}
 
 #[tokio::main(flavor = "current_thread")]
 async fn main() -> Result<()> {
+    let args = Args::parse();
+
+    if args.version {
+        println!(
+            "{}",
+            sn_build_info::version_string(
+                "Autonomi Node Manager RPC Daemon",
+                env!("CARGO_PKG_VERSION"),
+                None
+            )
+        );
+        return Ok(());
+    }
+
+    if args.crate_version {
+        println!("{}", env!("CARGO_PKG_VERSION"));
+        return Ok(());
+    }
+
+    #[cfg(not(feature = "nightly"))]
+    if args.package_version {
+        println!("{}", sn_build_info::package_version());
+        return Ok(());
+    }
+
     let _log_handles = get_log_builder()?.initialize()?;
 
     println!("Starting safenodemand");
-    let args = Args::parse();
     let service = SafeNodeManagerDaemon {};
 
-    // adding our service to our server.
     if let Err(err) = Server::builder()
         .add_service(SafeNodeManagerServer::new(service))
         .serve(SocketAddr::new(IpAddr::V4(args.address), args.port))
diff --git a/sn_node_manager/src/helpers.rs b/sn_node_manager/src/helpers.rs
index a841b54e6f..bd0ca2baae 100644
--- a/sn_node_manager/src/helpers.rs
+++ b/sn_node_manager/src/helpers.rs
@@ -276,7 +276,7 @@ pub async fn download_and_extract_release(
 }
 
 pub fn get_bin_version(bin_path: &PathBuf) -> Result<String> {
-    trace!("Obtaining version of binary {bin_path:?}");
+    debug!("Obtaining version of binary {bin_path:?}");
     let mut cmd = Command::new(bin_path)
         .arg("--version")
         .stdout(Stdio::piped())
@@ -293,15 +293,28 @@ pub fn get_bin_version(bin_path: &PathBuf) -> Result<String> {
         .read_to_string(&mut output)
         .inspect_err(|err| error!("Output contained non utf8 chars: {err:?}"))?;
 
-    let version = output
-        .split_whitespace()
-        .last()
-        .ok_or_else(|| {
-            error!("Failed to parse version");
-            eyre!("Failed to parse version")
-        })?
-        .to_string();
-    trace!("Obtained version of binary: {version}");
+    // Extract the first line of the output
+    let first_line = output.lines().next().ok_or_else(|| {
+        error!("No output received from binary");
+        eyre!("No output received from binary")
+    })?;
+
+    let version = if let Some(v_pos) = first_line.find('v') {
+        // Stable binary: Extract version after 'v'
+        first_line[v_pos + 1..]
+            .split_whitespace()
+            .next()
+            .map(String::from)
+    } else {
+        // Nightly binary: Extract the date at the end of the first line
+        first_line.split_whitespace().last().map(String::from)
+    }
+    .ok_or_else(|| {
+        error!("Failed to parse version from output");
+        eyre!("Failed to parse version from output")
+    })?;
+
+    debug!("Obtained version of binary: {version}");
 
     Ok(version)
 }
diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs
index ec3a7ae34e..58d650cf67 100644
--- a/sn_node_manager/src/local.rs
+++ b/sn_node_manager/src/local.rs
@@ -62,11 +62,18 @@ impl Launcher for LocalSafeLauncher {
     fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result<u32> {
         info!("Launching the faucet server...");
+        debug!("Using genesis_multiaddr: {}", genesis_multiaddr.to_string());
         let args = vec![
             "--peer".to_string(),
             genesis_multiaddr.to_string(),
             "server".to_string(),
         ];
+
+        debug!(
+            "Using faucet binary: {}",
+            self.faucet_bin_path.to_string_lossy()
+        );
+        debug!("Using args: {}", args.join(" "));
         let child = Command::new(self.faucet_bin_path.clone())
             .args(args)
             .stdout(Stdio::inherit())
@@ -369,8 +376,8 @@ pub async fn run_network(
     if !options.join {
         println!("Launching the faucet server...");
-        let pid = launcher.launch_faucet(&bootstrap_peers[0])?;
         let version = get_bin_version(&options.faucet_bin_path)?;
+        let pid = launcher.launch_faucet(&bootstrap_peers[0])?;
         let faucet = FaucetServiceData {
             faucet_path: options.faucet_bin_path,
             local: true,
diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml
index 5949d13ecd..055f1913b9 100644
--- a/sn_node_rpc_client/Cargo.toml
+++ b/sn_node_rpc_client/Cargo.toml
@@ -14,6 +14,9 @@ version = "0.6.29"
 name = "safenode_rpc_client"
 path = "src/main.rs"
 
+[features]
+nightly = []
+
 [dependencies]
 assert_fs = "1.0.0"
 async-trait = "0.1"
@@ -23,6 +26,7 @@ color-eyre = "0.6.2"
 hex = "~0.4.3"
 libp2p = { version = "0.54.1", features = ["kad"]}
 libp2p-identity = { version="0.2.7", features = ["rand"] }
+sn_build_info = { path = "../sn_build_info", version = "0.1.13" }
 sn_client = { path = "../sn_client", version = "0.110.1" }
 sn_logging = { path = "../sn_logging", version = "0.2.34" }
 sn_node = { path = "../sn_node", version = "0.111.2" }
diff --git a/sn_node_rpc_client/src/main.rs b/sn_node_rpc_client/src/main.rs
index 7d019bff95..7930a3b712 100644
--- a/sn_node_rpc_client/src/main.rs
+++ b/sn_node_rpc_client/src/main.rs
@@ -9,26 +9,35 @@
 
 use clap::Parser;
 use color_eyre::eyre::Result;
-
 use sn_logging::{Level, LogBuilder};
 use sn_node::NodeEvent;
-
 use sn_protocol::safenode_proto::{safe_node_client::SafeNodeClient, NodeEventsRequest};
-
 use sn_service_management::rpc::{RpcActions, RpcClient};
-
 use std::{net::SocketAddr, time::Duration};
 use tokio_stream::StreamExt;
 use tonic::Request;
 
 #[derive(Parser, Debug)]
-#[clap(version, name = "safenode RPC client")]
+#[command(disable_version_flag = true)]
 struct Opt {
     /// Address of the node's RPC service, e.g. 127.0.0.1:12001.
     addr: SocketAddr,
 
     /// subcommands
     #[clap(subcommand)]
     cmd: Cmd,
+
+    /// Print the crate version.
+    #[clap(long)]
+    crate_version: bool,
+
+    /// Print the package version.
+    #[cfg(not(feature = "nightly"))]
+    #[clap(long)]
+    package_version: bool,
+
+    /// Print version information.
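
The `get_bin_version` rework above is worth pausing on: it now distinguishes two output shapes, a stable first line containing a `v`-prefixed semver and a nightly first line ending in a date. A self-contained sketch of that branch (the example lines are illustrative, not captured binary output):

    /// Mirror of the parsing logic in `get_bin_version` above.
    fn parse_version_line(first_line: &str) -> Option<String> {
        if let Some(v_pos) = first_line.find('v') {
            // Stable binary: take the token after the first 'v'.
            first_line[v_pos + 1..]
                .split_whitespace()
                .next()
                .map(String::from)
        } else {
            // Nightly binary: take the trailing date token.
            first_line.split_whitespace().last().map(String::from)
        }
    }

    fn main() {
        // Hypothetical examples; note the first-'v' scan means a binary
        // name containing a 'v' would shift the result.
        assert_eq!(
            parse_version_line("Autonomi Node v0.111.2").as_deref(),
            Some("0.111.2")
        );
        assert_eq!(
            parse_version_line("Autonomi Node 2024.09.24").as_deref(),
            Some("2024.09.24")
        );
    }
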
+    #[clap(long)]
+    version: bool,
 }
 
 #[derive(Parser, Debug)]
@@ -90,6 +99,30 @@ async fn main() -> Result<()> {
     let _log_appender_guard = LogBuilder::new(logging_targets).initialize()?;
 
     let opt = Opt::parse();
+
+    if opt.version {
+        println!(
+            "{}",
+            sn_build_info::version_string(
+                "Autonomi Node RPC Client",
+                env!("CARGO_PKG_VERSION"),
+                None
+            )
+        );
+        return Ok(());
+    }
+
+    if opt.crate_version {
+        println!("Crate version: {}", env!("CARGO_PKG_VERSION"));
+        return Ok(());
+    }
+
+    #[cfg(not(feature = "nightly"))]
+    if opt.package_version {
+        println!("Package version: {}", sn_build_info::package_version());
+        return Ok(());
+    }
+
     let addr = opt.addr;
 
     match opt.cmd {

From 73194e3b252417b43abb0daa2cf60238bd319193 Mon Sep 17 00:00:00 2001
From: Chris O'Neil
Date: Tue, 24 Sep 2024 17:25:22 +0100
Subject: [PATCH 044/255] ci: nightly releases

The nightly release will build the binary set from the `main` branch
every night at midnight UTC.

The workflow:

* Builds the binaries with the `nightly` feature, which versions them using
  the current date, rather than a Semantic Version.
* Uploads packaged binaries to S3, again with the date for the version.
* Also uploads packaged binaries to S3 with 'nightly' as the version, which
  will always be the latest. The previous 'nightly' version will be deleted
  from the bucket.
* Creates a GitHub Release and uploads archives with the binaries packaged
  by architecture, as per the stable release. To prevent our releases page
  being flooded, the previous nightly release will be deleted before the
  current one is created.
---
 .github/workflows/nightly-release.yml | 251 ++++++++++++++++++++++++++
 Justfile                              | 109 ++++++++---
 2 files changed, 338 insertions(+), 22 deletions(-)
 create mode 100644 .github/workflows/nightly-release.yml

diff --git a/.github/workflows/nightly-release.yml b/.github/workflows/nightly-release.yml
new file mode 100644
index 0000000000..70db60d68e
--- /dev/null
+++ b/.github/workflows/nightly-release.yml
@@ -0,0 +1,251 @@
+name: nightly release
+
+on:
+  schedule:
+    - cron: '0 0 * * *' # Run every night at midnight UTC
+  workflow_dispatch: # This also allows the workflow to be triggered manually
+
+env:
+  WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs
+
+jobs:
+  build:
+    if: ${{ github.repository_owner == 'maidsafe' }}
+    name: build
+    environment: stable
+    env:
+      FOUNDATION_PK: ${{ vars.FOUNDATION_PK }}
+      GENESIS_PK: ${{ vars.GENESIS_PK }}
+      GENESIS_SK: ${{ secrets.GENESIS_SK }}
+      NETWORK_ROYALTIES_PK: ${{ vars.NETWORK_ROYALTIES_PK }}
+      PAYMENT_FORWARD_PK: ${{ vars.PAYMENT_FORWARD_PK }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        include:
+          - os: windows-latest
+            target: x86_64-pc-windows-msvc
+          - os: macos-latest
+            target: x86_64-apple-darwin
+          - os: macos-latest
+            target: aarch64-apple-darwin
+          - os: ubuntu-latest
+            target: x86_64-unknown-linux-musl
+          - os: ubuntu-latest
+            target: arm-unknown-linux-musleabi
+          - os: ubuntu-latest
+            target: armv7-unknown-linux-musleabihf
+          - os: ubuntu-latest
+            target: aarch64-unknown-linux-musl
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: cargo-bins/cargo-binstall@main
+      - shell: bash
+        run: cargo binstall --no-confirm just
+
+      - name: build nightly release artifacts
+        shell: bash
+        run: |
+          just build-release-artifacts "${{ matrix.target }}" "true"
+
+      - uses: actions/upload-artifact@main
+        with:
+          name: safe_network-${{ matrix.target }}
+          path: |
+            artifacts
+            !artifacts/.cargo-lock
+
+      - name: post notification to slack on failure
+        if: ${{ failure() }}
+ uses: bryannice/gitactions-slack-notification@2.0.0 + env: + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Release Failed" + + s3-release: + if: ${{ github.repository_owner == 'maidsafe' }} + name: s3 release + runs-on: ubuntu-latest + needs: [build] + env: + AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: eu-west-2 + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-pc-windows-msvc + path: artifacts/x86_64-pc-windows-msvc/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-unknown-linux-musl + path: artifacts/x86_64-unknown-linux-musl/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-apple-darwin + path: artifacts/x86_64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-apple-darwin + path: artifacts/aarch64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-arm-unknown-linux-musleabi + path: artifacts/arm-unknown-linux-musleabi/release + - uses: actions/download-artifact@master + with: + name: safe_network-armv7-unknown-linux-musleabihf + path: artifacts/armv7-unknown-linux-musleabihf/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-unknown-linux-musl + path: artifacts/aarch64-unknown-linux-musl/release + + - uses: cargo-bins/cargo-binstall@main + - name: install just + shell: bash + run: cargo binstall --no-confirm just + + - name: remove latest nightly release + shell: bash + run: | + just delete-s3-bin "faucet" "nightly" + just delete-s3-bin "nat-detection" "nightly" + just delete-s3-bin "node-launchpad" "nightly" + just delete-s3-bin "safe" "nightly" + just delete-s3-bin "safenode" "nightly" + just delete-s3-bin "safenode_rpc_client" "nightly" + just delete-s3-bin "safenode-manager" "nightly" + just delete-s3-bin "safenodemand" "nightly" + just delete-s3-bin "sn_auditor" "nightly" + + - name: upload binaries to S3 + shell: bash + run: | + version=$(date +"%Y.%m.%d") + just package-bin "faucet" "$version" + just package-bin "nat-detection" "$version" + just package-bin "node-launchpad" "$version" + just package-bin "safe" "$version" + just package-bin "safenode" "$version" + just package-bin "safenode_rpc_client" "$version" + just package-bin "safenode-manager" "$version" + just package-bin "safenodemand" "$version" + just package-bin "sn_auditor" "$version" + just upload-all-packaged-bins-to-s3 + + rm -rf packaged_bins + just package-bin "faucet" "nightly" + just package-bin "nat-detection" "nightly" + just package-bin "node-launchpad" "nightly" + just package-bin "safe" "nightly" + just package-bin "safenode" "nightly" + just package-bin "safenode_rpc_client" "nightly" + just package-bin "safenode-manager" "nightly" + just package-bin "safenodemand" "nightly" + just package-bin "sn_auditor" "nightly" + just upload-all-packaged-bins-to-s3 + + github-release: + if: ${{ github.repository_owner == 'maidsafe' }} + name: github release + runs-on: ubuntu-latest + needs: [s3-release] + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-pc-windows-msvc + path: artifacts/x86_64-pc-windows-msvc/release + - uses: 
actions/download-artifact@master + with: + name: safe_network-x86_64-unknown-linux-musl + path: artifacts/x86_64-unknown-linux-musl/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-apple-darwin + path: artifacts/x86_64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-apple-darwin + path: artifacts/aarch64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-arm-unknown-linux-musleabi + path: artifacts/arm-unknown-linux-musleabi/release + - uses: actions/download-artifact@master + with: + name: safe_network-armv7-unknown-linux-musleabihf + path: artifacts/armv7-unknown-linux-musleabihf/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-unknown-linux-musl + path: artifacts/aarch64-unknown-linux-musl/release + + - uses: cargo-bins/cargo-binstall@main + - name: install just + shell: bash + run: cargo binstall --no-confirm just + + - name: set package version + shell: bash + run: | + version=$(date +"%Y.%m.%d") + echo "PACKAGE_VERSION=$version" >> $GITHUB_ENV + + - name: package release artifacts + shell: bash + run: just package-all-architectures + + - name: delete existing nightly release + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + run: | + releases=$(gh api repos/${{ github.repository }}/releases --paginate) + echo "$releases" | jq -c '.[]' | while read release; do + tag_name=$(echo $release | jq -r '.tag_name') + release_id=$(echo $release | jq -r '.id') + + if [[ $tag_name == nightly* ]]; then + echo "deleting nightly release $tag_name" + gh api -X DELETE repos/${{ github.repository }}/releases/$release_id + exit 0 + fi + done + + - name: create new nightly release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + with: + tag_name: nightly-${{ env.PACKAGE_VERSION }} + release_name: "${{ env.PACKAGE_VERSION }} Nightly Release" + body: | + Nightly release of the Autonomi binary set, built from the `main` branch. + + These binaries should be compatible with the stable network, but they should be considered experimental. + + For the most reliable experience, prefer the latest stable release. 
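
As the commit message and the `s3-release` job above spell out, every nightly artifact is published twice: once under a `%Y.%m.%d` date version and once under the literal version `nightly`, with the previous `nightly` copy deleted first. A small sketch of the resulting package names, following the `{bin}-{version}-{arch}` convention that the Justfile's `delete-s3-bin` recipe (further below) encodes, with values hard-coded for illustration:

    fn package_names(bin: &str, version: &str, arch: &str) -> (String, String) {
        (
            format!("{bin}-{version}-{arch}.zip"),
            format!("{bin}-{version}-{arch}.tar.gz"),
        )
    }

    fn main() {
        // Equivalent of `version=$(date +"%Y.%m.%d")` in the workflow.
        let dated = "2024.09.24";
        for version in [dated, "nightly"] {
            let (zip, tar) = package_names("safenode", version, "x86_64-unknown-linux-musl");
            println!("{zip}\n{tar}");
        }
        // -> safenode-2024.09.24-x86_64-unknown-linux-musl.zip / .tar.gz
        // -> safenode-nightly-x86_64-unknown-linux-musl.zip / .tar.gz
    }
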
+ draft: false + prerelease: true + + - name: upload artifacts as assets + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + shell: bash + run: | + ( + cd packaged_architectures + ls | xargs gh release upload nightly-${{ env.PACKAGE_VERSION }} + ) + + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 + env: + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Nightly Release Failed" \ No newline at end of file diff --git a/Justfile b/Justfile index 8e260b9804..693929fcaf 100644 --- a/Justfile +++ b/Justfile @@ -65,11 +65,12 @@ kill-testbed: doctl compute droplet delete $droplet_id fi -build-release-artifacts arch: +build-release-artifacts arch nightly="false": #!/usr/bin/env bash set -e arch="{{arch}}" + nightly="{{nightly}}" supported_archs=( "x86_64-pc-windows-msvc" "x86_64-apple-darwin" @@ -107,9 +108,9 @@ build-release-artifacts arch: mkdir artifacts cargo clean - echo "===============" - echo "= Public Keys =" - echo "===============" + echo "================" + echo "= Network Keys =" + echo "================" echo "FOUNDATION_PK: $FOUNDATION_PK" echo "GENESIS_PK: $GENESIS_PK" echo "NETWORK_ROYALTIES_PK: $NETWORK_ROYALTIES_PK" @@ -118,28 +119,33 @@ build-release-artifacts arch: cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" --env \"FOUNDATION_PK=$FOUNDATION_PK\" --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\" --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\"" export CROSS_CONTAINER_OPTS=$cross_container_opts + nightly_feature="" + if [[ "$nightly" == "true" ]]; then + nightly_feature="--features nightly" + fi + if [[ $arch == arm* || $arch == armv7* || $arch == aarch64* ]]; then echo "Passing to cross CROSS_CONTAINER_OPTS=$CROSS_CONTAINER_OPTS" cargo binstall --no-confirm cross - cross build --release --target $arch --bin faucet --features=distribution - cross build --release --target $arch --bin nat-detection - cross build --release --target $arch --bin node-launchpad - cross build --release --features="network-contacts,distribution" --target $arch --bin safe - cross build --release --features=network-contacts --target $arch --bin safenode - cross build --release --target $arch --bin safenode-manager - cross build --release --target $arch --bin safenodemand - cross build --release --target $arch --bin safenode_rpc_client - cross build --release --target $arch --bin sn_auditor + cross build --release --target $arch --bin faucet --features=distribution $nightly_feature + cross build --release --target $arch --bin nat-detection $nightly_feature + cross build --release --target $arch --bin node-launchpad $nightly_feature + cross build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature + cross build --release --features=network-contacts --target $arch --bin safenode $nightly_feature + cross build --release --target $arch --bin safenode-manager $nightly_feature + cross build --release --target $arch --bin safenodemand $nightly_feature + cross build --release --target $arch --bin safenode_rpc_client $nightly_feature + cross build --release --target $arch --bin sn_auditor $nightly_feature else - cargo build --release --target $arch --bin faucet --features=distribution - cargo build --release --target $arch --bin nat-detection - cargo build --release --target $arch --bin node-launchpad - 
cargo build --release --features="network-contacts,distribution" --target $arch --bin safe - cargo build --release --features=network-contacts --target $arch --bin safenode - cargo build --release --target $arch --bin safenode-manager - cargo build --release --target $arch --bin safenodemand - cargo build --release --target $arch --bin safenode_rpc_client - cargo build --release --target $arch --bin sn_auditor + cargo build --release --target $arch --bin faucet --features=distribution $nightly_feature + cargo build --release --target $arch --bin nat-detection $nightly_feature + cargo build --release --target $arch --bin node-launchpad $nightly_feature + cargo build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature + cargo build --release --features=network-contacts --target $arch --bin safenode $nightly_feature + cargo build --release --target $arch --bin safenode-manager $nightly_feature + cargo build --release --target $arch --bin safenodemand $nightly_feature + cargo build --release --target $arch --bin safenode_rpc_client $nightly_feature + cargo build --release --target $arch --bin sn_auditor $nightly_feature fi find target/$arch/release -maxdepth 1 -type f -exec cp '{}' artifacts \; @@ -347,6 +353,65 @@ upload-packaged-bin-to-s3 bin_name: fi done +delete-s3-bin bin_name version: + #!/usr/bin/env bash + set -e + + case "{{bin_name}}" in + faucet) + bucket="sn-faucet" + ;; + nat-detection) + bucket="nat-detection" + ;; + node-launchpad) + bucket="node-launchpad" + ;; + safe) + bucket="sn-cli" + ;; + safenode) + bucket="sn-node" + ;; + safenode-manager) + bucket="sn-node-manager" + ;; + safenodemand) + bucket="sn-node-manager" + ;; + safenode_rpc_client) + bucket="sn-node-rpc-client" + ;; + sn_auditor) + bucket="sn-auditor" + ;; + *) + echo "The {{bin_name}} binary is not supported" + exit 1 + ;; + esac + + architectures=( + "x86_64-pc-windows-msvc" + "x86_64-apple-darwin" + "aarch64-apple-darwin" + "x86_64-unknown-linux-musl" + "arm-unknown-linux-musleabi" + "armv7-unknown-linux-musleabihf" + "aarch64-unknown-linux-musl" + ) + + for arch in "${architectures[@]}"; do + zip_filename="{{bin_name}}-{{version}}-${arch}.zip" + tar_filename="{{bin_name}}-{{version}}-${arch}.tar.gz" + s3_zip_path="s3://$bucket/$zip_filename" + s3_tar_path="s3://$bucket/$tar_filename" + aws s3 rm "$s3_zip_path" + echo "deleted $s3_zip_path" + aws s3 rm "$s3_tar_path" + echo "deleted $s3_tar_path" + done + package-all-architectures: #!/usr/bin/env bash set -e From b59f048064127d0c8fb68a964f9048059e670824 Mon Sep 17 00:00:00 2001 From: qima Date: Wed, 25 Sep 2024 22:15:22 +0800 Subject: [PATCH 045/255] feat(node): carry out peer storage check actively --- sn_networking/src/event/request_response.rs | 42 +++++++++++++++------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index de16815b44..4550772bf4 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -238,22 +238,37 @@ impl SwarmDriver { let mut rng = thread_rng(); // 5% probability if more_than_one_key && rng.gen_bool(0.05) { - let keys_to_verify = self.select_verification_data_candidates(sender); + self.verify_peer_storage(sender.clone()); - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {holder:?}"); - } else { - self.send_event(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }); + // 
In addition to verifying the sender, we also verify a random close node.
+            // This is to avoid a malicious node escaping the check by never sending a replication_list.
+            // With a further reduced probability of 1% (5% * 20%)
+            if rng.gen_bool(0.2) {
+                let close_group_peers = self
+                    .swarm
+                    .behaviour_mut()
+                    .kademlia
+                    .get_closest_local_peers(&self.self_peer_id.into())
+                    .map(|peer| peer.into_preimage())
+                    .take(CLOSE_GROUP_SIZE)
+                    .collect_vec();
+                if close_group_peers.len() == CLOSE_GROUP_SIZE {
+                    loop {
+                        let index: usize = OsRng.gen_range(0..close_group_peers.len());
+                        let candidate = NetworkAddress::from_peer(close_group_peers[index]);
+                        if sender != candidate {
+                            self.verify_peer_storage(candidate);
+                            break;
+                        }
+                    }
+                }
+            }
         }
     }
 
     /// Check among all chunk type records that we have, select those close to the peer,
     /// and randomly pick one as the verification candidate.
-    fn select_verification_data_candidates(&mut self, peer: NetworkAddress) -> Vec<NetworkAddress> {
+    fn verify_peer_storage(&mut self, peer: NetworkAddress) {
         let mut closest_peers = self
             .swarm
             .behaviour_mut()
@@ -268,7 +283,7 @@ impl SwarmDriver {
             peer_id
         } else {
             error!("Target {peer:?} is not a valid PeerId");
-            return vec![];
+            return;
         };
 
         let all_keys = self
@@ -309,9 +324,12 @@ impl SwarmDriver {
         // AND choose candidate from certain reduced range.
         if verify_candidates.len() > 50 {
             let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2));
-            vec![verify_candidates[index].clone()]
+            self.send_event(NetworkEvent::ChunkProofVerification {
+                peer_id: target_peer,
+                keys_to_verify: vec![verify_candidates[index].clone()],
+            });
         } else {
-            vec![]
+            debug!("No valid candidate to be checked against peer {peer:?}");
         }
     }
 }

From e8ce2219fb00fdffc625aebe21d8ad6e2e43282a Mon Sep 17 00:00:00 2001
From: grumbach
Date: Thu, 26 Sep 2024 14:16:28 +0900
Subject: [PATCH 046/255] feat: evmlib evm_testnet sn_evm

---
 Cargo.lock                                  | 1851 +++++++++++++++++--
 Cargo.toml                                  |    4 +-
 evm_testnet/Cargo.toml                      |   17 +
 evm_testnet/README.md                       |   26 +
 evm_testnet/src/main.rs                     |  108 ++
 evmlib/Cargo.toml                           |   19 +
 evmlib/README.md                            |    3 +
 evmlib/artifacts/AutonomiNetworkToken.json  |  897 +++++++++
 evmlib/artifacts/ChunkPayments.json         |  108 ++
 evmlib/src/common.rs                        |   10 +
 evmlib/src/contract/chunk_payments/error.rs |   14 +
 evmlib/src/contract/chunk_payments/mod.rs   |   87 +
 evmlib/src/contract/mod.rs                  |    2 +
 evmlib/src/contract/network_token.rs        |   84 +
 evmlib/src/cryptography.rs                  |    7 +
 evmlib/src/event.rs                         |   54 +
 evmlib/src/lib.rs                           |  108 ++
 evmlib/src/testnet.rs                       |  123 ++
 evmlib/src/transaction.rs                   |  197 ++
 evmlib/src/utils.rs                         |   17 +
 evmlib/src/wallet.rs                        |  315 ++++
 evmlib/tests/chunk_payments.rs              |  112 ++
 evmlib/tests/common/mod.rs                  |    5 +
 evmlib/tests/common/quote.rs                |    9 +
 evmlib/tests/network_token.rs               |   83 +
 evmlib/tests/wallet.rs                      |  112 ++
 sn_evm/CHANGELOG.md                         |  917 +++++++++
 sn_evm/Cargo.toml                           |   37 +
 sn_evm/README.md                            |   46 +
 sn_evm/src/amount.rs                        |  253 +++
 sn_evm/src/data_payments.rs                 |  349 ++++
 sn_evm/src/error.rs                         |   33 +
 sn_evm/src/evm.rs                           |   30 +
 sn_evm/src/lib.rs                           |   29 +
 34 files changed, 5940 insertions(+), 126 deletions(-)
 create mode 100644 evm_testnet/Cargo.toml
 create mode 100644 evm_testnet/README.md
 create mode 100644 evm_testnet/src/main.rs
 create mode 100644 evmlib/Cargo.toml
 create mode 100644 evmlib/README.md
 create mode 100644 evmlib/artifacts/AutonomiNetworkToken.json
 create mode 100644 evmlib/artifacts/ChunkPayments.json
 create mode 100644 evmlib/src/common.rs
 create mode 100644 evmlib/src/contract/chunk_payments/error.rs
 create mode 100644 evmlib/src/contract/chunk_payments/mod.rs
 create
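
Before the evmlib change set continues, the probability arithmetic in the peer-storage commit above deserves a quick illustration: a storage check fires on 5% of replication lists, and within that, a random close-group peer is additionally checked 20% of the time, i.e. 1% of lists overall. A sketch with the `rand` crate, mirroring the constants and loop shape of the diff (peer names are illustrative placeholders):

    use rand::{rngs::OsRng, thread_rng, Rng};

    fn main() {
        let mut rng = thread_rng();
        // 5% of incoming replication lists trigger a check on the sender.
        if rng.gen_bool(0.05) {
            verify_peer_storage("sender");
            // 20% of those (1% overall) also probe a random close peer, so a
            // peer that never sends replication lists can still be checked.
            if rng.gen_bool(0.2) {
                let close_group = ["peer-a", "peer-b", "sender", "peer-c"];
                loop {
                    let index = OsRng.gen_range(0..close_group.len());
                    let candidate = close_group[index];
                    if candidate != "sender" {
                        verify_peer_storage(candidate);
                        break;
                    }
                }
            }
        }
    }

    fn verify_peer_storage(peer: &str) {
        println!("verifying storage held by {peer}");
    }
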
mode 100644 evmlib/src/contract/mod.rs create mode 100644 evmlib/src/contract/network_token.rs create mode 100644 evmlib/src/cryptography.rs create mode 100644 evmlib/src/event.rs create mode 100644 evmlib/src/lib.rs create mode 100644 evmlib/src/testnet.rs create mode 100644 evmlib/src/transaction.rs create mode 100644 evmlib/src/utils.rs create mode 100644 evmlib/src/wallet.rs create mode 100644 evmlib/tests/chunk_payments.rs create mode 100644 evmlib/tests/common/mod.rs create mode 100644 evmlib/tests/common/quote.rs create mode 100644 evmlib/tests/network_token.rs create mode 100644 evmlib/tests/wallet.rs create mode 100644 sn_evm/CHANGELOG.md create mode 100644 sn_evm/Cargo.toml create mode 100644 sn_evm/README.md create mode 100644 sn_evm/src/amount.rs create mode 100644 sn_evm/src/data_payments.rs create mode 100644 sn_evm/src/error.rs create mode 100644 sn_evm/src/evm.rs create mode 100644 sn_evm/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 1b7d8dab75..dcc9ca1559 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,6 +128,581 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +[[package]] +name = "alloy" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4a4aaae80afd4be443a6aecd92a6b255dcdd000f97996928efb33d8a71e100" +dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-core", + "alloy-eips", + "alloy-genesis", + "alloy-network", + "alloy-node-bindings", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-serde", + "alloy-signer", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", +] + +[[package]] +name = "alloy-chains" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db" +dependencies = [ + "num_enum", + "strum", +] + +[[package]] +name = "alloy-consensus" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04c309895995eaa4bfcc345f5515a39c7df9447798645cc8bf462b6c5bf1dc96" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4e0ef72b0876ae3068b2ed7dfae9ae1779ce13cfaec2ee1f08f5bd0348dc57" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-pubsub", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror", +] + +[[package]] +name = "alloy-core" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "529fc6310dc1126c8de51c376cbc59c79c7f662bd742be7dc67055d5421a81b4" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-types", +] + +[[package]] +name = "alloy-dyn-abi" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413902aa18a97569e60f679c23f46a18db1656d87ab4d4e49d0e1e52042f66df" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "const-hex", + "itoa", + "serde", + "serde_json", + "winnow", +] + +[[package]] 
+name = "alloy-eips" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9431c99a3b3fe606ede4b3d4043bdfbcb780c45b8d8d226c3804e2b75cfbe68" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "derive_more", + "k256", + "once_cell", + "serde", + "sha2 0.10.8", +] + +[[package]] +name = "alloy-genesis" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79614dfe86144328da11098edcc7bc1a3f25ad8d3134a9eb9e857e06f0d9840d" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc05b04ac331a9f07e3a4036ef7926e49a8bf84a99a1ccfc7e2ab55a5fcbb372" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57e2865c4c3bb4cdad3f0d9ec1ab5c0c657ba69a375651bd35e32fb6c180ccc2" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e701fc87ef9a3139154b0b4ccb935b565d27ffd9de020fe541bf2dec5ae4ede" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-network-primitives" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec9d5a0f9170b10988b6774498a022845e13eda94318440d17709d50687f67f9" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-node-bindings" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16faebb9ea31a244fd6ce3288d47df4be96797d9c3c020144b8f2c31543a4512" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "k256", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "url", +] + +[[package]] +name = "alloy-primitives" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "k256", + "keccak-asm", + "proptest", + "rand 0.8.5", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9c0ab10b93de601a6396fc7ff2ea10d3b28c46f079338fa562107ebf9857c8" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-node-bindings", + "alloy-primitives", + "alloy-pubsub", + "alloy-rpc-client", + "alloy-rpc-types-anvil", + "alloy-rpc-types-eth", + "alloy-signer-local", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "async-stream", + "async-trait", + "auto_impl", + "dashmap 5.5.3", + "futures", + "futures-utils-wasm", + "lru", + "pin-project", + "reqwest 0.12.7", + "serde", + 
"serde_json", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "alloy-pubsub" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f5da2c55cbaf229bad3c5f8b00b5ab66c74ef093e5f3a753d874cfecf7d2281" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "bimap", + "futures", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b38e3ffdb285df5d9f60cb988d336d9b8e3505acb78750c3bc60336a7af41d3" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-pubsub", + "alloy-transport", + "alloy-transport-http", + "alloy-transport-ipc", + "alloy-transport-ws", + "futures", + "pin-project", + "reqwest 0.12.7", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c31a3750b8f5a350d17354e46a52b0f2f19ec5f2006d816935af599dedc521" +dependencies = [ + "alloy-rpc-types-anvil", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-anvil" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ab6509cd38b2e8c8da726e0f61c1e314a81df06a38d37ddec8bced3f8d25ed" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-rpc-types-engine" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff63f51b2fb2f547df5218527fd0653afb1947bf7fead5b3ce58c75d170b30f7" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", + "jsonwebtoken", + "rand 0.8.5", + "serde", + "thiserror", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81e18424d962d7700a882fe423714bd5b9dde74c7a7589d4255ea64068773aef" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.13.0", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-serde" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33feda6a53e6079895aed1d08dcb98a1377b000d80d16370fbbdb8155d547ef" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "740a25b92e849ed7b0fa013951fe2f64be9af1ad5abe805037b44fb7770c5c47" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve 0.13.8", + "k256", + 
"thiserror", +] + +[[package]] +name = "alloy-signer-local" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b0707d4f63e4356a110b30ef3add8732ab6d181dd7be4607bf79b8777105cee" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b40397ddcdcc266f59f959770f601ce1280e699a91fc1862f29cef91707cd09" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "867a5469d61480fea08c7333ffeca52d5b621f5ca2e44f271b117ec1fc9a0525" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.5.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.77", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e482dc33a32b6fadbc0f599adea520bd3aaa585c141a80b404d0a3e3fa72528" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.77", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a91ca40fa20793ae9c3841b83e74569d1cc9af29a2f5237314fd3452d51e38c7" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0590afbdacf2f8cca49d025a2466f3b6584a016a8b28f532f29f8da1007bae" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2437d145d80ea1aecde8574d2058cceb8b3c9cba05f6aea8e67907c660d46698" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest 0.12.7", + "serde_json", + "tower", + "tracing", + "url", +] + +[[package]] +name = "alloy-transport-ipc" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804494366e20468776db4e18f9eb5db7db0fe14f1271eb6dbf155d867233405c" +dependencies = [ + "alloy-json-rpc", + "alloy-pubsub", + "alloy-transport", + "bytes", + "futures", + "interprocess", + "pin-project", + "serde_json", + "tokio", + "tokio-util 0.7.12", + "tracing", +] + +[[package]] +name = "alloy-transport-ws" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af855163e7df008799941aa6dd324a43ef2bf264b08ba4b22d44aad6ced65300" +dependencies = [ + "alloy-pubsub", + "alloy-transport", + "futures", + "http 1.1.0", + 
"rustls 0.23.13", + "serde_json", + "tokio", + "tokio-tungstenite 0.23.1", + "tracing", + "ws_stream_wasm", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -144,71 +719,195 @@ dependencies = [ ] [[package]] -name = "anes" -version = "0.1.6" +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstream" +version = "0.6.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint 0.4.6", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] [[package]] -name = "anstream" -version = "0.6.15" +name = "ark-ff-asm" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" 
+checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", + "quote", + "syn 1.0.109", ] [[package]] -name = "anstyle" -version = "1.0.8" +name = "ark-ff-macros" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "quote", + "syn 1.0.109", +] [[package]] -name = "anstyle-parse" -version = "0.2.5" +name = "ark-ff-macros" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ - "utf8parse", + "num-bigint 0.4.6", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "anstyle-query" -version = "1.1.1" +name = "ark-serialize" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" dependencies = [ - "windows-sys 0.52.0", + "ark-std 0.3.0", + "digest 0.9.0", ] [[package]] -name = "anstyle-wincon" -version = "3.0.4" +name = "ark-serialize" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" dependencies = [ - "anstyle", - "windows-sys 0.52.0", + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint 0.4.6", ] [[package]] -name = "anyhow" -version = "1.0.89" +name = "ark-std" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] [[package]] -name = "arc-swap" -version = "1.7.1" +name = "ark-std" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] [[package]] name = "arrayref" @@ -367,6 +1066,17 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures", + "pharos", + "rustc_version 0.4.1", +] + [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -402,6 +1112,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "autocfg" version = "0.1.8" @@ -425,7 +1146,7 @@ dependencies = [ "blsttc", "bytes", "eyre", - "libp2p", + "libp2p 0.54.1", "rand 0.8.5", 
"rmp-serde", "self_encryption", @@ -529,6 +1250,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -569,6 +1296,12 @@ dependencies = [ "console", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -762,8 +1495,8 @@ checksum = "1ff3694b352ece02eb664a09ffb948ee69b35afa2e6ac444a6b8cb9d515deebd" dependencies = [ "blst", "byte-slice-cast", - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "pairing", "rand_core 0.6.4", "serde", @@ -778,8 +1511,8 @@ checksum = "1186a39763321a0b73d1a10aa4fc067c5d042308509e8f6cc31d2c2a7ac61ac2" dependencies = [ "blst", "blstrs", - "ff", - "group", + "ff 0.12.1", + "group 0.12.1", "hex 0.4.3", "hex_fmt", "pairing", @@ -892,6 +1625,21 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +dependencies = [ + "blst", + "cc", + "glob", + "hex 0.4.3", + "libc", + "once_cell", + "serde", +] + [[package]] name = "camino" version = "1.1.9" @@ -918,7 +1666,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -1213,7 +1961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" dependencies = [ "async-trait", - "convert_case", + "convert_case 0.6.0", "json5", "lazy_static", "nom", @@ -1249,6 +1997,19 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "const-hex" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex 0.4.3", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -1281,6 +2042,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "convert_case" version = "0.6.0" @@ -1485,6 +2252,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1539,7 +2318,7 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rustc_version 0.4.1", "subtle", "zeroize", ] @@ -1613,6 +2392,19 @@ dependencies = [ "syn 2.0.77", ] 
+[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "dashmap" version = "6.1.0" @@ -1705,6 +2497,17 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_deref" version = "1.1.1" @@ -1716,6 +2519,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_more" +version = "0.99.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +dependencies = [ + "convert_case 0.4.0", + "proc-macro2", + "quote", + "rustc_version 0.4.1", + "syn 2.0.77", +] + [[package]] name = "dialoguer" version = "0.11.0" @@ -1766,6 +2582,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -1858,6 +2675,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "doctest-file" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" + [[package]] name = "dot-generator" version = "0.2.0" @@ -1898,11 +2721,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" dependencies = [ "der 0.6.1", - "elliptic-curve", - "rfc6979", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", "signature 1.6.4", ] +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der 0.7.9", + "digest 0.10.7", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.2.0", + "spki 0.7.3", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -1940,16 +2777,35 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.1.1", + "crypto-bigint 0.4.9", "der 0.6.1", "digest 0.10.7", - "ff", + "ff 0.12.1", "generic-array 0.14.7", - "group", + "group 0.12.1", "pkcs8 0.9.0", "rand_core 0.6.4", - "sec1", + "sec1 0.3.0", + "subtle", + "zeroize", +] + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct 0.2.0", + "crypto-bigint 0.5.5", + "digest 0.10.7", + "ff 0.13.0", + "generic-array 0.14.7", + "group 0.13.0", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2042,6 +2898,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "evmlib" +version = "0.1.0" +dependencies = [ + "alloy", + 
"rand 0.8.5", + "serde", + "thiserror", + "tokio", +] + [[package]] name = "eyre" version = "0.6.12" @@ -2081,6 +2948,17 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "ff" version = "0.12.1" @@ -2092,6 +2970,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff-zeroize" version = "0.6.3" @@ -2158,6 +3046,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -2189,6 +3089,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2374,6 +3289,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "generic-array" version = "0.12.4" @@ -2391,6 +3312,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -3002,12 +3924,23 @@ dependencies = [ name = "group" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_xorshift 0.3.0", + "subtle", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", - "rand 0.8.5", + "ff 0.13.0", "rand_core 0.6.4", - "rand_xorshift 0.3.0", "subtle", ] @@ -3143,6 +4076,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + [[package]] name = "hex_fmt" version = "0.3.0" @@ -3420,6 +4359,22 @@ dependencies = [ "tokio-io-timeout", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.8" @@ -3553,6 +4508,26 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "indenter" version = "0.3.3" @@ -3640,6 +4615,21 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "interprocess" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" +dependencies = [ + "doctest-file", + "futures-core", + "libc", + "recvmsg", + "tokio", + "widestring", + "windows-sys 0.52.0", +] + [[package]] name = "into-attr" version = "0.1.1" @@ -3759,6 +4749,34 @@ dependencies = [ "serde", ] +[[package]] +name = "jsonwebtoken" +version = "9.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" +dependencies = [ + "base64 0.21.7", + "js-sys", + "pem", + "ring 0.17.8", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "once_cell", + "sha2 0.10.8", +] + [[package]] name = "keccak" version = "0.1.5" @@ -3768,6 +4786,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3786,6 +4814,32 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libp2p" +version = "0.53.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom 0.2.15", + "instant", + "libp2p-allow-block-list 0.3.0", + "libp2p-connection-limits 0.3.1", + "libp2p-core 0.41.3", + "libp2p-identify 0.44.2", + "libp2p-identity", + "libp2p-kad 0.45.3", + "libp2p-metrics 0.14.1", + "libp2p-swarm 0.44.2", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror", +] + [[package]] name = "libp2p" version = "0.54.1" 
@@ -3797,22 +4851,22 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.15", - "libp2p-allow-block-list", + "libp2p-allow-block-list 0.4.0", "libp2p-autonat", - "libp2p-connection-limits", - "libp2p-core", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", "libp2p-dns", "libp2p-gossipsub", - "libp2p-identify", + "libp2p-identify 0.45.0", "libp2p-identity", - "libp2p-kad", + "libp2p-kad 0.46.2", "libp2p-mdns", - "libp2p-metrics", + "libp2p-metrics 0.15.0", "libp2p-noise", "libp2p-quic", "libp2p-relay", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "libp2p-tcp", "libp2p-upnp", "libp2p-websocket", @@ -3824,15 +4878,27 @@ dependencies = [ "thiserror", ] +[[package]] +name = "libp2p-allow-block-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" +dependencies = [ + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "void", +] + [[package]] name = "libp2p-allow-block-list" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "void", ] @@ -3849,10 +4915,10 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -3863,16 +4929,56 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-connection-limits" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" +dependencies = [ + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "void", +] + [[package]] name = "libp2p-connection-limits" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" dependencies = [ - "libp2p-core", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", + "void", +] + +[[package]] +name = "libp2p-core" +version = "0.41.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5a8920cbd8540059a01950c1e5c96ea8d89eb50c51cd366fc18bdf540a6e48f" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", "libp2p-identity", - "libp2p-swarm", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot", + "pin-project", + "quick-protobuf", + "rand 0.8.5", + "rw-stream-sink", + "smallvec", + "thiserror", + "tracing", + "unsigned-varint 0.8.0", "void", + "web-time", ] [[package]] @@ -3912,7 +5018,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot", "smallvec", @@ -3935,9 +5041,9 @@ dependencies = [ "futures-ticker", "getrandom 0.2.15", "hex_fmt", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -3950,6 +5056,29 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-identify" +version = "0.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" +dependencies = [ + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror", + "tracing", + "void", +] + [[package]] name = "libp2p-identify" version = "0.45.0" @@ -3961,9 +5090,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "lru", "quick-protobuf", "quick-protobuf-codec", @@ -3991,6 +5120,35 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-kad" +version = "0.45.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" +dependencies = [ + "arrayvec", + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "instant", + "libp2p-core 0.41.3", + "libp2p-identity", + "libp2p-swarm 0.44.2", + "quick-protobuf", + "quick-protobuf-codec", + "rand 0.8.5", + "sha2 0.10.8", + "smallvec", + "thiserror", + "tracing", + "uint", + "void", +] + [[package]] name = "libp2p-kad" version = "0.46.2" @@ -4005,9 +5163,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -4030,9 +5188,9 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "rand 0.8.5", "smallvec", "socket2", @@ -4041,6 +5199,23 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-metrics" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdac91ae4f291046a3b2660c039a2830c931f84df2ee227989af92f7692d3357" +dependencies = [ + "futures", + "instant", + "libp2p-core 0.41.3", + "libp2p-identify 0.44.2", + "libp2p-identity", + "libp2p-kad 0.45.3", + "libp2p-swarm 0.44.2", + "pin-project", + "prometheus-client", +] + [[package]] name = "libp2p-metrics" version = "0.15.0" @@ -4048,12 +5223,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" dependencies = [ "futures", - "libp2p-core", - "libp2p-identify", + "libp2p-core 0.42.0", + "libp2p-identify 0.45.0", "libp2p-identity", - "libp2p-kad", + "libp2p-kad 0.46.2", "libp2p-relay", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "pin-project", "prometheus-client", "web-time", @@ -4069,7 +5244,7 @@ dependencies = [ "bytes", "curve25519-dalek 4.1.3", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "multiaddr", "multihash", @@ -4095,7 +5270,7 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-tls", "parking_lot", @@ -4121,9 +5296,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -4145,9 +5320,9 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.45.1", "rand 0.8.5", "serde", 
"smallvec", @@ -4156,6 +5331,28 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-swarm" +version = "0.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80cae6cb75f89dbca53862f9ebe0b9f463aa7b302762fcfaafb9e51dcc9b0f7e" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "instant", + "libp2p-core 0.41.3", + "libp2p-identity", + "lru", + "multistream-select", + "once_cell", + "rand 0.8.5", + "smallvec", + "tracing", + "void", +] + [[package]] name = "libp2p-swarm" version = "0.45.1" @@ -4167,7 +5364,7 @@ dependencies = [ "futures", "futures-timer", "getrandom 0.2.15", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "libp2p-swarm-derive", "lru", @@ -4204,7 +5401,7 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "socket2", "tokio", @@ -4219,7 +5416,7 @@ checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" dependencies = [ "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "rcgen", "ring 0.17.8", @@ -4239,8 +5436,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", "tokio", "tracing", "void", @@ -4255,7 +5452,7 @@ dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", "parking_lot", "pin-project-lite", @@ -4276,7 +5473,7 @@ dependencies = [ "bytes", "futures", "js-sys", - "libp2p-core", + "libp2p-core 0.42.0", "parking_lot", "send_wrapper 0.6.0", "thiserror", @@ -4293,7 +5490,7 @@ checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" dependencies = [ "either", "futures", - "libp2p-core", + "libp2p-core 0.42.0", "thiserror", "tracing", "yamux 0.12.1", @@ -4620,7 +5817,7 @@ dependencies = [ "clap-verbosity-flag", "color-eyre", "futures", - "libp2p", + "libp2p 0.54.1", "sn_build_info", "sn_networking", "sn_protocol", @@ -4630,6 +5827,23 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -4933,6 +6147,26 @@ dependencies = [ "libc", ] +[[package]] +name = "num_enum" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "num_threads" version = "0.1.7" @@ -4979,16 +6213,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] -name = "opaque-debug" -version = "0.2.3" +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] 
+name = "opaque-debug" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opaque-debug" -version = "0.3.1" +name = "openssl-sys" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "opentelemetry" @@ -5133,8 +6411,8 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", "sha2 0.10.8", ] @@ -5144,7 +6422,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" dependencies = [ - "group", + "group 0.12.1", ] [[package]] @@ -5162,6 +6440,32 @@ dependencies = [ "zeroize", ] +[[package]] +name = "parity-scale-codec" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "parking" version = "2.2.1" @@ -5299,6 +6603,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures", + "rustc_version 0.4.1", +] + [[package]] name = "pin-project" version = "1.1.5" @@ -5532,6 +6846,26 @@ dependencies = [ "yansi", ] +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -6094,6 +7428,12 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "recvmsg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" + [[package]] name = "redox_syscall" version = "0.5.4" @@ -6214,11 +7554,13 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.3", + "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -6231,6 +7573,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.0", "tower-service", "url", @@ -6257,11 +7600,21 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ - "crypto-bigint", + "crypto-bigint 0.4.9", "hmac 0.12.1", "zeroize", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "rgb" version = "0.8.50" @@ -6301,6 +7654,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rmp" version = "0.8.14" @@ -6371,6 +7734,36 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ruint" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint 0.4.6", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand 0.8.5", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust-ini" version = "0.19.0" @@ -6393,13 +7786,28 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.23", ] [[package]] @@ -6574,6 +7982,15 @@ dependencies = [ 
"winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -6612,7 +8029,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ - "base16ct", + "base16ct 0.1.1", "der 0.6.1", "generic-array 0.14.7", "pkcs8 0.9.0", @@ -6620,6 +8037,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct 0.2.0", + "der 0.7.9", + "generic-array 0.14.7", + "pkcs8 0.10.2", + "subtle", + "zeroize", +] + [[package]] name = "secp256k1" version = "0.20.3" @@ -6669,6 +8100,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "self_encryption" version = "0.29.2" @@ -6694,6 +8148,15 @@ dependencies = [ "xor_name", ] +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.23" @@ -6703,6 +8166,15 @@ dependencies = [ "serde", ] +[[package]] +name = "semver-parser" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -6877,6 +8349,16 @@ dependencies = [ "opaque-debug 0.3.1", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -6945,9 +8427,22 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ + "digest 0.10.7", "rand_core 0.6.4", ] +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint 0.4.6", + "num-traits", + "thiserror", + "time", +] + [[package]] name = "slab" version = "0.4.9" @@ -6977,7 +8472,7 @@ dependencies = [ "colored", "dirs-next", "indicatif", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "mockall 0.12.1", "nix 0.27.1", @@ -6985,7 +8480,7 @@ dependencies = [ "prost 0.9.0", "rand 
0.8.5", "reqwest 0.12.7", - "semver", + "semver 1.0.23", "serde", "serde_json", "service-manager", @@ -7018,7 +8513,7 @@ dependencies = [ "lazy_static", "regex", "reqwest 0.12.7", - "semver", + "semver 1.0.23", "serde_json", "tar", "thiserror", @@ -7092,7 +8587,7 @@ dependencies = [ "futures", "hex 0.4.3", "indicatif", - "libp2p", + "libp2p 0.54.1", "rand 0.8.5", "rayon", "reqwest 0.12.7", @@ -7126,14 +8621,14 @@ dependencies = [ "console_error_panic_hook", "crdts", "custom_debug", - "dashmap", + "dashmap 6.1.0", "dirs-next", "eyre", "futures", "getrandom 0.2.15", "hex 0.4.3", "itertools 0.12.1", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "petgraph", "prometheus-client", @@ -7197,6 +8692,28 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sn_evm" +version = "0.1.0" +dependencies = [ + "custom_debug", + "evmlib", + "hex 0.4.3", + "lazy_static", + "libp2p 0.53.2", + "rand 0.8.5", + "ring 0.17.8", + "rmp-serde", + "serde", + "serde_json", + "tempfile", + "thiserror", + "tiny-keccak", + "tokio", + "tracing", + "xor_name", +] + [[package]] name = "sn_faucet" version = "0.5.1" @@ -7285,7 +8802,7 @@ dependencies = [ "hyper 0.14.30", "itertools 0.12.1", "lazy_static", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "prometheus-client", "quickcheck", @@ -7331,7 +8848,7 @@ dependencies = [ "futures", "hex 0.4.3", "itertools 0.12.1", - "libp2p", + "libp2p 0.54.1", "prometheus-client", "prost 0.9.0", "rand 0.8.5", @@ -7375,7 +8892,7 @@ dependencies = [ "clap", "color-eyre", "hex 0.4.3", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "sn_build_info", "sn_client", @@ -7399,7 +8916,7 @@ version = "0.5.1" dependencies = [ "clap", "lazy_static", - "libp2p", + "libp2p 0.54.1", "rand 0.8.5", "reqwest 0.12.7", "sn_protocol", @@ -7421,7 +8938,7 @@ dependencies = [ "dirs-next", "hex 0.4.3", "lazy_static", - "libp2p", + "libp2p 0.54.1", "prost 0.9.0", "rmp-serde", "serde", @@ -7461,11 +8978,11 @@ version = "0.3.12" dependencies = [ "async-trait", "dirs-next", - "libp2p", + "libp2p 0.54.1", "libp2p-identity", "mockall 0.11.4", "prost 0.9.0", - "semver", + "semver 1.0.23", "serde", "serde_json", "service-manager", @@ -7495,7 +9012,7 @@ dependencies = [ "fs2", "hex 0.4.3", "lazy_static", - "libp2p", + "libp2p 0.54.1", "pprof", "rand 0.8.5", "rayon", @@ -7526,7 +9043,7 @@ dependencies = [ "curve25519-dalek 4.1.3", "rand_core 0.6.4", "ring 0.17.8", - "rustc_version", + "rustc_version 0.4.1", "sha2 0.10.8", "subtle", ] @@ -7694,6 +9211,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c837dc8852cb7074e46b444afb81783140dab12c58867b49fb3898fbafedf7ea" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -7820,7 +9349,7 @@ version = "0.4.5" dependencies = [ "color-eyre", "dirs-next", - "libp2p", + "libp2p 0.54.1", "serde", "serde_json", ] @@ -7997,6 +9526,16 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -8038,6 +9577,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.12", ] [[package]] @@ -8049,7 +9589,23 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite", 
+ "tungstenite 0.21.0",
+]
+
+[[package]]
+name = "tokio-tungstenite"
+version = "0.23.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd"
+dependencies = [
+ "futures-util",
+ "log",
+ "rustls 0.23.13",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls 0.26.0",
+ "tungstenite 0.23.0",
+ "webpki-roots 0.26.6",
+]
 
 [[package]]
@@ -8418,6 +9974,26 @@ dependencies = [
  "utf-8",
 ]
 
+[[package]]
+name = "tungstenite"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http 1.1.0",
+ "httparse",
+ "log",
+ "rand 0.8.5",
+ "rustls 0.23.13",
+ "rustls-pki-types",
+ "sha1",
+ "thiserror",
+ "utf-8",
+]
+
 [[package]]
 name = "typenum"
 version = "1.17.0"
@@ -8607,6 +10183,12 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
 
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
 [[package]]
 name = "vergen"
 version = "8.3.2"
@@ -8705,7 +10287,7 @@ dependencies = [
  "serde_json",
  "serde_urlencoded",
  "tokio",
- "tokio-tungstenite",
+ "tokio-tungstenite 0.21.0",
  "tokio-util 0.7.12",
  "tower-service",
  "tracing",
@@ -9161,6 +10743,25 @@ version = "0.0.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
 
+[[package]]
+name = "ws_stream_wasm"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5"
+dependencies = [
+ "async_io_stream",
+ "futures",
+ "js-sys",
+ "log",
+ "pharos",
+ "rustc_version 0.4.1",
+ "send_wrapper 0.6.0",
+ "thiserror",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+]
+
 [[package]]
 name = "wyz"
 version = "0.5.1"
diff --git a/Cargo.toml b/Cargo.toml
index 4cdf8b3458..10ebb63d70 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,8 +2,10 @@ resolver = "2"
 members = [
     "autonomi",
+    "evmlib",
     "sn_auditor",
     "sn_build_info",
+    "sn_evm",
     "sn_cli",
     "sn_client",
     "sn_faucet",
@@ -29,7 +31,7 @@ arithmetic_overflow = "forbid"
 mutable_transmutes = "forbid"
 no_mangle_const_items = "forbid"
 unknown_crate_types = "forbid"
-unsafe_code = "forbid"
+unsafe_code = "warn"
 trivial_casts = "warn"
 trivial_numeric_casts = "warn"
 unused_extern_crates = "warn"
diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml
new file mode 100644
index 0000000000..0bea2cd685
--- /dev/null
+++ b/evm_testnet/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+authors = ["MaidSafe Developers <dev@maidsafe.net>"]
+description = "Safe Network EVM"
+edition = "2021"
+homepage = "https://maidsafe.net"
+license = "GPL-3.0"
+name = "evm_testnet"
+repository = "https://github.com/maidsafe/safe_network"
+version = "0.1.0"
+
+[dependencies]
+clap = { version = "4.5", features = ["derive"] }
+evmlib = { path = "../evmlib" }
+tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] }
+
+[lints]
+workspace = true
\ No newline at end of file
diff --git a/evm_testnet/README.md b/evm_testnet/README.md
new file mode 100644
index 0000000000..3eab9ed3d5
--- /dev/null
+++ b/evm_testnet/README.md
@@ -0,0 +1,26 @@
+## EVM Testnet
+
+Tool to run a local Ethereum node that automatically deploys all Autonomi smart contracts.
+
+### Requirements
+
+1. Install Foundry to get access to Anvil nodes: https://book.getfoundry.sh/getting-started/installation
+
+### Usage
+
+```bash
+cargo run --bin evm_testnet -- --royalties-wallet <ADDRESS> --genesis-wallet <ADDRESS>
+```
+
+Example output:
+
+```
+*************************
+* Ethereum node started *
+*************************
+RPC URL: http://localhost:60093/
+Payment token address: 0x5FbDB2315678afecb367f032d93F642f64180aa3
+Chunk payments address: 0x8464135c8F25Da09e49BC8782676a84730C318bC
+Deployer wallet private key: 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80
+Genesis wallet balance: (tokens: 20000000000000000000000000, gas: 9998998011366954730202)
+```
diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs
new file mode 100644
index 0000000000..09f38821e5
--- /dev/null
+++ b/evm_testnet/src/main.rs
@@ -0,0 +1,108 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use clap::Parser;
+use evmlib::common::{Address, Amount};
+use evmlib::testnet::Testnet;
+use evmlib::wallet::{balance_of_gas_tokens, balance_of_tokens, Wallet};
+use std::str::FromStr;
+
+/// A tool to start a local Ethereum node.
+#[derive(Debug, Parser)]
+#[clap(version, author, verbatim_doc_comment)]
+struct Args {
+    /// Address that will receive the chunk payments royalties.
+    #[clap(long, short)]
+    royalties_wallet: Address,
+    /// Wallet that will hold ~all gas funds and payment tokens.
+    #[clap(long, short)]
+    genesis_wallet: Option<Address>,
+}
+
+#[tokio::main]
+async fn main() {
+    let args = Args::parse();
+    start_node(args.genesis_wallet, args.royalties_wallet).await;
+}
+
+async fn start_node(genesis_wallet: Option<Address>, royalties_wallet: Address) {
+    let testnet = Testnet::new(royalties_wallet).await;
+
+    println!("*************************");
+    println!("* Ethereum node started *");
+    println!("*************************");
+
+    // Transfer all gas and payment tokens to the genesis wallet.
+    if let Some(genesis) = genesis_wallet {
+        transfer_funds(&testnet, genesis).await;
+    }
+
+    print_testnet_details(&testnet, genesis_wallet).await;
+    keep_alive(testnet).await;
+
+    println!("Ethereum node stopped.");
+}
+
+async fn transfer_funds(testnet: &Testnet, genesis_wallet: Address) {
+    let wallet =
+        Wallet::new_from_private_key(testnet.to_network(), &testnet.default_wallet_private_key())
+            .expect("Could not init deployer wallet");
+
+    let token_amount = wallet
+        .balance_of_tokens()
+        .await
+        .expect("Could not get balance of tokens");
+
+    // Transfer all payment tokens.
+    let _ = wallet.transfer_tokens(genesis_wallet, token_amount).await;
+
+    let gas_amount = wallet
+        .balance_of_gas_tokens()
+        .await
+        .expect("Could not get balance of gas tokens");
+
+    let sub_amount = Amount::from_str("1000000000000000000").expect("Could not parse sub amount");
+
+    // Transfer almost all gas. Save some gas for this tx.
+    let _ = wallet
+        .transfer_gas_tokens(genesis_wallet, gas_amount - sub_amount)
+        .await;
+}
+
+async fn print_testnet_details(testnet: &Testnet, genesis_wallet: Option<Address>) {
+    let network = testnet.to_network();
+
+    println!("RPC URL: {}", network.rpc_url());
+    println!("Payment token address: {}", network.payment_token_address());
+    println!(
+        "Chunk payments address: {}",
+        network.chunk_payments_address()
+    );
+    println!(
+        "Deployer wallet private key: {}",
+        testnet.default_wallet_private_key()
+    );
+
+    if let Some(genesis) = genesis_wallet {
+        let tokens = balance_of_tokens(genesis, &network)
+            .await
+            .unwrap_or(Amount::MIN);
+
+        let gas = balance_of_gas_tokens(genesis, &network)
+            .await
+            .unwrap_or(Amount::MIN);
+
+        println!("Genesis wallet balance (atto): (tokens: {tokens}, gas: {gas})");
+    }
+}
+
+async fn keep_alive<T>(variable: T) {
+    let _ = tokio::signal::ctrl_c().await;
+    println!("Received Ctrl-C, stopping...");
+    drop(variable);
+}
diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml
new file mode 100644
index 0000000000..bd7fa723ae
--- /dev/null
+++ b/evmlib/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+authors = ["MaidSafe Developers <dev@maidsafe.net>"]
+description = "Safe Network EVM"
+edition = "2021"
+homepage = "https://maidsafe.net"
+license = "GPL-3.0"
+name = "evmlib"
+repository = "https://github.com/maidsafe/safe_network"
+version = "0.1.0"
+
+[dependencies]
+alloy = { version = "0.2", features = ["full", "provider-anvil-node"] }
+serde = "1.0"
+thiserror = "1.0"
+tokio = "1.38.0"
+rand = "0.8.5"
+
+[lints]
+workspace = true
diff --git a/evmlib/README.md b/evmlib/README.md
new file mode 100644
index 0000000000..b0ee569fdd
--- /dev/null
+++ b/evmlib/README.md
@@ -0,0 +1,3 @@
+## Testing
+
+1. Install Foundry to get access to Anvil nodes: https://book.getfoundry.sh/getting-started/installation
diff --git a/evmlib/artifacts/AutonomiNetworkToken.json b/evmlib/artifacts/AutonomiNetworkToken.json
new file mode 100644
index 0000000000..b075133e1c
--- /dev/null
+++ b/evmlib/artifacts/AutonomiNetworkToken.json
@@ -0,0 +1,897 @@
+{
+  "_format": "hh-sol-artifact-1",
+  "contractName": "AutonomiNetworkToken",
+  "sourceName": "contracts/AutonomiNetworkToken.sol",
+  "abi": [
+    {
+      "inputs": [],
+      "stateMutability": "nonpayable",
+      "type": "constructor"
+    },
+    {
+      "inputs": [],
+      "name": "CheckpointUnorderedInsertion",
+      "type": "error"
+    },
+    {
+      "inputs": [],
+      "name": "ECDSAInvalidSignature",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "uint256",
+          "name": "length",
+          "type": "uint256"
+        }
+      ],
+      "name": "ECDSAInvalidSignatureLength",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "bytes32",
+          "name": "s",
+          "type": "bytes32"
+        }
+      ],
+      "name": "ECDSAInvalidSignatureS",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "uint256",
+          "name": "increasedSupply",
+          "type": "uint256"
+        },
+        {
+          "internalType": "uint256",
+          "name": "cap",
+          "type": "uint256"
+        }
+      ],
+      "name": "ERC20ExceededSafeSupply",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "address",
+          "name": "spender",
+          "type": "address"
+        },
+        {
+          "internalType": "uint256",
+          "name": "allowance",
+          "type": "uint256"
+        },
+        {
+          "internalType": "uint256",
+          "name": "needed",
+          "type": "uint256"
+        }
+      ],
+      "name": "ERC20InsufficientAllowance",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "address",
+          "name": "sender",
+          "type": "address"
+        },
+        {
+          "internalType": "uint256",
+          "name": "balance",
+          "type": "uint256"
+        },
+        {
+          "internalType": "uint256",
+          "name": "needed",
+          "type": "uint256"
+        }
+      ],
+      "name": "ERC20InsufficientBalance",
+      "type": "error"
+    },
+    {
+      "inputs": [
+        {
+          "internalType": "address",
+          "name": 
"approver", + "type": "address" + } + ], + "name": "ERC20InvalidApprover", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "receiver", + "type": "address" + } + ], + "name": "ERC20InvalidReceiver", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "sender", + "type": "address" + } + ], + "name": "ERC20InvalidSender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "ERC20InvalidSpender", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "name": "ERC2612ExpiredSignature", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "signer", + "type": "address" + }, + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "ERC2612InvalidSigner", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + }, + { + "internalType": "uint48", + "name": "clock", + "type": "uint48" + } + ], + "name": "ERC5805FutureLookup", + "type": "error" + }, + { + "inputs": [], + "name": "ERC6372InconsistentClock", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "currentNonce", + "type": "uint256" + } + ], + "name": "InvalidAccountNonce", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidShortString", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "bits", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "SafeCastOverflowedUintDowncast", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "string", + "name": "str", + "type": "string" + } + ], + "name": "StringTooLong", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + } + ], + "name": "VotesExpiredSignature", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "delegator", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "fromDelegate", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "toDelegate", + "type": "address" + } + ], + "name": "DelegateChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "delegate", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "previousVotes", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newVotes", + "type": "uint256" + } + ], + "name": "DelegateVotesChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "EIP712DomainChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, 
+ { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [], + "name": "CLOCK_MODE", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "DOMAIN_SEPARATOR", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "burnFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint32", + "name": "pos", + "type": "uint32" + } + ], + "name": "checkpoints", + "outputs": [ + { + "components": [ + { + "internalType": "uint48", + "name": "_key", + "type": "uint48" + }, + { + "internalType": "uint208", + "name": "_value", + "type": "uint208" + } + ], + "internalType": "struct Checkpoints.Checkpoint208", + "name": "", + "type": "tuple" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clock", + "outputs": [ + { + "internalType": "uint48", + "name": "", + "type": "uint48" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "decimals", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatee", + "type": "address" + } + ], + "name": "delegate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "delegatee", + "type": "address" + }, + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": 
"bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "delegateBySig", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "delegates", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "eip712Domain", + "outputs": [ + { + "internalType": "bytes1", + "name": "fields", + "type": "bytes1" + }, + { + "internalType": "string", + "name": "name", + "type": "string" + }, + { + "internalType": "string", + "name": "version", + "type": "string" + }, + { + "internalType": "uint256", + "name": "chainId", + "type": "uint256" + }, + { + "internalType": "address", + "name": "verifyingContract", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + }, + { + "internalType": "uint256[]", + "name": "extensions", + "type": "uint256[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + } + ], + "name": "getPastTotalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "internalType": "uint256", + "name": "timepoint", + "type": "uint256" + } + ], + "name": "getPastVotes", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "getVotes", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": "nonces", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "numCheckpoints", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "permit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": "0x6101606040523480156200001257600080fd5b506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e00000000000000000000000081525080604051806040016040528060018152602001603160f81b8152506040518060400160405280601481526020017f4175746f6e6f6d694e6574776f726b546f6b656e0000000000000000000000008152506040518060400160405280600381526020016210539560ea1b8152508160039081620000c79190620009b5565b506004620000d68282620009b5565b50620000e891508390506005620001c0565b61012052620000f9816006620001c0565b61014052815160208084019190912060e052815190820120610100524660a0526200018760e05161010051604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201529081019290925260608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b60805250503060c05250620001ba33620001a46012600a62000b94565b620001b4906301312d0062000ba5565b620001f9565b62000cae565b6000602083511015620001e057620001d8836200023b565b9050620001f3565b81620001ed8482620009b5565b5060ff90505b92915050565b6001600160a01b038216620002295760405163ec442f0560e01b8152600060048201526024015b60405180910390fd5b62000237600083836200027e565b5050565b600080829050601f8151111562000269578260405163305a27a960e01b815260040162000220919062000bbf565b8051620002768262000c10565b179392505050565b6200028b83838362000290565b505050565b6200029d838383620002ff565b6001600160a01b038316620002f2576000620002b860025490565b90506001600160d01b0380821115620002ef57604051630e58ae9360e11b8152600481018390526024810182905260440162000220565b50505b6200028b83838362000432565b6001600160a01b0383166200032e57806002600082825462000322919062000c35565b90915550620003a29050565b6001600160a01b03831660009081526020819052604090205481811015620003835760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640162000220565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b038216620003c057600280548290039055620003df565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040516200042591815260200190565b60405180910390a3505050565b6001600160a01b038316620004675762000464600a62000953620004ca60201b176200045e84620004df565b62000519565b50505b6001600160a01b038216620004965762000493600a6200095f6200055660201b176200045e84620004df565b50505b6001600160a01b038381166000908152600860205260408082205485841683529120546200028b9291821691168362000564565b6000620004d8828462000c4b565b9392505050565b60006001600160d01b03821115620005
15576040516306dfcc6560e41b815260d060048201526024810183905260440162000220565b5090565b600080620005496200052a620006cb565b620005406200053988620006dc565b868860201c565b8791906200072b565b915091505b935093915050565b6000620004d8828462000c75565b816001600160a01b0316836001600160a01b031614158015620005875750600081115b156200028b576001600160a01b038316156200062a576001600160a01b038316600090815260096020908152604082208291620005d5919062000556901b6200095f176200045e86620004df565b6001600160d01b031691506001600160d01b03169150846001600160a01b031660008051602062002bda83398151915283836040516200061f929190918252602082015260400190565b60405180910390a250505b6001600160a01b038216156200028b576001600160a01b038216600090815260096020908152604082208291620006729190620004ca901b62000953176200045e86620004df565b6001600160d01b031691506001600160d01b03169150836001600160a01b031660008051602062002bda8339815191528383604051620006bc929190918252602082015260400190565b60405180910390a25050505050565b6000620006d76200073b565b905090565b8054600090801562000722576200070883620006fa60018462000c98565b600091825260209091200190565b54660100000000000090046001600160d01b0316620004d8565b60009392505050565b6000806200054985858562000748565b6000620006d743620008da565b8254600090819080156200087b5760006200076a87620006fa60018562000c98565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015620007c257604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff808816911603620008165784620007e988620006fa60018662000c98565b80546001600160d01b039290921666010000000000000265ffffffffffff9092169190911790556200086a565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506200054e9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816200054e565b600065ffffffffffff82111562000515576040516306dfcc6560e41b8152603060048201526024810183905260440162000220565b634e487b7160e01b600052604160045260246000fd5b600181811c908216806200093a57607f821691505b6020821081036200095b57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200028b576000816000526020600020601f850160051c810160208610156200098c5750805b601f850160051c820191505b81811015620009ad5782815560010162000998565b505050505050565b81516001600160401b03811115620009d157620009d16200090f565b620009e981620009e2845462000925565b8462000961565b602080601f83116001811462000a21576000841562000a085750858301515b600019600386901b1c1916600185901b178555620009ad565b600085815260208120601f198616915b8281101562000a525788860151825594840194600190910190840162000a31565b508582101562000a715787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052601160045260246000fd5b600181815b8085111562000ad857816000190482111562000abc5762000abc62000a81565b8085161562000aca57918102915b93841c939080029062000a9c565b509250929050565b60008262000af157506001620001f3565b8162000b0057506000620001f3565b816001811462000b19576002811462000b245762000b44565b6001915050620001f3565b60ff84111562000b385762000b3862000a81565b50506001821b620001f3565b5060208310610133831016604e8410600b841016171562000b69575081810a620001f3565b62000b75838362000a97565b806000190482111562000b8c5762000b8c62000a81565b029392505050565b6000620004d860ff84168362000ae0565b8082028115828204841417620001f357620001f362000a81565b60006020808352835180602085015260005b8181101562000bef578581018301518
5820160400152820162000bd1565b506000604082860101526040601f19601f8301168501019250505092915050565b805160208083015191908110156200095b5760001960209190910360031b1b16919050565b80820180821115620001f357620001f362000a81565b6001600160d01b0381811683821601908082111562000c6e5762000c6e62000a81565b5092915050565b6001600160d01b0382811682821603908082111562000c6e5762000c6e62000a81565b81810381811115620001f357620001f362000a81565b60805160a05160c05160e051610100516101205161014051611ed162000d096000396000610d9901526000610d6c01526000610b3401526000610b0c01526000610a6701526000610a9101526000610abb0152611ed16000f3fe608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152
601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b60606004805461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0
382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b90046001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b6040805160
00808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d565b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab25
7602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b8381036040850152611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033dec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a724", + "deployedBytecode": 
"0x608060405234801561001057600080fd5b50600436106101735760003560e01c806370a08231116100de57806395d89b4111610097578063c3cda52011610071578063c3cda5201461036e578063d505accf14610381578063dd62ed3e14610394578063f1127ed8146103cd57600080fd5b806395d89b41146103405780639ab24eb014610348578063a9059cbb1461035b57600080fd5b806370a08231146102a457806379cc6790146102cd5780637ecebe00146102e057806384b0196e146102f35780638e539e8c1461030e57806391ddadf41461032157600080fd5b80633a46b1a8116101305780633a46b1a8146101f557806342966c68146102085780634bf5d7e91461021d578063587cde1e146102255780635c19a95c146102695780636fcfff451461027c57600080fd5b806306fdde0314610178578063095ea7b31461019657806318160ddd146101b957806323b872dd146101cb578063313ce567146101de5780633644e515146101ed575b600080fd5b61018061040c565b60405161018d9190611ad2565b60405180910390f35b6101a96101a4366004611b01565b61049e565b604051901515815260200161018d565b6002545b60405190815260200161018d565b6101a96101d9366004611b2b565b6104b8565b6040516012815260200161018d565b6101bd6104dc565b6101bd610203366004611b01565b6104eb565b61021b610216366004611b67565b610571565b005b61018061057e565b610251610233366004611b80565b6001600160a01b039081166000908152600860205260409020541690565b6040516001600160a01b03909116815260200161018d565b61021b610277366004611b80565b6105f6565b61028f61028a366004611b80565b610605565b60405163ffffffff909116815260200161018d565b6101bd6102b2366004611b80565b6001600160a01b031660009081526020819052604090205490565b61021b6102db366004611b01565b610610565b6101bd6102ee366004611b80565b610625565b6102fb610630565b60405161018d9796959493929190611b9b565b6101bd61031c366004611b67565b610676565b6103296106e0565b60405165ffffffffffff909116815260200161018d565b6101806106ea565b6101bd610356366004611b80565b6106f9565b6101a9610369366004611b01565b610729565b61021b61037c366004611c45565b610737565b61021b61038f366004611c9d565b6107f4565b6101bd6103a2366004611d07565b6001600160a01b03918216600090815260016020908152604080832093909416825291909152205490565b6103e06103db366004611d3a565b61092e565b60408051825165ffffffffffff1681526020928301516001600160d01b0316928101929092520161018d565b60606003805461041b90611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461044790611d7a565b80156104945780601f1061046957610100808354040283529160200191610494565b820191906000526020600020905b81548152906001019060200180831161047757829003601f168201915b5050505050905090565b6000336104ac81858561096b565b60019150505b92915050565b6000336104c685828561097d565b6104d18585856109fb565b506001949350505050565b60006104e6610a5a565b905090565b6000806104f66106e0565b90508065ffffffffffff16831061053657604051637669fc0f60e11b81526004810184905265ffffffffffff821660248201526044015b60405180910390fd5b61056061054284610b85565b6001600160a01b038616600090815260096020526040902090610bbc565b6001600160d01b0316949350505050565b61057b3382610c72565b50565b6060610588610ca8565b65ffffffffffff166105986106e0565b65ffffffffffff16146105be576040516301bfc1c560e61b815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b336106018183610cb3565b5050565b60006104b282610d25565b61061b82338361097d565b6106018282610c72565b60006104b282610d47565b600060608060008060006060610644610d65565b61064c610d92565b60408051600080825260208201909252600f60f81b9b939a50919850469750309650945092509050565b6000806106816106e0565b90508065ffffffffffff1683106106bc57604051637669fc0f60e11b81526004810184905265ffffffffffff8216602482015260440161052d565b6106d06106c884610b85565b600a90610bbc565b6001600160d01b03169392505050565b60006104e6610ca8565b6060600480
5461041b90611d7a565b6001600160a01b038116600090815260096020526040812061071a90610dbf565b6001600160d01b031692915050565b6000336104ac8185856109fb565b8342111561075b57604051632341d78760e11b81526004810185905260240161052d565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b0388169181019190915260608101869052608081018590526000906107d5906107cd9060a00160405160208183030381529060405280519060200120610df8565b858585610e25565b90506107e18187610e53565b6107eb8188610cb3565b50505050505050565b834211156108185760405163313c898160e11b81526004810185905260240161052d565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886108658c6001600160a01b0316600090815260076020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e00160405160208183030381529060405280519060200120905060006108c082610df8565b905060006108d082878787610e25565b9050896001600160a01b0316816001600160a01b031614610917576040516325c0072360e11b81526001600160a01b0380831660048301528b16602482015260440161052d565b6109228a8a8a61096b565b50505050505050505050565b604080518082019091526000808252602082015261094c8383610ea6565b9392505050565b600061094c8284611dca565b600061094c8284611df1565b6109788383836001610edc565b505050565b6001600160a01b0383811660009081526001602090815260408083209386168352929052205460001981146109f557818110156109e657604051637dc7a0d960e11b81526001600160a01b0384166004820152602481018290526044810183905260640161052d565b6109f584848484036000610edc565b50505050565b6001600160a01b038316610a2557604051634b637e8f60e11b81526000600482015260240161052d565b6001600160a01b038216610a4f5760405163ec442f0560e01b81526000600482015260240161052d565b610978838383610fb1565b6000306001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016148015610ab357507f000000000000000000000000000000000000000000000000000000000000000046145b15610add57507f000000000000000000000000000000000000000000000000000000000000000090565b6104e6604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f60208201527f0000000000000000000000000000000000000000000000000000000000000000918101919091527f000000000000000000000000000000000000000000000000000000000000000060608201524660808201523060a082015260009060c00160405160208183030381529060405280519060200120905090565b600065ffffffffffff821115610bb8576040516306dfcc6560e41b8152603060048201526024810183905260440161052d565b5090565b815460009081816005811115610c1b576000610bd784610fbc565b610be19085611e11565b60008881526020902090915081015465ffffffffffff9081169087161015610c0b57809150610c19565b610c16816001611e24565b92505b505b6000610c29878785856110a4565b90508015610c6457610c4e87610c40600184611e11565b600091825260209091200190565b54600160301b90046001600160d01b0316610c67565b60005b979650505050505050565b6001600160a01b038216610c9c57604051634b637e8f60e11b81526000600482015260240161052d565b61060182600083610fb1565b60006104e643610b85565b6001600160a01b0382811660008181526008602052604080822080548686166001600160a01b0319821681179092559151919094169392849290917f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46109788183610d2086611106565b611124565b6001600160a01b0381166000908152600960205260408120546104b290611290565b6001600160a01b0381166000908152600760205260408120546104b2565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060056112c1565b60606104e67f000000000000000000000000000000000000000000000000000000000000000060066112c1565b80546000908015610def57610dd983610c40600184611e11565b54600160301b900
46001600160d01b031661094c565b60009392505050565b60006104b2610e05610a5a565b8360405161190160f01b8152600281019290925260228201526042902090565b600080600080610e378888888861136c565b925092509250610e47828261143b565b50909695505050505050565b6001600160a01b0382166000908152600760205260409020805460018101909155818114610978576040516301d4b62360e61b81526001600160a01b03841660048201526024810182905260440161052d565b60408051808201909152600080825260208201526001600160a01b038316600090815260096020526040902061094c90836114f4565b6001600160a01b038416610f065760405163e602df0560e01b81526000600482015260240161052d565b6001600160a01b038316610f3057604051634a1406b160e11b81526000600482015260240161052d565b6001600160a01b03808516600090815260016020908152604080832093871683529290522082905580156109f557826001600160a01b0316846001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92584604051610fa391815260200190565b60405180910390a350505050565b610978838383611564565b600081600003610fce57506000919050565b60006001610fdb846115cb565b901c6001901b90506001818481610ff457610ff4611e37565b048201901c9050600181848161100c5761100c611e37565b048201901c9050600181848161102457611024611e37565b048201901c9050600181848161103c5761103c611e37565b048201901c9050600181848161105457611054611e37565b048201901c9050600181848161106c5761106c611e37565b048201901c9050600181848161108457611084611e37565b048201901c905061094c8182858161109e5761109e611e37565b0461165f565b60005b818310156110fe5760006110bb8484611675565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff1611156110ea578092506110f8565b6110f5816001611e24565b93505b506110a7565b509392505050565b6001600160a01b0381166000908152602081905260408120546104b2565b816001600160a01b0316836001600160a01b0316141580156111465750600081115b15610978576001600160a01b038316156111ee576001600160a01b038316600090815260096020526040812081906111899061095f61118486611690565b6116c4565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a72483836040516111e3929190918252602082015260400190565b60405180910390a250505b6001600160a01b03821615610978576001600160a01b038216600090815260096020526040812081906112279061095361118486611690565b6001600160d01b031691506001600160d01b03169150836001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611281929190918252602082015260400190565b60405180910390a25050505050565b600063ffffffff821115610bb8576040516306dfcc6560e41b8152602060048201526024810183905260440161052d565b606060ff83146112db576112d4836116fd565b90506104b2565b8180546112e790611d7a565b80601f016020809104026020016040519081016040528092919081815260200182805461131390611d7a565b80156113605780601f1061133557610100808354040283529160200191611360565b820191906000526020600020905b81548152906001019060200180831161134357829003601f168201915b505050505090506104b2565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a08411156113a75750600091506003905082611431565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661142757506000925060019150829050611431565b9250600091508190505b9450945094915050565b600082600381111561144f5761144f611e4d565b03611458575050565b600182600381111561146c5761146c611e4d565b0361148a5760405163f645eedf60e01b815260040160405180910390fd5b600282600381111561149e5761149e611e4d565b036114bf5760405163fce698f760e01b81526004810182905260240161052d565b60038260038111156114d3576114d3611e4d56
5b03610601576040516335e2f38360e21b81526004810182905260240161052d565b6040805180820190915260008082526020820152826000018263ffffffff168154811061152357611523611e63565b60009182526020918290206040805180820190915291015465ffffffffffff81168252600160301b90046001600160d01b0316918101919091529392505050565b61156f83838361173c565b6001600160a01b0383166115c057600061158860025490565b90506001600160d01b03808211156115bd57604051630e58ae9360e11b8152600481018390526024810182905260440161052d565b50505b610978838383611866565b600080608083901c156115e057608092831c92015b604083901c156115f257604092831c92015b602083901c1561160457602092831c92015b601083901c1561161657601092831c92015b600883901c1561162857600892831c92015b600483901c1561163a57600492831c92015b600283901c1561164c57600292831c92015b600183901c156104b25760010192915050565b600081831061166e578161094c565b5090919050565b60006116846002848418611e79565b61094c90848416611e24565b60006001600160d01b03821115610bb8576040516306dfcc6560e41b815260d060048201526024810183905260440161052d565b6000806116f06116d26106e0565b6116e86116de88610dbf565b868863ffffffff16565b8791906118dc565b915091505b935093915050565b6060600061170a836118ea565b604080516020808252818301909252919250600091906020820181803683375050509182525060208101929092525090565b6001600160a01b03831661176757806002600082825461175c9190611e24565b909155506117d99050565b6001600160a01b038316600090815260208190526040902054818110156117ba5760405163391434e360e21b81526001600160a01b0385166004820152602481018290526044810183905260640161052d565b6001600160a01b03841660009081526020819052604090209082900390555b6001600160a01b0382166117f557600280548290039055611814565b6001600160a01b03821660009081526020819052604090208054820190555b816001600160a01b0316836001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8360405161185991815260200190565b60405180910390a3505050565b6001600160a01b03831661188857611885600a61095361118484611690565b50505b6001600160a01b0382166118aa576118a7600a61095f61118484611690565b50505b6001600160a01b0383811660009081526008602052604080822054858416835291205461097892918216911683611124565b6000806116f0858585611912565b600060ff8216601f8111156104b257604051632cd44ac360e21b815260040160405180910390fd5b825460009081908015611a3157600061193087610c40600185611e11565b60408051808201909152905465ffffffffffff808216808452600160301b9092046001600160d01b03166020840152919250908716101561198457604051632520601d60e01b815260040160405180910390fd5b805165ffffffffffff8088169116036119d057846119a788610c40600186611e11565b80546001600160d01b0392909216600160301b0265ffffffffffff909216919091179055611a21565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d81529190912094519151909216600160301b029216919091179101555b6020015192508391506116f59050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a815291822095519251909316600160301b0291909316179201919091559050816116f5565b6000815180845260005b81811015611ab257602081850181015186830182015201611a96565b506000602082860101526020601f19601f83011685010191505092915050565b60208152600061094c6020830184611a8c565b80356001600160a01b0381168114611afc57600080fd5b919050565b60008060408385031215611b1457600080fd5b611b1d83611ae5565b946020939093013593505050565b600080600060608486031215611b4057600080fd5b611b4984611ae5565b9250611b5760208501611ae5565b9150604084013590509250925092565b600060208284031215611b7957600080fd5b5035919050565b600060208284031215611b9257600080fd5b61094c82611ae5565b60ff60f81b881681526000602060e06020840152611bbc60e084018a611a8c565b838103604085015
2611bce818a611a8c565b606085018990526001600160a01b038816608086015260a0850187905284810360c08601528551808252602080880193509091019060005b81811015611c2257835183529284019291840191600101611c06565b50909c9b505050505050505050505050565b803560ff81168114611afc57600080fd5b60008060008060008060c08789031215611c5e57600080fd5b611c6787611ae5565b95506020870135945060408701359350611c8360608801611c34565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215611cb857600080fd5b611cc188611ae5565b9650611ccf60208901611ae5565b95506040880135945060608801359350611ceb60808901611c34565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215611d1a57600080fd5b611d2383611ae5565b9150611d3160208401611ae5565b90509250929050565b60008060408385031215611d4d57600080fd5b611d5683611ae5565b9150602083013563ffffffff81168114611d6f57600080fd5b809150509250929050565b600181811c90821680611d8e57607f821691505b602082108103611dae57634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b6001600160d01b03818116838216019080821115611dea57611dea611db4565b5092915050565b6001600160d01b03828116828216039080821115611dea57611dea611db4565b818103818111156104b2576104b2611db4565b808201808211156104b2576104b2611db4565b634e487b7160e01b600052601260045260246000fd5b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b600082611e9657634e487b7160e01b600052601260045260246000fd5b50049056fea2646970667358221220ed8af9c04c0db3fd29db1bfe40925e4a5564caf35ad274b7ecb6f7d723229caf64736f6c63430008180033", + "linkReferences": {}, + "deployedLinkReferences": {} +} diff --git a/evmlib/artifacts/ChunkPayments.json b/evmlib/artifacts/ChunkPayments.json new file mode 100644 index 0000000000..000c56318d --- /dev/null +++ b/evmlib/artifacts/ChunkPayments.json @@ -0,0 +1,108 @@ +{ + "_format": "hh-sol-artifact-1", + "contractName": "ChunkPayments", + "sourceName": "contracts/ChunkPayments.sol", + "abi": [ + { + "inputs": [ + { + "internalType": "address", + "name": "_tokenAddress", + "type": "address" + }, + { + "internalType": "address", + "name": "_foundationWallet", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "rewardAddress", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "quoteHash", + "type": "bytes32" + } + ], + "name": "ChunkPaymentMade", + "type": "event" + }, + { + "inputs": [], + "name": "PAYMENT_TOKEN_ADDRESS", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "ROYALTIES_WALLET", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "rewardAddress", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "quoteHash", + "type": "bytes32" + } + ], + "internalType": "struct ChunkPayments.ChunkPayment[]", + "name": "chunkPayments", + "type": "tuple[]" + } + ], + "name": "submitChunkPayments", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } + ], + "bytecode": 
"0x60c060405234801561001057600080fd5b5060405161075b38038061075b83398101604081905261002f91610130565b6001600160a01b0382166100965760405162461bcd60e51b8152602060048201526024808201527f546f6b656e20616464726573732063616e6e6f74206265207a65726f206164646044820152637265737360e01b60648201526084015b60405180910390fd5b6001600160a01b0381166100fd5760405162461bcd60e51b815260206004820152602860248201527f466f756e646174696f6e2077616c6c65742063616e6e6f74206265207a65726f604482015267206164647265737360c01b606482015260840161008d565b6001600160a01b039182166080521660a052610163565b80516001600160a01b038116811461012b57600080fd5b919050565b6000806040838503121561014357600080fd5b61014c83610114565b915061015a60208401610114565b90509250929050565b60805160a0516105b96101a2600039600081816060015261018401526000818160a3015281816101cf015281816102d101526103d101526105b96000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806305aa488a146100465780631c317b341461005b5780635c0d32861461009e575b600080fd5b610059610054366004610444565b6100c5565b005b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6000805b8281101561017d57368484838181106100e4576100e46104b9565b90506060020190506000600a82602001356100ff91906104cf565b905061010b81856104f1565b93506101283361011e6020850185610518565b84602001356101ae565b60408201356020830180359061013e9085610518565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450506001016100c9565b506101a9337f0000000000000000000000000000000000000000000000000000000000000000836101ae565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa158015610218573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061023c9190610548565b101561029a5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b03831630146103a357604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561031a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061033e9190610548565b10156103a35760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610291565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561041a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061043e9190610561565b50505050565b6000806020838503121561045757600080fd5b823567ffffffffffffffff8082111561046f57600080fd5b818501915085601f83011261048357600080fd5b81358181111561049257600080fd5b8660206060830285010111156104a757600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b6000826104ec57634e487b7160e01b600052601260045260246000fd5b500490565b8082018082111561051257634e487b7160e01b600052601160045260246000fd5b92915050565b60006020828403121561052a57600080fd5b81356001600160a01b038116811461054157600080fd5b9392505050565b6000602082
8403121561055a57600080fd5b5051919050565b60006020828403121561057357600080fd5b8151801515811461054157600080fdfea264697066735822122094c63e1f2c74507a86a2259c9b1cb5a11238724ae1164198b92142b5386eda6164736f6c63430008180033", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106100415760003560e01c806305aa488a146100465780631c317b341461005b5780635c0d32861461009e575b600080fd5b610059610054366004610444565b6100c5565b005b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6000805b8281101561017d57368484838181106100e4576100e46104b9565b90506060020190506000600a82602001356100ff91906104cf565b905061010b81856104f1565b93506101283361011e6020850185610518565b84602001356101ae565b60408201356020830180359061013e9085610518565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450506001016100c9565b506101a9337f0000000000000000000000000000000000000000000000000000000000000000836101ae565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa158015610218573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061023c9190610548565b101561029a5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b03831630146103a357604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561031a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061033e9190610548565b10156103a35760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610291565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561041a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061043e9190610561565b50505050565b6000806020838503121561045757600080fd5b823567ffffffffffffffff8082111561046f57600080fd5b818501915085601f83011261048357600080fd5b81358181111561049257600080fd5b8660206060830285010111156104a757600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b6000826104ec57634e487b7160e01b600052601260045260246000fd5b500490565b8082018082111561051257634e487b7160e01b600052601160045260246000fd5b92915050565b60006020828403121561052a57600080fd5b81356001600160a01b038116811461054157600080fd5b9392505050565b60006020828403121561055a57600080fd5b5051919050565b60006020828403121561057357600080fd5b8151801515811461054157600080fdfea264697066735822122094c63e1f2c74507a86a2259c9b1cb5a11238724ae1164198b92142b5386eda6164736f6c63430008180033", + "linkReferences": {}, + "deployedLinkReferences": {} +} \ No newline at end of file diff --git a/evmlib/src/common.rs b/evmlib/src/common.rs new file mode 100644 index 0000000000..4897a1e2cf --- /dev/null +++ b/evmlib/src/common.rs @@ -0,0 +1,10 @@ +use alloy::primitives::FixedBytes; + +pub type Address = alloy::primitives::Address; +pub type Hash = FixedBytes<32>; +pub type 
TxHash = alloy::primitives::TxHash;
+pub type U256 = alloy::primitives::U256;
+pub type QuoteHash = Hash;
+pub type Amount = U256;
+pub type QuotePayment = (QuoteHash, Address, Amount);
+pub type EthereumWallet = alloy::network::EthereumWallet;
diff --git a/evmlib/src/contract/chunk_payments/error.rs b/evmlib/src/contract/chunk_payments/error.rs
new file mode 100644
index 0000000000..9e0770a0a8
--- /dev/null
+++ b/evmlib/src/contract/chunk_payments/error.rs
@@ -0,0 +1,14 @@
+use crate::contract::network_token;
+use alloy::transports::{RpcError, TransportErrorKind};
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    ContractError(#[from] alloy::contract::Error),
+    #[error(transparent)]
+    RpcError(#[from] RpcError<TransportErrorKind>),
+    #[error(transparent)]
+    NetworkTokenError(#[from] network_token::Error),
+    #[error("The transfer limit of 512 has been exceeded")]
+    TransferLimitExceeded,
+}
diff --git a/evmlib/src/contract/chunk_payments/mod.rs b/evmlib/src/contract/chunk_payments/mod.rs
new file mode 100644
index 0000000000..fb310ba5a1
--- /dev/null
+++ b/evmlib/src/contract/chunk_payments/mod.rs
@@ -0,0 +1,87 @@
+pub mod error;
+
+use crate::common;
+use crate::common::{Address, TxHash};
+use crate::contract::chunk_payments::error::Error;
+use crate::contract::chunk_payments::ChunkPaymentsContract::ChunkPaymentsContractInstance;
+use alloy::providers::{Network, Provider};
+use alloy::sol;
+use alloy::transports::Transport;
+
+/// The maximum number of transfers within one chunk payments transaction.
+pub const MAX_TRANSFERS_PER_TRANSACTION: usize = 512;
+
+sol!(
+    #[allow(clippy::too_many_arguments)]
+    #[allow(missing_docs)]
+    #[sol(rpc)]
+    ChunkPaymentsContract,
+    "artifacts/ChunkPayments.json"
+);
+
+pub struct ChunkPayments<T: Transport + Clone, P: Provider<T, N>, N: Network> {
+    pub contract: ChunkPaymentsContractInstance<T, P, N>,
+}
+
+impl<T, P, N> ChunkPayments<T, P, N>
+where
+    T: Transport + Clone,
+    P: Provider<T, N>,
+    N: Network,
+{
+    /// Create a new ChunkPayments contract instance.
+    pub fn new(contract_address: Address, provider: P) -> Self {
+        let contract = ChunkPaymentsContract::new(contract_address, provider);
+        ChunkPayments { contract }
+    }
+
+    /// Deploys the ChunkPayments smart contract to the network of the provider.
+    /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING!
+    pub async fn deploy(
+        provider: P,
+        payment_token_address: Address,
+        royalties_wallet: Address,
+    ) -> Self {
+        let contract =
+            ChunkPaymentsContract::deploy(provider, payment_token_address, royalties_wallet)
+                .await
+                .expect("Could not deploy contract");
+
+        ChunkPayments { contract }
+    }
+
+    pub fn set_provider(&mut self, provider: P) {
+        let address = *self.contract.address();
+        self.contract = ChunkPaymentsContract::new(address, provider);
+    }
+
+    /// Pay for quotes.
+    /// Input: (quote_hash, reward_address, amount).
+    pub async fn pay_for_quotes<I: IntoIterator<Item = common::QuotePayment>>(
+        &self,
+        chunk_payments: I,
+    ) -> Result<TxHash, Error> {
+        let chunk_payments: Vec<ChunkPaymentsContract::ChunkPayment> = chunk_payments
+            .into_iter()
+            .map(|(hash, addr, amount)| ChunkPaymentsContract::ChunkPayment {
+                rewardAddress: addr,
+                amount,
+                quoteHash: hash,
+            })
+            .collect();
+
+        if chunk_payments.len() > MAX_TRANSFERS_PER_TRANSACTION {
+            return Err(Error::TransferLimitExceeded);
+        }
+
+        let tx_hash = self
+            .contract
+            .submitChunkPayments(chunk_payments)
+            .send()
+            .await?
+            .watch()
+            .await?;
+
+        Ok(tx_hash)
+    }
+}
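+
+// A minimal usage sketch of `pay_for_quotes`: pays two hypothetical quotes in
+// a single transaction. The quote hashes, reward addresses, and amounts here
+// are placeholders; any provider/signer combination accepted by
+// `ChunkPayments::new` works.
+#[cfg(test)]
+mod usage_sketch {
+    use super::*;
+    use crate::common::{Amount, QuoteHash, QuotePayment};
+
+    #[allow(dead_code)]
+    async fn pay_two_quotes<T, P, N>(
+        chunk_payments: &ChunkPayments<T, P, N>,
+        quotes: [(QuoteHash, Address); 2],
+    ) -> Result<TxHash, Error>
+    where
+        T: Transport + Clone,
+        P: Provider<T, N>,
+        N: Network,
+    {
+        // Each entry is a QuotePayment: (quote_hash, reward_address, amount).
+        let payments: Vec<QuotePayment> = quotes
+            .into_iter()
+            .map(|(quote_hash, reward_addr)| (quote_hash, reward_addr, Amount::from(100)))
+            .collect();
+        chunk_payments.pay_for_quotes(payments).await
+    }
+}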
diff --git a/evmlib/src/contract/mod.rs b/evmlib/src/contract/mod.rs
new file mode 100644
index 0000000000..5afb41f09b
--- /dev/null
+++ b/evmlib/src/contract/mod.rs
@@ -0,0 +1,2 @@
+pub mod chunk_payments;
+pub mod network_token;
diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs
new file mode 100644
index 0000000000..588f5eb12d
--- /dev/null
+++ b/evmlib/src/contract/network_token.rs
@@ -0,0 +1,84 @@
+use crate::common::{Address, TxHash, U256};
+use crate::contract::network_token::NetworkTokenContract::NetworkTokenContractInstance;
+use alloy::providers::{Network, Provider};
+use alloy::sol;
+use alloy::transports::{RpcError, Transport, TransportErrorKind};
+
+sol!(
+    #[allow(clippy::too_many_arguments)]
+    #[allow(missing_docs)]
+    #[sol(rpc)]
+    NetworkTokenContract,
+    "artifacts/AutonomiNetworkToken.json"
+);
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    ContractError(#[from] alloy::contract::Error),
+    #[error(transparent)]
+    RpcError(#[from] RpcError<TransportErrorKind>),
+}
+
+pub struct NetworkToken<T: Transport + Clone, P: Provider<T, N>, N: Network> {
+    pub contract: NetworkTokenContractInstance<T, P, N>,
+}
+
+impl<T, P, N> NetworkToken<T, P, N>
+where
+    T: Transport + Clone,
+    P: Provider<T, N>,
+    N: Network,
+{
+    /// Create a new NetworkToken contract instance.
+    pub fn new(contract_address: Address, provider: P) -> Self {
+        let contract = NetworkTokenContract::new(contract_address, provider);
+        NetworkToken { contract }
+    }
+
+    /// Deploys the AutonomiNetworkToken smart contract to the network of the provider.
+    /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING!
+    pub async fn deploy(provider: P) -> Self {
+        let contract = NetworkTokenContract::deploy(provider)
+            .await
+            .expect("Could not deploy contract");
+        NetworkToken { contract }
+    }
+
+    pub fn set_provider(&mut self, provider: P) {
+        let address = *self.contract.address();
+        self.contract = NetworkTokenContract::new(address, provider);
+    }
+
+    /// Get the raw token balance of an address.
+    pub async fn balance_of(&self, account: Address) -> Result<U256, Error> {
+        let balance = self.contract.balanceOf(account).call().await?._0;
+        Ok(balance)
+    }
+
+    /// Approve spender to spend a raw amount of tokens.
+    pub async fn approve(&self, spender: Address, value: U256) -> Result<TxHash, Error> {
+        let tx_hash = self
+            .contract
+            .approve(spender, value)
+            .send()
+            .await?
+            .watch()
+            .await?;
+
+        Ok(tx_hash)
+    }
+
+    /// Transfer a raw amount of tokens.
+    pub async fn transfer(&self, receiver: Address, amount: U256) -> Result<TxHash, Error> {
+        let tx_hash = self
+            .contract
+            .transfer(receiver, amount)
+            .send()
+            .await?
+            .watch()
+            .await?;
+
+        Ok(tx_hash)
+    }
+}
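+
+// A minimal usage sketch of the token wrapper: reads the caller's raw balance
+// and approves a hypothetical spender for all of it. `owner` and `spender`
+// are placeholder addresses.
+#[cfg(test)]
+mod usage_sketch {
+    use super::*;
+
+    #[allow(dead_code)]
+    async fn approve_full_balance<T, P, N>(
+        token: &NetworkToken<T, P, N>,
+        owner: Address,
+        spender: Address,
+    ) -> Result<TxHash, Error>
+    where
+        T: Transport + Clone,
+        P: Provider<T, N>,
+        N: Network,
+    {
+        // Balances are raw (indivisible) token units, not whole tokens.
+        let balance: U256 = token.balance_of(owner).await?;
+        token.approve(spender, balance).await
+    }
+}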
diff --git a/evmlib/src/cryptography.rs b/evmlib/src/cryptography.rs
new file mode 100644
index 0000000000..fea0297a83
--- /dev/null
+++ b/evmlib/src/cryptography.rs
@@ -0,0 +1,7 @@
+use crate::common::Hash;
+use alloy::primitives::keccak256;
+
+/// Hash data using Keccak256.
+pub fn hash<T: AsRef<[u8]>>(data: T) -> Hash {
+    keccak256(data.as_ref())
+}
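+
+// A small illustrative test of the helper above: Keccak256 is deterministic,
+// and any `AsRef<[u8]>` input (e.g. `&str` or `Vec<u8>`) is accepted.
+#[cfg(test)]
+mod keccak_tests {
+    use super::*;
+
+    #[test]
+    fn hashing_is_deterministic_across_input_types() {
+        assert_eq!(hash("quote"), hash(b"quote".to_vec()));
+    }
+}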
diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs
new file mode 100644
index 0000000000..deca415998
--- /dev/null
+++ b/evmlib/src/event.rs
@@ -0,0 +1,54 @@
+use crate::common::{Address, Hash, U256};
+use alloy::primitives::{b256, FixedBytes};
+use alloy::rpc::types::Log;
+
+// Should be updated when the smart contract changes!
+pub(crate) const CHUNK_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> =
+    b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958");
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Topics amount is unexpected. Was expecting 4")]
+    TopicsAmountUnexpected,
+    #[error("Event signature is missing")]
+    EventSignatureMissing,
+    #[error("Event signature does not match")]
+    EventSignatureDoesNotMatch,
+}
+
+/// Struct for the ChunkPaymentMade event emitted by the ChunkPayments smart contract.
+#[derive(Debug)]
+pub(crate) struct ChunkPaymentEvent {
+    pub reward_address: Address,
+    pub amount: U256,
+    pub quote_hash: Hash,
+}
+
+impl TryFrom<Log> for ChunkPaymentEvent {
+    type Error = Error;
+
+    fn try_from(log: Log) -> Result<Self, Self::Error> {
+        // Verify the number of topics: the event signature plus three indexed fields.
+        if log.topics().len() != 4 {
+            return Err(Error::TopicsAmountUnexpected);
+        }
+
+        let topic0 = log.topics().first().ok_or(Error::EventSignatureMissing)?;
+
+        // Verify the event signature
+        if topic0 != &CHUNK_PAYMENT_EVENT_SIGNATURE {
+            return Err(Error::EventSignatureDoesNotMatch);
+        }
+
+        // Extract the data. An indexed `address` is left-padded to 32 bytes, so
+        // the address itself is the last 20 bytes of the topic; an indexed
+        // `uint256` occupies the full 32-byte topic.
+        let reward_address = Address::from_slice(&log.topics()[1][12..]);
+        let amount = U256::from_be_slice(log.topics()[2].as_slice());
+        let quote_hash = Hash::from_slice(log.topics()[3].as_slice());
+
+        Ok(Self {
+            reward_address,
+            amount,
+            quote_hash,
+        })
+    }
+}
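+
+// An illustrative test of the topic encoding the decoder above relies on; the
+// address value is arbitrary. An indexed `address` round-trips through a
+// left-padded 32-byte topic.
+#[cfg(test)]
+mod topic_layout_tests {
+    use super::*;
+
+    #[test]
+    fn indexed_address_round_trips_through_a_topic() {
+        let reward_addr = Address::repeat_byte(0xab);
+        let topic: FixedBytes<32> = FixedBytes::left_padding_from(reward_addr.as_slice());
+        assert_eq!(Address::from_slice(&topic[12..]), reward_addr);
+    }
+}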
diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs
new file mode 100644
index 0000000000..b3995ebe72
--- /dev/null
+++ b/evmlib/src/lib.rs
@@ -0,0 +1,108 @@
+use crate::common::{Address, QuoteHash, TxHash, U256};
+use crate::transaction::verify_chunk_payment;
+use alloy::primitives::address;
+use alloy::transports::http::reqwest;
+use std::str::FromStr;
+use std::sync::LazyLock;
+
+pub mod common;
+pub mod contract;
+pub mod cryptography;
+pub(crate) mod event;
+pub mod testnet;
+pub mod transaction;
+pub mod utils;
+pub mod wallet;
+
+static PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL: LazyLock<reqwest::Url> = LazyLock::new(|| {
+    "https://arb1.arbitrum.io/rpc"
+        .parse()
+        .expect("Invalid RPC URL")
+});
+
+const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address =
+    address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C");
+
+// Should be updated when the smart contract changes!
+const ARBITRUM_ONE_CHUNK_PAYMENTS_ADDRESS: Address =
+    address!("F15BfEA73b6a551C5c2e66026e4eB3b69c1F602c");
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct CustomNetwork {
+    pub rpc_url_http: reqwest::Url,
+    pub payment_token_address: Address,
+    pub chunk_payments_address: Address,
+}
+
+impl CustomNetwork {
+    pub fn new(rpc_url: &str, payment_token_addr: &str, chunk_payments_addr: &str) -> Self {
+        Self {
+            rpc_url_http: reqwest::Url::parse(rpc_url).expect("Invalid RPC URL"),
+            payment_token_address: Address::from_str(payment_token_addr)
+                .expect("Invalid payment token address"),
+            chunk_payments_address: Address::from_str(chunk_payments_addr)
+                .expect("Invalid chunk payments address"),
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum Network {
+    ArbitrumOne,
+    Custom(CustomNetwork),
+}
+
+impl Network {
+    pub fn identifier(&self) -> &str {
+        match self {
+            Network::ArbitrumOne => "arbitrum-one",
+            Network::Custom(_) => "custom",
+        }
+    }
+
+    pub fn rpc_url(&self) -> &reqwest::Url {
+        match self {
+            Network::ArbitrumOne => &PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL,
+            Network::Custom(custom) => &custom.rpc_url_http,
+        }
+    }
+
+    pub fn payment_token_address(&self) -> &Address {
+        match self {
+            Network::ArbitrumOne => &ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS,
+            Network::Custom(custom) => &custom.payment_token_address,
+        }
+    }
+
+    pub fn chunk_payments_address(&self) -> &Address {
+        match self {
+            Network::ArbitrumOne => &ARBITRUM_ONE_CHUNK_PAYMENTS_ADDRESS,
+            Network::Custom(custom) => &custom.chunk_payments_address,
+        }
+    }
+
+    pub async fn verify_chunk_payment(
+        &self,
+        tx_hash: TxHash,
+        quote_hash: QuoteHash,
+        reward_addr: Address,
+        amount: U256,
+        quote_expiration_timestamp_in_secs: u64,
+    ) -> Result<(), transaction::Error> {
+        verify_chunk_payment(
+            self,
+            tx_hash,
+            quote_hash,
+            reward_addr,
+            amount,
+            quote_expiration_timestamp_in_secs,
+        )
+        .await
+    }
+}
+
+impl Default for Network {
+    fn default() -> Self {
+        Self::ArbitrumOne
+    }
+}
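+
+// An illustrative test: `Network::Custom` carries its own RPC endpoint and
+// contract addresses. The localhost URL is a placeholder, and the two
+// addresses simply reuse the constants above to show the expected format.
+#[cfg(test)]
+mod network_config_tests {
+    use super::*;
+
+    #[test]
+    fn custom_network_carries_its_own_config() {
+        let network = Network::Custom(CustomNetwork::new(
+            "http://localhost:8545",
+            "0x4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C",
+            "0xF15BfEA73b6a551C5c2e66026e4eB3b69c1F602c",
+        ));
+        assert_eq!(network.identifier(), "custom");
+        // reqwest::Url normalizes the empty path to "/".
+        assert_eq!(network.rpc_url().as_str(), "http://localhost:8545/");
+    }
+}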
diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs
new file mode 100644
index 0000000000..5a19ea8d85
--- /dev/null
+++ b/evmlib/src/testnet.rs
@@ -0,0 +1,123 @@
+use crate::common::Address;
+use crate::contract::chunk_payments::ChunkPayments;
+use crate::contract::network_token::NetworkToken;
+use crate::{CustomNetwork, Network};
+use alloy::hex::ToHexExt;
+use alloy::network::{Ethereum, EthereumWallet};
+use alloy::node_bindings::{Anvil, AnvilInstance};
+use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller};
+use alloy::providers::{ProviderBuilder, ReqwestProvider};
+use alloy::signers::local::PrivateKeySigner;
+use alloy::transports::http::{Client, Http};
+
+pub struct Testnet {
+    anvil: AnvilInstance,
+    network_token_address: Address,
+    chunk_payments_address: Address,
+}
+
+impl Testnet {
+    /// Starts an Anvil node and automatically deploys the network token and chunk payments smart contracts.
+    pub async fn new(royalties_wallet: Address) -> Self {
+        let anvil = start_node();
+
+        let network_token = deploy_network_token_contract(&anvil).await;
+        let chunk_payments = deploy_chunk_payments_contract(
+            &anvil,
+            *network_token.contract.address(),
+            royalties_wallet,
+        )
+        .await;
+
+        Testnet {
+            anvil,
+            network_token_address: *network_token.contract.address(),
+            chunk_payments_address: *chunk_payments.contract.address(),
+        }
+    }
+
+    pub fn to_network(&self) -> Network {
+        let rpc_url = self
+            .anvil
+            .endpoint()
+            .parse()
+            .expect("Could not parse RPC URL");
+
+        Network::Custom(CustomNetwork {
+            rpc_url_http: rpc_url,
+            payment_token_address: self.network_token_address,
+            chunk_payments_address: self.chunk_payments_address,
+        })
+    }
+
+    pub fn default_wallet_private_key(&self) -> String {
+        // Fetches the private key from the first default Anvil account (Alice).
+        let signer: PrivateKeySigner = self.anvil.keys()[0].clone().into();
+        signer.to_bytes().encode_hex_with_prefix()
+    }
+}
+
+/// Runs a local Anvil node.
+pub fn start_node() -> AnvilInstance {
+    // Spin up a local Anvil node.
+    // Requires you to have Foundry installed: https://book.getfoundry.sh/getting-started/installation
+    Anvil::new()
+        .try_spawn()
+        .expect("Could not spawn Anvil node")
+}
+
+pub async fn deploy_network_token_contract(
+    anvil: &AnvilInstance,
+) -> NetworkToken<
+    Http<Client>,
+    FillProvider<
+        JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+        ReqwestProvider,
+        Http<Client>,
+        Ethereum,
+    >,
+    Ethereum,
+> {
+    // Set up signer from the first default Anvil account (Alice).
+    let signer: PrivateKeySigner = anvil.keys()[0].clone().into();
+    let wallet = EthereumWallet::from(signer);
+
+    let rpc_url = anvil.endpoint().parse().expect("Could not parse RPC URL");
+
+    let provider = ProviderBuilder::new()
+        .with_recommended_fillers()
+        .wallet(wallet)
+        .on_http(rpc_url);
+
+    // Deploy the contract.
+    NetworkToken::deploy(provider).await
+}
+
+pub async fn deploy_chunk_payments_contract(
+    anvil: &AnvilInstance,
+    token_address: Address,
+    royalties_wallet: Address,
+) -> ChunkPayments<
+    Http<Client>,
+    FillProvider<
+        JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+        ReqwestProvider,
+        Http<Client>,
+        Ethereum,
+    >,
+    Ethereum,
+> {
+    // Set up signer from the second default Anvil account (Bob).
+    let signer: PrivateKeySigner = anvil.keys()[1].clone().into();
+    let wallet = EthereumWallet::from(signer);
+
+    let rpc_url = anvil.endpoint().parse().expect("Could not parse RPC URL");
+
+    let provider = ProviderBuilder::new()
+        .with_recommended_fillers()
+        .wallet(wallet)
+        .on_http(rpc_url);
+
+    // Deploy the contract.
+    ChunkPayments::deploy(provider, token_address, royalties_wallet).await
+}
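+
+// A minimal end-to-end sketch. Like the rest of this module, it requires the
+// Foundry `anvil` binary on PATH; the royalties wallet is a random placeholder.
+#[cfg(test)]
+mod testnet_usage {
+    use super::*;
+    use crate::utils::dummy_address;
+
+    #[tokio::test]
+    async fn local_testnet_exposes_a_custom_network() {
+        let testnet = Testnet::new(dummy_address()).await;
+
+        // Both contracts were deployed, so the network config is fully populated.
+        let network = testnet.to_network();
+        assert_eq!(network.identifier(), "custom");
+
+        // Alice's key is pre-funded on Anvil and can be used to set up wallets.
+        assert!(!testnet.default_wallet_private_key().is_empty());
+    }
+}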
+ .ok_or(Error::BlockNotFound)?; + + // Check if payment was done within the quote expiration timeframe. + if quote_expiration_timestamp_in_secs < block.header.timestamp { + return Err(Error::QuoteExpired); + } + + let logs = + get_chunk_payment_event(network, block_number, quote_hash, reward_addr, amount).await?; + + for log in logs { + if log.transaction_hash != Some(tx_hash) { + // Wrong transaction. + continue; + } + + if let Ok(event) = ChunkPaymentEvent::try_from(log) { + // Check if the event matches what we expect. + // The smart contract handles royalties, so we don't have to check that. + if event.quote_hash == quote_hash + && event.reward_address == reward_addr + && event.amount >= amount + { + return Ok(()); + } + } + } + + Err(Error::EventProofNotFound) +} + +#[cfg(test)] +mod tests { + use crate::common::{Address, U256}; + use crate::transaction::{ + get_chunk_payment_event, get_transaction_receipt_by_hash, verify_chunk_payment, + }; + use crate::Network; + use alloy::hex::FromHex; + use alloy::primitives::b256; + + #[tokio::test] + async fn test_get_transaction_receipt_by_hash() { + let network = Network::ArbitrumOne; + + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); + + assert!(get_transaction_receipt_by_hash(&network, tx_hash) + .await + .unwrap() + .is_some()); + } + + #[tokio::test] + async fn test_get_chunk_payment_event() { + let network = Network::ArbitrumOne; + + let block_number: u64 = 250043261; + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let amount = U256::from(200); + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); + + let logs = + get_chunk_payment_event(&network, block_number, quote_hash, reward_address, amount) + .await + .unwrap(); + + assert_eq!(logs.len(), 1); + } + + #[tokio::test] + async fn test_verify_chunk_payment() { + let network = Network::ArbitrumOne; + + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let amount = U256::from(200); + + let result = verify_chunk_payment( + &network, + tx_hash, + quote_hash, + reward_address, + amount, + 4102441200, + ) + .await; + + assert!(result.is_ok(), "Error: {:?}", result.err()); + } +} diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs new file mode 100644 index 0000000000..08378e875d --- /dev/null +++ b/evmlib/src/utils.rs @@ -0,0 +1,17 @@ +use crate::common::{Address, Amount, Hash}; +use rand::Rng; + +/// Returns the amount of royalties expected for a certain transfer amount. +pub fn calculate_royalties_from_amount(amount: Amount) -> Amount { + amount / Amount::from(10) +} + +/// Generate a random Address. +pub fn dummy_address() -> Address { + Address::new(rand::rngs::OsRng.gen()) +} + +/// generate a random Hash. 
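+/// Like `dummy_address`, it draws its randomness from the OS RNG.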
+pub fn dummy_hash() -> Hash {
+    Hash::new(rand::rngs::OsRng.gen())
+}
diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs
new file mode 100644
index 0000000000..7f01497fb5
--- /dev/null
+++ b/evmlib/src/wallet.rs
@@ -0,0 +1,315 @@
+use std::collections::BTreeMap;
+
+use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256};
+use crate::contract::chunk_payments::{ChunkPayments, MAX_TRANSFERS_PER_TRANSACTION};
+use crate::contract::network_token::NetworkToken;
+use crate::contract::{chunk_payments, network_token};
+use crate::utils::calculate_royalties_from_amount;
+use crate::Network;
+use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder};
+use alloy::providers::fillers::{
+    ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, RecommendedFiller, WalletFiller,
+};
+use alloy::providers::{Identity, Provider, ProviderBuilder, ReqwestProvider};
+use alloy::rpc::types::TransactionRequest;
+use alloy::signers::local::{LocalSigner, PrivateKeySigner};
+use alloy::transports::http::{reqwest, Client, Http};
+use alloy::transports::{RpcError, TransportErrorKind};
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Private key is invalid")]
+    PrivateKeyInvalid,
+    #[error(transparent)]
+    RpcError(#[from] RpcError<TransportErrorKind>),
+    #[error("Network token contract error: {0}")]
+    NetworkTokenContract(#[from] network_token::Error),
+    #[error("Chunk payments contract error: {0}")]
+    ChunkPaymentsContract(#[from] chunk_payments::error::Error),
+}
+
+pub struct Wallet {
+    wallet: EthereumWallet,
+    network: Network,
+}
+
+impl Wallet {
+    /// Creates a new Wallet object with the specified Network and EthereumWallet.
+    pub fn new(network: Network, wallet: EthereumWallet) -> Self {
+        Self { wallet, network }
+    }
+
+    /// Convenience function that creates a new Wallet with a random EthereumWallet.
+    pub fn new_with_random_wallet(network: Network) -> Self {
+        Self::new(network, random())
+    }
+
+    /// Creates a new Wallet based on the given private_key. It will fail with Error::PrivateKeyInvalid if private_key is invalid.
+    pub fn new_from_private_key(network: Network, private_key: &str) -> Result<Self, Error> {
+        let wallet = from_private_key(private_key)?;
+        Ok(Self::new(network, wallet))
+    }
+
+    /// Returns the address of this wallet.
+    pub fn address(&self) -> Address {
+        wallet_address(&self.wallet)
+    }
+
+    /// Returns the raw balance of payment tokens for this wallet.
+    pub async fn balance_of_tokens(&self) -> Result<U256, network_token::Error> {
+        balance_of_tokens(wallet_address(&self.wallet), &self.network).await
+    }
+
+    /// Returns the raw balance of gas tokens for this wallet.
+    pub async fn balance_of_gas_tokens(&self) -> Result<U256, Error> {
+        balance_of_gas_tokens(wallet_address(&self.wallet), &self.network).await
+    }
+
+    /// Transfer a raw amount of payment tokens to another address.
+    pub async fn transfer_tokens(
+        &self,
+        to: Address,
+        amount: U256,
+    ) -> Result<TxHash, network_token::Error> {
+        transfer_tokens(self.wallet.clone(), &self.network, to, amount).await
+    }
+
+    /// Transfer a raw amount of gas tokens to another address.
+    pub async fn transfer_gas_tokens(
+        &self,
+        to: Address,
+        amount: U256,
+    ) -> Result<TxHash, Error> {
+        transfer_gas_tokens(self.wallet.clone(), &self.network, to, amount).await
+    }
+
+    /// Pays for a single quote. Returns transaction hash of the payment.
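+    /// Internally this is a single-entry batch via `pay_for_quotes`, so the
+    /// same token approval and royalties buffer apply.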
+    pub async fn pay_for_quote(
+        &self,
+        quote_hash: QuoteHash,
+        rewards_addr: Address,
+        amount: U256,
+    ) -> Result<TxHash, Error> {
+        self.pay_for_quotes([(quote_hash, rewards_addr, amount)])
+            .await
+            .map(|v| v.values().last().cloned().expect("Infallible"))
+            .map_err(|err| err.0)
+    }
+
+    /// Function for batch payments of quotes. It accepts an iterator of QuotePayment and returns
+    /// transaction hashes of the payments by quotes.
+    pub async fn pay_for_quotes<I: IntoIterator<Item = QuotePayment>>(
+        &self,
+        chunk_payments: I,
+    ) -> Result<BTreeMap<QuoteHash, TxHash>, PayForQuotesError> {
+        pay_for_quotes(self.wallet.clone(), &self.network, chunk_payments).await
+    }
+}
+
+/// Generate an EthereumWallet with a random private key.
+fn random() -> EthereumWallet {
+    let signer: PrivateKeySigner = LocalSigner::random();
+    EthereumWallet::from(signer)
+}
+
+/// Creates a wallet from a private key in HEX format.
+fn from_private_key(private_key: &str) -> Result<EthereumWallet, Error> {
+    let signer: PrivateKeySigner = private_key.parse().map_err(|_| Error::PrivateKeyInvalid)?;
+    Ok(EthereumWallet::from(signer))
+}
+
+// TODO(optimization): Find a way to reuse/persist contracts and/or a provider without the wallet nonce going out of sync
+
+#[allow(clippy::type_complexity)]
+fn http_provider(
+    rpc_url: reqwest::Url,
+) -> FillProvider<
+    JoinFill<JoinFill<JoinFill<Identity, GasFiller>, NonceFiller>, ChainIdFiller>,
+    ReqwestProvider,
+    Http<Client>,
+    Ethereum,
+> {
+    ProviderBuilder::new()
+        .with_recommended_fillers()
+        .on_http(rpc_url)
+}
+
+fn http_provider_with_wallet(
+    rpc_url: reqwest::Url,
+    wallet: EthereumWallet,
+) -> FillProvider<
+    JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+    ReqwestProvider,
+    Http<Client>,
+    Ethereum,
+> {
+    ProviderBuilder::new()
+        .with_recommended_fillers()
+        .wallet(wallet)
+        .on_http(rpc_url)
+}
+
+/// Returns the address of this wallet.
+pub fn wallet_address(wallet: &EthereumWallet) -> Address {
+    <EthereumWallet as NetworkWallet<Ethereum>>::default_signer_address(wallet)
+}
+
+/// Returns the raw balance of payment tokens for this wallet.
+pub async fn balance_of_tokens(
+    account: Address,
+    network: &Network,
+) -> Result<U256, network_token::Error> {
+    let provider = http_provider(network.rpc_url().clone());
+    let network_token = NetworkToken::new(*network.payment_token_address(), provider);
+    network_token.balance_of(account).await
+}
+
+/// Returns the raw balance of gas tokens for this wallet.
+pub async fn balance_of_gas_tokens(
+    account: Address,
+    network: &Network,
+) -> Result<U256, Error> {
+    let provider = http_provider(network.rpc_url().clone());
+    let balance = provider.get_balance(account).await?;
+    Ok(balance)
+}
+
+/// Approve an address / smart contract to spend this wallet's payment tokens.
+async fn approve_to_spend_tokens(
+    wallet: EthereumWallet,
+    network: &Network,
+    spender: Address,
+    amount: U256,
+) -> Result<TxHash, network_token::Error> {
+    let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
+    let network_token = NetworkToken::new(*network.payment_token_address(), provider);
+    network_token.approve(spender, amount).await
+}
+
+/// Transfer payment tokens from the supplied wallet to an address.
+pub async fn transfer_tokens(
+    wallet: EthereumWallet,
+    network: &Network,
+    receiver: Address,
+    amount: U256,
+) -> Result<TxHash, network_token::Error> {
+    let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
+    let network_token = NetworkToken::new(*network.payment_token_address(), provider);
+    network_token.transfer(receiver, amount).await
+}
+
+/// Transfer native/gas tokens from the supplied wallet to an address.
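+/// Builds a plain value-transfer transaction and waits (`watch`) for it to be
+/// mined before returning the transaction hash.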
+pub async fn transfer_gas_tokens(
+    wallet: EthereumWallet,
+    network: &Network,
+    receiver: Address,
+    amount: U256,
+) -> Result<TxHash, Error> {
+    let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
+    let tx = TransactionRequest::default()
+        .with_to(receiver)
+        .with_value(amount);
+
+    let tx_hash = provider.send_transaction(tx).await?.watch().await?;
+
+    Ok(tx_hash)
+}
+
+/// Contains the payment error and the already succeeded batch payments (if any).
+#[derive(Debug)]
+pub struct PayForQuotesError(pub Error, pub BTreeMap<QuoteHash, TxHash>);
+
+/// Use this wallet to pay for chunks in batched transfer transactions.
+/// If the amount of transfers is more than one transaction can contain, the transfers will be split up over multiple transactions.
+pub async fn pay_for_quotes<T: IntoIterator<Item = QuotePayment>>(
+    wallet: EthereumWallet,
+    network: &Network,
+    payments: T,
+) -> Result<BTreeMap<QuoteHash, TxHash>, PayForQuotesError> {
+    let payments: Vec<_> = payments.into_iter().collect();
+    let total_amount = payments.iter().map(|(_, _, amount)| amount).sum();
+    let royalties = calculate_royalties_from_amount(total_amount);
+
+    // 2 * royalties to have a small buffer for different rounding in the smart contract.
+    let total_amount_with_royalties = total_amount + (U256::from(2) * royalties);
+
+    let mut tx_hashes_by_quote = BTreeMap::new();
+
+    // Approve the contract to spend enough of the client's tokens.
+    approve_to_spend_tokens(
+        wallet.clone(),
+        network,
+        *network.chunk_payments_address(),
+        total_amount_with_royalties,
+    )
+    .await
+    .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?;
+
+    let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
+    let chunk_payments = ChunkPayments::new(*network.chunk_payments_address(), provider);
+
+    // Divide transfers over multiple transactions if they exceed the max per transaction.
+    let chunks = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION);
+
+    for batch in chunks {
+        let batch: Vec<QuotePayment> = batch.to_vec();
+
+        let tx_hash = chunk_payments
+            .pay_for_quotes(batch.clone())
+            .await
+            .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?;
+
+        for (quote_hash, _, _) in batch {
+            tx_hashes_by_quote.insert(quote_hash, tx_hash);
+        }
+    }
+
+    Ok(tx_hashes_by_quote)
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::common::Amount;
+    use crate::testnet::Testnet;
+    use crate::utils::dummy_address;
+    use crate::wallet::{from_private_key, Wallet};
+    use alloy::network::{Ethereum, EthereumWallet, NetworkWallet};
+    use alloy::primitives::address;
+
+    #[tokio::test]
+    async fn test_from_private_key() {
+        let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d";
+        let wallet = from_private_key(private_key).unwrap();
+        let account = <EthereumWallet as NetworkWallet<Ethereum>>::default_signer_address(&wallet);
+
+        // Assert that the addresses are the same, i.e.
+        // the wallet was successfully created from the private key
+        assert_eq!(
+            account,
+            address!("1975d01f46D70AAc0dd3fCf942d92650eE63C79A")
+        );
+    }
+
+    #[tokio::test]
+    async fn test_transfer_gas_tokens() {
+        let testnet = Testnet::new(dummy_address()).await;
+        let network = testnet.to_network();
+        let wallet =
+            Wallet::new_from_private_key(network.clone(), &testnet.default_wallet_private_key())
+                .unwrap();
+        let receiver_wallet = Wallet::new_with_random_wallet(network);
+        let transfer_amount = Amount::from(117);
+
+        let initial_balance = receiver_wallet.balance_of_gas_tokens().await.unwrap();
+
+        assert_eq!(initial_balance, Amount::from(0));
+
+        let _ = wallet
+            .transfer_gas_tokens(receiver_wallet.address(), transfer_amount)
+            .await
+            .unwrap();
+
+        let final_balance = receiver_wallet.balance_of_gas_tokens().await.unwrap();
+
+        assert_eq!(final_balance, transfer_amount);
+    }
+}
diff --git a/evmlib/tests/chunk_payments.rs b/evmlib/tests/chunk_payments.rs
new file mode 100644
index 0000000000..ce5e24ce33
--- /dev/null
+++ b/evmlib/tests/chunk_payments.rs
@@ -0,0 +1,112 @@
+mod common;
+
+use crate::common::quote::random_quote_payment;
+use crate::common::ROYALTIES_WALLET;
+use alloy::network::{Ethereum, EthereumWallet};
+use alloy::node_bindings::AnvilInstance;
+use alloy::primitives::utils::parse_ether;
+use alloy::providers::ext::AnvilApi;
+use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller};
+use alloy::providers::{ProviderBuilder, ReqwestProvider, WalletProvider};
+use alloy::signers::local::{LocalSigner, PrivateKeySigner};
+use alloy::transports::http::{Client, Http};
+use evmlib::common::U256;
+use evmlib::contract::chunk_payments::{ChunkPayments, MAX_TRANSFERS_PER_TRANSACTION};
+use evmlib::contract::network_token::NetworkToken;
+use evmlib::testnet::{deploy_chunk_payments_contract, deploy_network_token_contract, start_node};
+use evmlib::wallet::wallet_address;
+
+async fn setup() -> (
+    AnvilInstance,
+    NetworkToken<
+        Http<Client>,
+        FillProvider<
+            JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+            ReqwestProvider,
+            Http<Client>,
+            Ethereum,
+        >,
+        Ethereum,
+    >,
+    ChunkPayments<
+        Http<Client>,
+        FillProvider<
+            JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+            ReqwestProvider,
+            Http<Client>,
+            Ethereum,
+        >,
+        Ethereum,
+    >,
+) {
+    let anvil = start_node();
+
+    let network_token = deploy_network_token_contract(&anvil).await;
+
+    let chunk_payments =
+        deploy_chunk_payments_contract(&anvil, *network_token.contract.address(), ROYALTIES_WALLET)
+            .await;
+
+    (anvil, network_token, chunk_payments)
+}
+
+#[allow(clippy::unwrap_used)]
+#[allow(clippy::type_complexity)]
+async fn provider_with_gas_funded_wallet(
+    anvil: &AnvilInstance,
+) -> FillProvider<
+    JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+    ReqwestProvider,
+    Http<Client>,
+    Ethereum,
+> {
+    let signer: PrivateKeySigner = LocalSigner::random();
+    let wallet = EthereumWallet::from(signer);
+
+    let rpc_url = anvil.endpoint().parse().unwrap();
+
+    let provider = ProviderBuilder::new()
+        .with_recommended_fillers()
+        .wallet(wallet)
+        .on_http(rpc_url);
+
+    let account = wallet_address(provider.wallet());
+
+    // Fund the wallet with plenty of gas tokens
+    provider
+        .anvil_set_balance(account, parse_ether("1000").expect(""))
+        .await
+        .unwrap();
+
+    provider
+}
+
+#[tokio::test]
+async fn test_deploy() {
+    setup().await;
+}
+
+#[tokio::test]
+async fn test_pay_for_quotes() {
+    let (_anvil, network_token, mut chunk_payments) = setup().await;
+
+    let mut quote_payments = vec![];
+
+    for _ in 0..MAX_TRANSFERS_PER_TRANSACTION {
+        let quote_payment = random_quote_payment();
+        quote_payments.push(quote_payment);
+    }
+
+    let _ = network_token
+        .approve(*chunk_payments.contract.address(), U256::MAX)
+        .await
+        .unwrap();
+
+    // Contract provider has a different account coupled to it,
+    // so we set it to the same as the network token contract
+    chunk_payments.set_provider(network_token.contract.provider().clone());
+
+    let result = chunk_payments.pay_for_quotes(quote_payments).await;
+
+    assert!(result.is_ok(), "Failed with error: {:?}", result.err());
+}
diff --git a/evmlib/tests/common/mod.rs b/evmlib/tests/common/mod.rs
new file mode 100644
index 0000000000..cc82e5bf16
--- /dev/null
+++ b/evmlib/tests/common/mod.rs
@@ -0,0 +1,5 @@
+use alloy::primitives::{address, Address};
+
+pub mod quote;
+
+pub const ROYALTIES_WALLET: Address = address!("385e7887E5b41750E3679Da787B943EC42f37d75");
diff --git a/evmlib/tests/common/quote.rs b/evmlib/tests/common/quote.rs
new file mode 100644
index 0000000000..d3d4f574ca
--- /dev/null
+++ b/evmlib/tests/common/quote.rs
@@ -0,0 +1,9 @@
+use evmlib::common::{Amount, QuotePayment};
+use evmlib::utils::{dummy_address, dummy_hash};
+
+pub fn random_quote_payment() -> QuotePayment {
+    let quote_hash = dummy_hash();
+    let reward_address = dummy_address();
+    let amount = Amount::from(200);
+    (quote_hash, reward_address, amount)
+}
diff --git a/evmlib/tests/network_token.rs b/evmlib/tests/network_token.rs
new file mode 100644
index 0000000000..4f7a521abd
--- /dev/null
+++ b/evmlib/tests/network_token.rs
@@ -0,0 +1,83 @@
+mod common;
+
+use alloy::network::{Ethereum, EthereumWallet, NetworkWallet};
+use alloy::node_bindings::AnvilInstance;
+use alloy::primitives::U256;
+use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller};
+use alloy::providers::{ReqwestProvider, WalletProvider};
+use alloy::signers::local::PrivateKeySigner;
+use alloy::transports::http::{Client, Http};
+use evmlib::contract::network_token::NetworkToken;
+use evmlib::testnet::{deploy_network_token_contract, start_node};
+use evmlib::wallet::wallet_address;
+use std::str::FromStr;
+
+async fn setup() -> (
+    AnvilInstance,
+    NetworkToken<
+        Http<Client>,
+        FillProvider<
+            JoinFill<RecommendedFiller, WalletFiller<EthereumWallet>>,
+            ReqwestProvider,
+            Http<Client>,
+            Ethereum,
+        >,
+        Ethereum,
+    >,
+) {
+    let anvil = start_node();
+
+    let network_token = deploy_network_token_contract(&anvil).await;
+
+    (anvil, network_token)
+}
+
+#[tokio::test]
+async fn test_deploy() {
+    setup().await;
+}
+
+#[tokio::test]
+async fn test_balance_of() {
+    let (_anvil, contract) = setup().await;
+
+    let account = <EthereumWallet as NetworkWallet<Ethereum>>::default_signer_address(
+        contract.contract.provider().wallet(),
+    );
+
+    let balance = contract.balance_of(account).await.unwrap();
+
+    assert_eq!(
+        balance,
+        U256::from_str("20000000000000000000000000").unwrap()
+    );
+}
+
+#[tokio::test]
+async fn test_approve() {
+    let (_anvil, network_token) = setup().await;
+
+    let account = wallet_address(network_token.contract.provider().wallet());
+
+    let spend_value = U256::from(1);
+    let spender = PrivateKeySigner::random();
+
+    // Approve for the spender to spend a value from the funds of the owner (our default account).
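+    // `approve` submits an ERC20 allowance transaction; the allowance is read
+    // back below to confirm it was set.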
+    let approval_result = network_token.approve(spender.address(), spend_value).await;
+
+    assert!(
+        approval_result.is_ok(),
+        "Approval failed with error: {:?}",
+        approval_result.err()
+    );
+
+    let allowance = network_token
+        .contract
+        .allowance(account, spender.address())
+        .call()
+        .await
+        .unwrap()
+        ._0;
+
+    assert_eq!(allowance, spend_value);
+}
diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs
new file mode 100644
index 0000000000..a212170040
--- /dev/null
+++ b/evmlib/tests/wallet.rs
@@ -0,0 +1,112 @@
+mod common;
+
+use crate::common::quote::random_quote_payment;
+use crate::common::ROYALTIES_WALLET;
+use alloy::network::EthereumWallet;
+use alloy::node_bindings::AnvilInstance;
+use alloy::primitives::utils::parse_ether;
+use alloy::providers::ext::AnvilApi;
+use alloy::providers::{ProviderBuilder, WalletProvider};
+use alloy::signers::local::{LocalSigner, PrivateKeySigner};
+use evmlib::common::{Amount, TxHash};
+use evmlib::contract::chunk_payments::MAX_TRANSFERS_PER_TRANSACTION;
+use evmlib::testnet::{deploy_chunk_payments_contract, deploy_network_token_contract, start_node};
+use evmlib::transaction::verify_chunk_payment;
+use evmlib::wallet::{transfer_tokens, wallet_address, Wallet};
+use evmlib::{CustomNetwork, Network};
+use std::collections::HashSet;
+
+#[allow(clippy::unwrap_used)]
+async fn local_testnet() -> (AnvilInstance, Network, EthereumWallet) {
+    let anvil = start_node();
+    let rpc_url = anvil.endpoint().parse().unwrap();
+    let network_token = deploy_network_token_contract(&anvil).await;
+    let payment_token_address = *network_token.contract.address();
+    let chunk_payments =
+        deploy_chunk_payments_contract(&anvil, payment_token_address, ROYALTIES_WALLET).await;
+
+    (
+        anvil,
+        Network::Custom(CustomNetwork {
+            rpc_url_http: rpc_url,
+            payment_token_address,
+            chunk_payments_address: *chunk_payments.contract.address(),
+        }),
+        network_token.contract.provider().wallet().clone(),
+    )
+}
+
+#[allow(clippy::unwrap_used)]
+async fn funded_wallet(network: &Network, genesis_wallet: EthereumWallet) -> Wallet {
+    let signer: PrivateKeySigner = LocalSigner::random();
+    let wallet = EthereumWallet::from(signer);
+    let account = wallet_address(&wallet);
+
+    let provider = ProviderBuilder::new()
+        .with_recommended_fillers()
+        .wallet(genesis_wallet.clone())
+        .on_http(network.rpc_url().clone());
+
+    // Fund the wallet with plenty of gas tokens
+    provider
+        .anvil_set_balance(account, parse_ether("1000").expect(""))
+        .await
+        .unwrap();
+
+    // Fund the wallet with plenty of ERC20 tokens
+    transfer_tokens(
+        genesis_wallet,
+        network,
+        account,
+        Amount::from(9999999999_u64),
+    )
+    .await
+    .unwrap();
+
+    Wallet::new(network.clone(), wallet)
+}
+
+#[tokio::test]
+async fn test_pay_for_quotes_and_chunk_payment_verification() {
+    const TRANSFERS: usize = 600;
+    const EXPIRATION_TIMESTAMP_IN_SECS: u64 = 4102441200; // The year 2100
+
+    let (_anvil, network, genesis_wallet) = local_testnet().await;
+    let wallet = funded_wallet(&network, genesis_wallet).await;
+
+    let mut quote_payments = vec![];
+
+    for _ in 0..TRANSFERS {
+        let quote = random_quote_payment();
+        quote_payments.push(quote);
+    }
+
+    let tx_hashes = wallet.pay_for_quotes(quote_payments.clone()).await.unwrap();
+
+    let unique_tx_hashes: HashSet<TxHash> = tx_hashes.values().cloned().collect();
+
+    assert_eq!(
+        unique_tx_hashes.len(),
+        TRANSFERS.div_ceil(MAX_TRANSFERS_PER_TRANSACTION)
+    );
+
+    for quote_payment in quote_payments.iter() {
+        let tx_hash = *tx_hashes.get(&quote_payment.0).unwrap();
+
+        let result =
verify_chunk_payment( + &network, + tx_hash, + quote_payment.0, + quote_payment.1, + quote_payment.2, + EXPIRATION_TIMESTAMP_IN_SECS, + ) + .await; + + assert!( + result.is_ok(), + "Verification failed for: {quote_payment:?}. Error: {:?}", + result.err() + ); + } +} diff --git a/sn_evm/CHANGELOG.md b/sn_evm/CHANGELOG.md new file mode 100644 index 0000000000..ec4c00a34f --- /dev/null +++ b/sn_evm/CHANGELOG.md @@ -0,0 +1,917 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.18.6](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.5...sn_transfers-v0.18.6) - 2024-06-04 + +### Other +- release +- release + +## [0.18.5](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.4...sn_transfers-v0.18.5) - 2024-06-04 + +### Fixed +- *(transfer)* mismatched key shall result in decryption error + +### Other +- *(transfer)* make discord_name decryption backward compatible + +## [0.18.4](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.3...sn_transfers-v0.18.4) - 2024-06-03 + +### Fixed +- enable compile time sk setting for faucet/genesis + +## [0.18.2](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.1...sn_transfers-v0.18.2) - 2024-06-03 + +### Added +- *(faucet)* write foundation cash note to disk +- *(keys)* enable compile or runtime override of keys + +### Other +- use secrets during build process + +## [0.18.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.18.0...sn_transfers-v0.18.1) - 2024-05-24 + +### Added +- use default keys for genesis, or override +- use different key for payment forward +- remove two uneeded env vars +- pass genesis_cn pub fields separate to hide sk +- hide genesis keypair +- hide genesis keypair +- pass sk_str via cli opt +- *(node)* use separate keys of Foundation and Royalty +- *(wallet)* ensure genesis wallet attempts to load from local on init first +- *(faucet)* make gifting server feat dependent +- tracking beta rewards from the DAG +- *(audit)* collect payment forward statistics +- *(node)* periodically forward reward to specific address +- spend reason enum and sized cipher + +### Fixed +- correct genesis_pk naming +- genesis_cn public fields generated from hard coded value +- invalid spend reason in data payments + +### Other +- *(transfers)* comment and naming updates for clarity +- log genesis PK +- rename improperly named foundation_key +- reconfigure local network owner args +- *(refactor)* stabilise node size to 4k records, +- use const for default user or owner +- resolve errors after reverts +- Revert "feat(node): make spend and cash_note reason field configurable" +- Revert "feat: spend shows the purposes of outputs created for" +- Revert "chore: rename output reason to purpose for clarity" +- Revert "feat(cli): track spend creation reasons during audit" +- Revert "chore: refactor CASH_NOTE_REASON strings to consts" +- Revert "chore: address review comments" +- *(node)* use proper SpendReason enum +- add consts + +## [0.18.0-alpha.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.18.0-alpha.0...sn_transfers-v0.18.0-alpha.1) - 2024-05-07 + +### Added +- *(cli)* track spend creation reasons during audit +- spend shows the purposes of outputs created for +- *(node)* make spend and cash_note reason field configurable +- *(cli)* 
generate a mnemonic as wallet basis if no wallet found +- *(transfers)* do not genereate wallet by default +- [**breaking**] renamings in CashNote +- [**breaking**] rename token to amount in Spend +- unit testing dag, double spend poisoning tweaks + +### Fixed +- create faucet via account load or generation +- transfer tests for HotWallet creation +- *(client)* move acct_packet mnemonic into client layer +- typo + +### Other +- *(versions)* sync versions with latest crates.io vs +- address review comments +- refactor CASH_NOTE_REASON strings to consts +- rename output reason to purpose for clarity +- addres review comments +- *(transfers)* reduce error size +- *(deps)* bump dependencies +- *(transfer)* unit tests for PaymentQuote +- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 +- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 + +## [0.17.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.17.0...sn_transfers-v0.17.1) - 2024-03-28 + +### Added +- *(transfers)* implement WalletApi to expose common methods + +### Fixed +- *(uploader)* clarify the use of root and wallet dirs + +## [0.17.0](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.5...sn_transfers-v0.17.0) - 2024-03-27 + +### Added +- *(faucet)* rate limit based upon wallet locks +- *(transfers)* enable client to check if a quote has expired +- *(transfers)* [**breaking**] support multiple payments for the same xorname +- use Arc inside Client, Network to reduce clone cost + +### Other +- *(node)* refactor pricing metrics + +## [0.16.5](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.4...sn_transfers-v0.16.5) - 2024-03-21 + +### Added +- refactor DAG, improve error management and security +- dag error recording + +## [0.16.4](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.3...sn_transfers-v0.16.4) - 2024-03-14 + +### Added +- refactor spend validation + +### Other +- improve code quality + +## [0.16.3-alpha.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.3-alpha.0...sn_transfers-v0.16.3-alpha.1) - 2024-03-08 + +### Added +- [**breaking**] pretty serialisation for unique keys + +## [0.16.2](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.1...sn_transfers-v0.16.2) - 2024-03-06 + +### Other +- clean swarm commands errs and spend errors + +## [0.16.1](https://github.com/joshuef/safe_network/compare/sn_transfers-v0.16.0...sn_transfers-v0.16.1) - 2024-03-05 + +### Added +- provide `faucet add` command + +## [0.16.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.9...sn_transfers-v0.16.0) - 2024-02-23 + +### Added +- use the old serialisation as default, add some docs +- warn about old format when detected +- implement backwards compatible deserialisation +- [**breaking**] custom serde for unique keys + +## 
[0.15.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.7...sn_transfers-v0.15.8) - 2024-02-20 + +### Added +- spend and DAG utilities + +## [0.15.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.6...sn_transfers-v0.15.7) - 2024-02-20 + +### Added +- *(folders)* move folders/files metadata out of Folders entries + +## [0.15.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.5...sn_transfers-v0.15.6) - 2024-02-15 + +### Added +- *(client)* keep payee as part of storage payment cache + +### Other +- minor doc change based on peer review + +## [0.15.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.4...sn_transfers-v0.15.5) - 2024-02-14 + +### Other +- *(refactor)* move mod.rs files the modern way + +## [0.15.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.3...sn_transfers-v0.15.4) - 2024-02-13 + +### Fixed +- manage the genesis spend case + +## [0.15.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.2...sn_transfers-v0.15.3) - 2024-02-08 + +### Other +- copyright update to current year + +## [0.15.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.1...sn_transfers-v0.15.2) - 2024-02-07 + +### Added +- extendable local state DAG in cli + +## [0.15.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.15.0...sn_transfers-v0.15.1) - 2024-02-06 + +### Fixed +- *(node)* derive reward_key from main keypair + +## [0.15.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.43...sn_transfers-v0.15.0) - 2024-02-02 + +### Other +- *(cli)* minor changes to cli comments +- [**breaking**] renaming LocalWallet to HotWallet as it holds the secret key for signing tx +- *(readme)* add instructions of out-of-band transaction signing + +## [0.14.43](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.42...sn_transfers-v0.14.43) - 2024-01-29 + +### Other +- *(sn_transfers)* making some functions/helpers to be constructor methods of public structs + +## [0.14.42](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.41...sn_transfers-v0.14.42) - 2024-01-25 + +### Added +- client webtransport-websys feat + +## [0.14.41](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.40...sn_transfers-v0.14.41) - 2024-01-24 + +### Fixed +- dont lock files with wasm + +### Other +- make tokio dev dep for transfers + +## [0.14.40](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.39...sn_transfers-v0.14.40) - 2024-01-22 + +### Added +- spend dag utils + +## [0.14.39](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.38...sn_transfers-v0.14.39) - 2024-01-18 + +### Added +- *(faucet)* download snapshot of maid balances + +## [0.14.38](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.37...sn_transfers-v0.14.38) - 2024-01-16 + +### Fixed +- *(wallet)* remove unconfirmed_spends file from disk when all confirmed + +## [0.14.37](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.36...sn_transfers-v0.14.37) - 2024-01-15 + +### Fixed +- *(client)* do not store paying-out cash_notes into disk +- *(client)* cache payments via disk instead of memory map + +### Other +- *(client)* collect wallet handling time statistics + +## [0.14.36](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.35...sn_transfers-v0.14.36) - 2024-01-10 + +### Added +- *(transfers)* exposing APIs to build and send cashnotes from transactions signed offline +- 
*(transfers)* include the derivation index of inputs for generated unsigned transactions +- *(transfers)* exposing an API to create unsigned transfers to be signed offline later on + +### Other +- fixup send_spends and use ExcessiveNanoValue error +- *(transfers)* solving clippy issues about complex fn args + +## [0.14.35](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.34...sn_transfers-v0.14.35) - 2024-01-09 + +### Added +- *(client)* extra sleep between chunk verification + +## [0.14.34](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.33...sn_transfers-v0.14.34) - 2024-01-09 + +### Added +- *(cli)* safe wallet create saves new key + +## [0.14.33](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.32...sn_transfers-v0.14.33) - 2024-01-08 + +### Other +- more doc updates to readme files + +## [0.14.32](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.31...sn_transfers-v0.14.32) - 2024-01-05 + +### Other +- add clippy unwrap lint to workspace + +## [0.14.31](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.30...sn_transfers-v0.14.31) - 2023-12-19 + +### Added +- network royalties through audit POC + +## [0.14.30](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.29...sn_transfers-v0.14.30) - 2023-12-18 + +### Added +- *(transfers)* spent keys and created for others removed +- *(transfers)* add api for cleaning up CashNotes + +## [0.14.29](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.28...sn_transfers-v0.14.29) - 2023-12-14 + +### Other +- *(protocol)* print the first six hex characters for every address type + +## [0.14.28](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.27...sn_transfers-v0.14.28) - 2023-12-12 + +### Added +- *(transfers)* make wallet read resiliant to concurrent writes + +## [0.14.27](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.26...sn_transfers-v0.14.27) - 2023-12-06 + +### Added +- *(wallet)* basic impl of a watch-only wallet API + +### Other +- *(wallet)* adding unit tests for watch-only wallet impl. 
+- *(wallet)* another refactoring removing more redundant and unused wallet code +- *(wallet)* major refactoring removing redundant and unused code + +## [0.14.26](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.25...sn_transfers-v0.14.26) - 2023-12-06 + +### Other +- remove some needless cloning +- remove needless pass by value +- use inline format args +- add boilerplate for workspace lints + +## [0.14.25](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.24...sn_transfers-v0.14.25) - 2023-12-05 + +### Fixed +- protect against amounts tampering and incomplete spends attack + +## [0.14.24](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.23...sn_transfers-v0.14.24) - 2023-12-05 + +### Other +- *(transfers)* tidier debug methods for Transactions + +## [0.14.23](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.22...sn_transfers-v0.14.23) - 2023-11-29 + +### Added +- verify all the way to genesis +- verify spends through the cli + +### Fixed +- genesis check security flaw + +## [0.14.22](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.21...sn_transfers-v0.14.22) - 2023-11-28 + +### Added +- *(transfers)* serialise wallets and transfers data with MsgPack instead of bincode + +## [0.14.21](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.20...sn_transfers-v0.14.21) - 2023-11-23 + +### Added +- move derivation index random method to itself + +## [0.14.20](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.19...sn_transfers-v0.14.20) - 2023-11-22 + +### Other +- optimise log format of DerivationIndex + +## [0.14.19](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.18...sn_transfers-v0.14.19) - 2023-11-20 + +### Added +- *(networking)* shortcircuit response sending for replication + +## [0.14.18](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.17...sn_transfers-v0.14.18) - 2023-11-20 + +### Added +- quotes + +### Fixed +- use actual quote instead of dummy + +## [0.14.17](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.16...sn_transfers-v0.14.17) - 2023-11-16 + +### Added +- massive cleaning to prepare for quotes + +### Fixed +- wrong royaltie amount +- cashnote mixup when 2 of them are for the same node + +## [0.14.16](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.15...sn_transfers-v0.14.16) - 2023-11-15 + +### Added +- *(royalties)* make royalties payment to be 15% of the total storage cost + +## [0.14.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.14...sn_transfers-v0.14.15) - 2023-11-14 + +### Other +- *(royalties)* verify royalties fees amounts + +## [0.14.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.13...sn_transfers-v0.14.14) - 2023-11-10 + +### Added +- *(cli)* attempt to reload wallet from disk if storing it fails when receiving transfers online +- *(cli)* new cmd to listen to royalties payments and deposit them into a local wallet + +## [0.14.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.12...sn_transfers-v0.14.13) - 2023-11-10 + +### Other +- *(transfers)* more logs around payments... 
+ +## [0.14.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.11...sn_transfers-v0.14.12) - 2023-11-09 + +### Other +- simplify when construct payess for storage + +## [0.14.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.10...sn_transfers-v0.14.11) - 2023-11-02 + +### Added +- keep transfers in mem instead of heavy cashnotes + +## [0.14.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.9...sn_transfers-v0.14.10) - 2023-11-01 + +### Other +- *(node)* don't log the transfers events + +## [0.14.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.8...sn_transfers-v0.14.9) - 2023-10-30 + +### Added +- `bincode::serialize` into `Bytes` without intermediate allocation + +## [0.14.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.7...sn_transfers-v0.14.8) - 2023-10-27 + +### Added +- *(rpc_client)* show total accumulated balance when decrypting transfers received + +## [0.14.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.6...sn_transfers-v0.14.7) - 2023-10-26 + +### Fixed +- typos + +## [0.14.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.5...sn_transfers-v0.14.6) - 2023-10-24 + +### Fixed +- *(tests)* nodes rewards tests to account for repayments amounts + +## [0.14.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.4...sn_transfers-v0.14.5) - 2023-10-24 + +### Added +- *(payments)* adding unencrypted CashNotes for network royalties and verifying correct payment +- *(payments)* network royalties payment made when storing content + +### Other +- *(api)* wallet APIs to account for network royalties fees when returning total cost paid for storage + +## [0.14.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.3...sn_transfers-v0.14.4) - 2023-10-24 + +### Fixed +- *(networking)* only validate _our_ transfers at nodes + +## [0.14.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.2...sn_transfers-v0.14.3) - 2023-10-18 + +### Other +- Revert "feat: keep transfers in mem instead of mem and i/o heavy cashnotes" + +## [0.14.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.1...sn_transfers-v0.14.2) - 2023-10-18 + +### Added +- keep transfers in mem instead of mem and i/o heavy cashnotes + +## [0.14.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.14.0...sn_transfers-v0.14.1) - 2023-10-17 + +### Fixed +- *(transfers)* dont overwrite existing payment transactions when we top up + +### Other +- adding comments and cleanup around quorum / payment fixes + +## [0.14.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.12...sn_transfers-v0.14.0) - 2023-10-12 + +### Added +- *(sn_transfers)* dont load Cns from disk, store value along w/ pubkey in wallet +- include protection for deposits + +### Fixed +- remove uneeded hideous key Clone trait +- deadlock +- place lock on another file to prevent windows lock issue +- lock wallet file instead of dir +- wallet concurrent access bugs + +### Other +- more detailed logging when client creating store cash_note + +## [0.13.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.11...sn_transfers-v0.13.12) - 2023-10-11 + +### Fixed +- expose RecordMismatch errors and cleanup wallet if we hit that + +### Other +- *(transfers)* add somre more clarity around DoubleSpendAttemptedForCashNotes +- *(docs)* cleanup comments and docs +- *(transfers)* remove pointless api + +## 
[0.13.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.10...sn_transfers-v0.13.11) - 2023-10-10 + +### Added +- *(transfer)* special event for transfer notifs over gossipsub + +## [0.13.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.9...sn_transfers-v0.13.10) - 2023-10-10 + +### Other +- *(sn_transfers)* improve transaction build mem perf + +## [0.13.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.8...sn_transfers-v0.13.9) - 2023-10-06 + +### Added +- feat!(sn_transfers): unify store api for wallet + +### Fixed +- readd api to load cash_notes from disk, update tests + +### Other +- update comments around RecordNotFound +- remove deposit vs received cashnote disctinction + +## [0.13.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.7...sn_transfers-v0.13.8) - 2023-10-06 + +### Other +- fix new clippy errors + +## [0.13.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.6...sn_transfers-v0.13.7) - 2023-10-05 + +### Added +- *(metrics)* enable node monitoring through dockerized grafana instance + +## [0.13.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.5...sn_transfers-v0.13.6) - 2023-10-05 + +### Fixed +- *(client)* remove concurrency limitations + +## [0.13.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.4...sn_transfers-v0.13.5) - 2023-10-05 + +### Fixed +- *(sn_transfers)* be sure we store CashNotes before writing the wallet file + +## [0.13.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.3...sn_transfers-v0.13.4) - 2023-10-05 + +### Added +- use progress bars on `files upload` + +## [0.13.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.2...sn_transfers-v0.13.3) - 2023-10-04 + +### Added +- *(sn_transfers)* impl From for NanoTokens + +### Fixed +- *(sn_transfers)* reuse payment overflow fix + +### Other +- *(sn_transfers)* clippy and fmt +- *(sn_transfers)* add reuse cashnote cases +- separate method and write test + +## [0.13.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.1...sn_transfers-v0.13.2) - 2023-10-02 + +### Added +- remove unused fee output + +## [0.13.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.13.0...sn_transfers-v0.13.1) - 2023-09-28 + +### Added +- client to client transfers + +## [0.13.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.2...sn_transfers-v0.13.0) - 2023-09-27 + +### Added +- deep clean sn_transfers, reduce exposition, remove dead code + +### Fixed +- benches +- uncomment benches in Cargo.toml + +### Other +- optimise bench +- improve cloning +- udeps + +## [0.12.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.1...sn_transfers-v0.12.2) - 2023-09-25 + +### Other +- *(transfers)* unused variable removal + +## [0.12.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.12.0...sn_transfers-v0.12.1) - 2023-09-25 + +### Other +- udeps +- cleanup renamings in sn_transfers +- remove mostly outdated mocks + +## [0.12.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.15...sn_transfers-v0.12.0) - 2023-09-21 + +### Added +- rename utxo by CashNoteRedemption +- dusking DBCs + +### Fixed +- udeps +- incompatible hardcoded value, add logs + +### Other +- remove dbc dust comments +- rename Nano NanoTokens +- improve naming + +## [0.11.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.14...sn_transfers-v0.11.15) - 2023-09-20 + 
+### Other +- major dep updates + +## [0.11.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.13...sn_transfers-v0.11.14) - 2023-09-18 + +### Added +- serialisation for transfers for out of band sending +- generic transfer receipt + +### Other +- add more docs +- add some docs + +## [0.11.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.12...sn_transfers-v0.11.13) - 2023-09-15 + +### Other +- refine log levels + +## [0.11.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.11...sn_transfers-v0.11.12) - 2023-09-14 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.10...sn_transfers-v0.11.11) - 2023-09-13 + +### Added +- *(register)* paying nodes for Register storage + +## [0.11.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.9...sn_transfers-v0.11.10) - 2023-09-12 + +### Added +- add tx and parent spends verification +- chunk payments using UTXOs instead of DBCs + +### Other +- use updated sn_dbc + +## [0.11.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.8...sn_transfers-v0.11.9) - 2023-09-11 + +### Other +- *(release)* sn_cli-v0.81.29/sn_client-v0.88.16/sn_registers-v0.2.6/sn_node-v0.89.29/sn_testnet-v0.2.120/sn_protocol-v0.6.6 + +## [0.11.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.7...sn_transfers-v0.11.8) - 2023-09-08 + +### Added +- *(client)* repay for chunks if they cannot be validated + +## [0.11.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.6...sn_transfers-v0.11.7) - 2023-09-05 + +### Other +- *(release)* sn_cli-v0.81.21/sn_client-v0.88.11/sn_registers-v0.2.5/sn_node-v0.89.21/sn_testnet-v0.2.112/sn_protocol-v0.6.5 + +## [0.11.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.5...sn_transfers-v0.11.6) - 2023-09-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.4...sn_transfers-v0.11.5) - 2023-09-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.3...sn_transfers-v0.11.4) - 2023-09-01 + +### Other +- *(transfers)* batch dbc storage +- *(transfers)* store dbcs by ref to avoid more clones +- *(transfers)* dont pass by value, this is a clone! 
+- *(client)* make unconfonfirmed txs btreeset, remove unnecessary cloning +- *(transfers)* improve update_local_wallet + +## [0.11.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.2...sn_transfers-v0.11.3) - 2023-08-31 + +### Other +- remove unused async + +## [0.11.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.1...sn_transfers-v0.11.2) - 2023-08-31 + +### Added +- *(node)* node to store rewards in a local wallet + +### Fixed +- *(cli)* don't try to create wallet paths when checking balance + +## [0.11.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.11.0...sn_transfers-v0.11.1) - 2023-08-31 + +### Other +- updated the following local packages: sn_protocol + +## [0.11.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.28...sn_transfers-v0.11.0) - 2023-08-30 + +### Added +- one transfer per data set, mapped dbcs to content addrs +- [**breaking**] pay each chunk holder direct +- feat!(protocol): gets keys with GetStoreCost +- feat!(protocol): get price and pay for each chunk individually +- feat!(protocol): remove chunk merkletree to simplify payment + +### Fixed +- *(tokio)* remove tokio fs + +### Other +- *(deps)* bump tokio to 1.32.0 +- *(client)* refactor client wallet to reduce dbc clones +- *(client)* pass around content payments map mut ref +- *(client)* error out early for invalid transfers + +## [0.10.28](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.27...sn_transfers-v0.10.28) - 2023-08-24 + +### Other +- rust 1.72.0 fixes + +## [0.10.27](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.26...sn_transfers-v0.10.27) - 2023-08-18 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.26](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.25...sn_transfers-v0.10.26) - 2023-08-11 + +### Added +- *(transfers)* add resend loop for unconfirmed txs + +## [0.10.25](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.24...sn_transfers-v0.10.25) - 2023-08-10 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.24](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.23...sn_transfers-v0.10.24) - 2023-08-08 + +### Added +- *(transfers)* add get largest dbc for spending + +### Fixed +- *(node)* prevent panic in storage calcs + +### Other +- *(faucet)* provide more money +- tidy store cost code + +## [0.10.23](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.22...sn_transfers-v0.10.23) - 2023-08-07 + +### Other +- rename network addresses confusing name method to xorname + +## [0.10.22](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.21...sn_transfers-v0.10.22) - 2023-08-01 + +### Other +- *(networking)* use TOTAL_SUPPLY from sn_transfers + +## [0.10.21](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.20...sn_transfers-v0.10.21) - 2023-08-01 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.20](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.19...sn_transfers-v0.10.20) - 2023-08-01 + +### Other +- *(release)* sn_cli-v0.80.17/sn_client-v0.87.0/sn_registers-v0.2.0/sn_node-v0.88.6/sn_testnet-v0.2.44/sn_protocol-v0.4.2 + +## [0.10.19](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.18...sn_transfers-v0.10.19) - 2023-07-31 + +### Fixed +- *(test)* using proper wallets during data_with_churn test + +## 
[0.10.18](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.17...sn_transfers-v0.10.18) - 2023-07-28 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.17](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.16...sn_transfers-v0.10.17) - 2023-07-26 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.16](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.15...sn_transfers-v0.10.16) - 2023-07-25 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.15](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.14...sn_transfers-v0.10.15) - 2023-07-21 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.14](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.13...sn_transfers-v0.10.14) - 2023-07-20 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.13](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.12...sn_transfers-v0.10.13) - 2023-07-19 + +### Added +- *(CI)* dbc verfication during network churning test + +## [0.10.12](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.11...sn_transfers-v0.10.12) - 2023-07-19 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.11](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.10...sn_transfers-v0.10.11) - 2023-07-18 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.10](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.9...sn_transfers-v0.10.10) - 2023-07-17 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.9](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.8...sn_transfers-v0.10.9) - 2023-07-17 + +### Added +- *(client)* keep storage payment proofs in local wallet + +## [0.10.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.7...sn_transfers-v0.10.8) - 2023-07-12 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.6...sn_transfers-v0.10.7) - 2023-07-11 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.5...sn_transfers-v0.10.6) - 2023-07-10 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.4...sn_transfers-v0.10.5) - 2023-07-06 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.3...sn_transfers-v0.10.4) - 2023-07-05 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.2...sn_transfers-v0.10.3) - 2023-07-04 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.1...sn_transfers-v0.10.2) - 2023-06-28 + +### Other +- updated the following local packages: sn_protocol + +## [0.10.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.10.0...sn_transfers-v0.10.1) - 2023-06-26 + +### Added +- display path when no deposits were found upon wallet deposit failure + +### Other +- adding proptests for payment proofs merkletree utilities +- payment proof map to use xorname as index 
instead of merkletree nodes type +- having the payment proof validation util to return the item's leaf index + +## [0.10.0](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.8...sn_transfers-v0.10.0) - 2023-06-22 + +### Added +- use standarised directories for files/wallet commands + +## [0.9.8](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.7...sn_transfers-v0.9.8) - 2023-06-21 + +### Other +- updated the following local packages: sn_protocol + +## [0.9.7](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.6...sn_transfers-v0.9.7) - 2023-06-21 + +### Fixed +- *(sn_transfers)* hardcode new genesis DBC for tests + +### Other +- *(node)* obtain parent_tx from SignedSpend + +## [0.9.6](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.5...sn_transfers-v0.9.6) - 2023-06-20 + +### Other +- updated the following local packages: sn_protocol + +## [0.9.5](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.4...sn_transfers-v0.9.5) - 2023-06-20 + +### Other +- specific error types for different payment proof verification scenarios + +## [0.9.4](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.3...sn_transfers-v0.9.4) - 2023-06-15 + +### Added +- add double spend test + +### Fixed +- parent spend checks +- parent spend issue + +## [0.9.3](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.2...sn_transfers-v0.9.3) - 2023-06-14 + +### Added +- include output DBC within payment proof for Chunks storage + +## [0.9.2](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.1...sn_transfers-v0.9.2) - 2023-06-12 + +### Added +- remove spendbook rw locks, improve logging + +## [0.9.1](https://github.com/maidsafe/safe_network/compare/sn_transfers-v0.9.0...sn_transfers-v0.9.1) - 2023-06-09 + +### Other +- manually change crate version diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml new file mode 100644 index 0000000000..4ee7753fde --- /dev/null +++ b/sn_evm/Cargo.toml @@ -0,0 +1,37 @@ +[package] +authors = ["MaidSafe Developers "] +description = "Safe Network EVM Transfers" +documentation = "https://docs.rs/sn_node" +edition = "2021" +homepage = "https://maidsafe.net" +license = "GPL-3.0" +name = "sn_evm" +readme = "README.md" +repository = "https://github.com/maidsafe/safe_network" +version = "0.1.0" + +[features] +test-utils = [] + +[dependencies] +custom_debug = "~0.6.1" +evmlib = { path = "../evmlib" } +hex = "~0.4.3" +lazy_static = "~1.4.0" +libp2p = { version = "0.53", features = ["identify", "kad"] } +rand = { version = "~0.8.5", features = ["small_rng"] } +rmp-serde = "1.1.1" +serde = { version = "1.0.133", features = ["derive", "rc"] } +serde_json = "1.0.108" +thiserror = "1.0.24" +tiny-keccak = { version = "~2.0.2", features = ["sha3"] } +tracing = { version = "~0.1.26" } +xor_name = "5.0.0" +ring = "0.17.8" +tempfile = "3.10.1" + +[dev-dependencies] +tokio = { version = "1.32.0", features = ["macros", "rt"] } + +[lints] +workspace = true diff --git a/sn_evm/README.md b/sn_evm/README.md new file mode 100644 index 0000000000..f17bfee940 --- /dev/null +++ b/sn_evm/README.md @@ -0,0 +1,46 @@ +# Safe Network EVM data payments + +This crate contains the logic for data payments on the SAFE Network using the Ethereum protocol. + +This crate provides a set of types and utilities for interacting with EVM-based networks. It offers abstraction over common tasks such as handling addresses, wallets, payments, and network configurations. 
+
+## Exposed Types
+
+### RewardsAddress
+Alias for `evmlib::common::Address`. Represents an EVM-compatible address used for handling rewards.
+
+### QuoteHash
+Represents a unique hash identifying a quote. Useful for referencing and verifying `PaymentQuote`.
+
+### TxHash
+Represents the transaction hash. Useful for identifying and tracking transactions on the blockchain.
+
+### EvmWallet
+Alias for `evmlib::wallet::Wallet`. A wallet used to interact with EVM-compatible networks, providing key management and signing functionality.
+
+### EvmNetworkCustom
+A custom network type that allows for interaction with custom EVM-based networks.
+
+### EvmNetwork
+A standard network type for EVM-based networks such as Ethereum or ArbitrumOne.
+
+### PaymentQuote
+Represents a quote for a payment transaction. Contains relevant data for processing payments through EVM-based networks.
+
+### QuotingMetrics
+Represents metrics associated with generating a payment quote. Useful for performance measurement and optimization.
+
+### ProofOfPayment
+Contains proof of a successful payment on an EVM-based network. Includes data like transaction hash and confirmation details.
+
+### Amount
+Represents a general amount of tokens. Can be used to define any token value in a flexible way.
+
+### AttoTokens
+Represents an amount in the smallest token unit, commonly "atto" (10^-18). Useful for working with precise amounts in smart contracts.
+
+### EvmError
+A custom error type used for handling EVM-related errors within the library.
+
+### Result
+A specialized `Result` type that wraps around `EvmError`. Standardizes error handling across operations.
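A minimal, illustrative sketch of how the exposed types fit together (not part of the crate itself; all names as listed above):

    use sn_evm::{AttoTokens, PaymentQuote};

    fn example() {
        // One whole token, expressed in the smallest (atto, 10^-18) unit.
        let one_token = AttoTokens::from_u64(1_000_000_000_000_000_000);
        assert!(!one_token.is_zero());

        // An empty quote, as used for defaults and tests.
        let quote = PaymentQuote::zero();
        assert_eq!(quote.cost, AttoTokens::zero());
    }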
diff --git a/sn_evm/src/amount.rs b/sn_evm/src/amount.rs
new file mode 100644
index 0000000000..80978c721f
--- /dev/null
+++ b/sn_evm/src/amount.rs
@@ -0,0 +1,253 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::{EvmError, Result};
+
+pub use evmlib::common::Amount;
+use serde::{Deserialize, Serialize};
+use std::{
+    fmt::{self, Display, Formatter},
+    str::FromStr,
+};
+
+/// The power of ten used to convert whole tokens to the raw atto value
+const TOKEN_TO_RAW_POWER_OF_10_CONVERSION: u64 = 18;
+/// The multiplication factor used to convert whole tokens to the raw atto value
+const TOKEN_TO_RAW_CONVERSION: u64 = 1_000_000_000_000_000_000;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+/// An amount in SNT atto. 10^18 atto = 1 SNT.
+pub struct AttoTokens(Amount);
+
+impl AttoTokens {
+    /// Type safe representation of zero AttoTokens.
+    pub const fn zero() -> Self {
+        Self(Amount::ZERO)
+    }
+
+    /// Returns whether it's a representation of zero AttoTokens.
+    pub fn is_zero(&self) -> bool {
+        self.0.is_zero()
+    }
+
+    /// New value from an amount
+    pub fn from_atto(value: Amount) -> Self {
+        Self(value)
+    }
+
+    /// New value from a number of atto tokens.
+    pub fn from_u64(value: u64) -> Self {
+        Self(Amount::from(value))
+    }
+
+    /// New value from a number of atto tokens.
+    pub fn from_u128(value: u128) -> Self {
+        Self(Amount::from(value))
+    }
+
+    /// Total AttoTokens expressed as a number of atto tokens.
+    pub fn as_atto(self) -> Amount {
+        self.0
+    }
+
+    /// Computes `self + rhs`, returning `None` if overflow occurred.
+    pub fn checked_add(self, rhs: AttoTokens) -> Option<AttoTokens> {
+        self.0.checked_add(rhs.0).map(Self::from_atto)
+    }
+
+    /// Computes `self - rhs`, returning `None` if overflow occurred.
+    pub fn checked_sub(self, rhs: AttoTokens) -> Option<AttoTokens> {
+        self.0.checked_sub(rhs.0).map(Self::from_atto)
+    }
+
+    /// Converts the AttoTokens into little-endian bytes
+    pub fn to_bytes(&self) -> Vec<u8> {
+        self.0.as_le_bytes().to_vec()
+    }
+}
+
+impl From<u64> for AttoTokens {
+    fn from(value: u64) -> Self {
+        Self(Amount::from(value))
+    }
+}
+
+impl FromStr for AttoTokens {
+    type Err = EvmError;
+
+    fn from_str(value_str: &str) -> Result<Self> {
+        let mut itr = value_str.splitn(2, '.');
+        let converted_units = {
+            let units = itr
+                .next()
+                .and_then(|s| s.parse::<Amount>().ok())
+                .ok_or_else(|| {
+                    EvmError::FailedToParseAttoToken("Can't parse token units".to_string())
+                })?;
+
+            units
+                .checked_mul(Amount::from(TOKEN_TO_RAW_CONVERSION))
+                .ok_or(EvmError::ExcessiveValue)?
+        };
+
+        let remainder = {
+            let remainder_str = itr.next().unwrap_or_default().trim_end_matches('0');
+
+            if remainder_str.is_empty() {
+                Amount::ZERO
+            } else {
+                let parsed_remainder = remainder_str.parse::<Amount>().map_err(|_| {
+                    EvmError::FailedToParseAttoToken("Can't parse token remainder".to_string())
+                })?;
+
+                let remainder_conversion = TOKEN_TO_RAW_POWER_OF_10_CONVERSION
+                    .checked_sub(remainder_str.len() as u64)
+                    .ok_or(EvmError::LossOfPrecision)?;
+                parsed_remainder * Amount::from(10).pow(Amount::from(remainder_conversion))
+            }
+        };
+
+        Ok(Self(converted_units + remainder))
+    }
+}
+
+impl Display for AttoTokens {
+    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
+        let unit = self.0 / Amount::from(TOKEN_TO_RAW_CONVERSION);
+        let remainder = self.0 % Amount::from(TOKEN_TO_RAW_CONVERSION);
+        write!(formatter, "{unit}.{remainder:09}")
+    }
+}
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn from_str() -> Result<()> {
+        assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0")?);
+        assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0.")?);
+        assert_eq!(AttoTokens::from_u64(0), AttoTokens::from_str("0.0")?);
+        assert_eq!(
+            AttoTokens::from_u64(1),
+            AttoTokens::from_str("0.000000000000000001")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_000_000_000_000_000_000),
+            AttoTokens::from_str("1")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_000_000_000_000_000_000),
+            AttoTokens::from_str("1.")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_000_000_000_000_000_000),
+            AttoTokens::from_str("1.0")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_000_000_000_000_000_001),
+            AttoTokens::from_str("1.000000000000000001")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_100_000_000_000_000_000),
+            AttoTokens::from_str("1.1")?
+        );
+        assert_eq!(
+            AttoTokens::from_u64(1_100_000_000_000_000_001),
+            AttoTokens::from_str("1.100000000000000001")?
+        );
+        assert_eq!(
+            AttoTokens::from_u128(4_294_967_295_000_000_000_000_000_000u128),
+            AttoTokens::from_str("4294967295")?
+        );
+        assert_eq!(
+            AttoTokens::from_u128(4_294_967_295_999_999_999_000_000_000u128),
+            AttoTokens::from_str("4294967295.999999999")?,
+        );
+        assert_eq!(
+            AttoTokens::from_u128(4_294_967_295_999_999_999_000_000_000u128),
+            AttoTokens::from_str("4294967295.9999999990000")?,
+        );
+
+        assert_eq!(
+            Err(EvmError::FailedToParseAttoToken(
+                "Can't parse token units".to_string()
+            )),
+            AttoTokens::from_str("a")
+        );
+        assert_eq!(
+            Err(EvmError::FailedToParseAttoToken(
+                "Can't parse token remainder".to_string()
+            )),
+            AttoTokens::from_str("0.a")
+        );
+        assert_eq!(
+            Err(EvmError::FailedToParseAttoToken(
+                "Can't parse token remainder".to_string()
+            )),
+            AttoTokens::from_str("0.0.0")
+        );
+        assert_eq!(
+            Err(EvmError::LossOfPrecision),
+            AttoTokens::from_str("0.0000000000000000009")
+        );
+        assert_eq!(
+            Err(EvmError::ExcessiveValue),
+            AttoTokens::from_str("18446744074")
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn display() {
+        assert_eq!("0.000000000", format!("{}", AttoTokens::from_u64(0)));
+        assert_eq!("0.000000001", format!("{}", AttoTokens::from_u64(1)));
+        assert_eq!("0.000000010", format!("{}", AttoTokens::from_u64(10)));
+        assert_eq!(
+            "1.000000000",
+            format!("{}", AttoTokens::from_u64(1_000_000_000_000_000_000))
+        );
+        assert_eq!(
+            "1.000000001",
+            format!("{}", AttoTokens::from_u64(1_000_000_000_000_000_001))
+        );
+        assert_eq!(
+            "4294967295.000000000",
+            format!(
+                "{}",
+                AttoTokens::from_u128(4_294_967_295_000_000_000_000_000_000u128)
+            )
+        );
+    }
+
+    #[test]
+    fn checked_add_sub() {
+        assert_eq!(
+            Some(AttoTokens::from_u64(3)),
+            AttoTokens::from_u64(1).checked_add(AttoTokens::from_u64(2))
+        );
+        // Amount is wider than u64, so overflow only occurs at Amount::MAX.
+        assert_eq!(
+            None,
+            AttoTokens::from_atto(Amount::MAX).checked_add(AttoTokens::from_u64(1))
+        );
+        assert_eq!(
+            None,
+            AttoTokens::from_atto(Amount::MAX).checked_add(AttoTokens::from_atto(Amount::MAX))
+        );
+
+        assert_eq!(
+            Some(AttoTokens::from_u64(0)),
+            AttoTokens::from_u64(u64::MAX).checked_sub(AttoTokens::from_u64(u64::MAX))
+        );
+        assert_eq!(
+            None,
+            AttoTokens::from_u64(0).checked_sub(AttoTokens::from_u64(u64::MAX))
+        );
+        assert_eq!(
+            None,
+            AttoTokens::from_u64(10).checked_sub(AttoTokens::from_u64(11))
+        );
+    }
+}
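The `FromStr` implementation above splits a decimal string into whole units and a remainder, scaling the remainder by 10^(18 - digits); a small sketch of that rule (illustrative only, assuming the 18-decimal `TOKEN_TO_RAW_CONVERSION` defined above):

    use std::str::FromStr;
    use sn_evm::AttoTokens;

    fn parse_example() -> Result<(), sn_evm::EvmError> {
        // "1.5" = 1 * 10^18 plus the remainder "5" scaled by 10^(18 - 1).
        let amount = AttoTokens::from_str("1.5")?;
        assert_eq!(amount, AttoTokens::from_u64(1_500_000_000_000_000_000));
        Ok(())
    }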
diff --git a/sn_evm/src/data_payments.rs b/sn_evm/src/data_payments.rs
new file mode 100644
index 0000000000..4791c4af96
--- /dev/null
+++ b/sn_evm/src/data_payments.rs
@@ -0,0 +1,349 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::{AttoTokens, EvmError};
+use evmlib::{
+    common::{Address as RewardsAddress, QuoteHash},
+    utils::dummy_address,
+};
+use libp2p::{identity::PublicKey, PeerId};
+use serde::{Deserialize, Serialize};
+use std::time::SystemTime;
+use xor_name::XorName;
+
+/// The time in seconds that a quote is valid for
+pub const QUOTE_EXPIRATION_SECS: u64 = 3600;
+
+/// The margin allowed for live_time
+const LIVE_TIME_MARGIN: u64 = 10;
+
+/// Quoting metrics used to generate a quote, or to track a peer's status.
+#[derive(
+    Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize, custom_debug::Debug,
+)]
+pub struct QuotingMetrics {
+    /// the records stored
+    pub close_records_stored: usize,
+    /// the max_records configured
+    pub max_records: usize,
+    /// number of times that got paid
+    pub received_payment_count: usize,
+    /// the duration that node keeps connected to the network, measured in hours
+    /// TODO: take `restart` into account
+    pub live_time: u64,
+}
+
+impl QuotingMetrics {
+    /// construct an empty QuotingMetrics
+    pub fn new() -> Self {
+        Self {
+            close_records_stored: 0,
+            max_records: 0,
+            received_payment_count: 0,
+            live_time: 0,
+        }
+    }
+}
+
+impl Default for QuotingMetrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// A payment quote to store data given by a node to a client
+/// Note that the PaymentQuote is a contract between the node and itself to make sure the clients aren't mispaying.
+/// It is NOT a contract between the client and the node.
+#[derive(
+    Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize, custom_debug::Debug,
+)]
+pub struct PaymentQuote {
+    /// the content paid for
+    pub content: XorName,
+    /// how much the node demands for storing the content
+    pub cost: AttoTokens,
+    /// the local node time when the quote was created
+    pub timestamp: SystemTime,
+    /// quoting metrics being used to generate this quote
+    pub quoting_metrics: QuotingMetrics,
+    /// the node's wallet address
+    pub rewards_address: RewardsAddress,
+    /// the node's libp2p identity public key in bytes (PeerId)
+    #[debug(skip)]
+    pub pub_key: Vec<u8>,
+    /// the node's signature for the quote
+    #[debug(skip)]
+    pub signature: Vec<u8>,
+}
+
+impl PaymentQuote {
+    /// create an empty PaymentQuote
+    pub fn zero() -> Self {
+        Self {
+            content: Default::default(),
+            cost: AttoTokens::zero(),
+            timestamp: SystemTime::now(),
+            quoting_metrics: Default::default(),
+            rewards_address: dummy_address(),
+            pub_key: vec![],
+            signature: vec![],
+        }
+    }
+
+    pub fn hash(&self) -> QuoteHash {
+        let mut bytes = self.bytes_for_sig();
+        bytes.extend_from_slice(self.pub_key.as_slice());
+        bytes.extend_from_slice(self.signature.as_slice());
+        evmlib::cryptography::hash(bytes)
+    }
+
+    /// returns the bytes to be signed from the given parameters
+    pub fn bytes_for_signing(
+        xorname: XorName,
+        cost: AttoTokens,
+        timestamp: SystemTime,
+        quoting_metrics: &QuotingMetrics,
+        rewards_address: &RewardsAddress,
+    ) -> Vec<u8> {
+        let mut bytes = xorname.to_vec();
+        bytes.extend_from_slice(&cost.to_bytes());
+        bytes.extend_from_slice(
+            &timestamp
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .expect("Unix epoch to be in the past")
+                .as_secs()
+                .to_le_bytes(),
+        );
+        let serialised_quoting_metrics = rmp_serde::to_vec(quoting_metrics).unwrap_or_default();
+        bytes.extend_from_slice(&serialised_quoting_metrics);
+        bytes.extend_from_slice(rewards_address.as_slice());
+        bytes
+    }
+
+    /// Returns the bytes to be signed from self
+    pub fn bytes_for_sig(&self) -> Vec<u8> {
+        Self::bytes_for_signing(
+            self.content,
+            self.cost,
+            self.timestamp,
+            &self.quoting_metrics,
+            &self.rewards_address,
+        )
+    }
+
+    /// Returns the peer id of the node that created the quote
+    pub fn peer_id(&self) -> Result<PeerId, EvmError> {
+        if let Ok(pub_key) = libp2p::identity::PublicKey::try_decode_protobuf(&self.pub_key) {
+            Ok(PeerId::from(pub_key.clone()))
+        } else {
+            error!("Can't parse PublicKey from protobuf");
+            Err(EvmError::InvalidQuotePublicKey)
+        }
+    }
+
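    // A sketch of the signing round-trip implemented by `bytes_for_sig` and
    // `check_is_signed_by_claimed_peer` below, mirroring the
    // `test_is_signed_by_claimed_peer` unit test (illustrative only):
    //
    //     let keypair = libp2p::identity::Keypair::generate_ed25519();
    //     let mut quote = PaymentQuote::zero();
    //     quote.pub_key = keypair.public().encode_protobuf();
    //     quote.signature = keypair.sign(&quote.bytes_for_sig()).expect("sign");
    //     assert!(quote.check_is_signed_by_claimed_peer(keypair.public().to_peer_id()));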
+    /// Check self is signed by the claimed peer
+    pub fn check_is_signed_by_claimed_peer(&self, claimed_peer: PeerId) -> bool {
+        let pub_key = if let Ok(pub_key) = PublicKey::try_decode_protobuf(&self.pub_key) {
+            pub_key
+        } else {
+            error!("Can't parse PublicKey from protobuf");
+            return false;
+        };
+
+        let self_peer_id = PeerId::from(pub_key.clone());
+
+        if self_peer_id != claimed_peer {
+            error!("This quote {self:?} of {self_peer_id:?} is not signed by {claimed_peer:?}");
+            return false;
+        }
+
+        let bytes = self.bytes_for_sig();
+
+        if !pub_key.verify(&bytes, &self.signature) {
+            error!("Signature is not signed by claimed pub_key");
+            return false;
+        }
+
+        true
+    }
+
+    /// Returns true if the quote has not yet expired
+    pub fn has_expired(&self) -> bool {
+        let now = std::time::SystemTime::now();
+
+        let dur_s = match now.duration_since(self.timestamp) {
+            Ok(dur) => dur.as_secs(),
+            Err(_) => return true,
+        };
+        dur_s > QUOTE_EXPIRATION_SECS
+    }
+
+    /// test utility to create a dummy quote
+    pub fn test_dummy(xorname: XorName, cost: AttoTokens) -> Self {
+        Self {
+            content: xorname,
+            cost,
+            timestamp: SystemTime::now(),
+            quoting_metrics: Default::default(),
+            pub_key: vec![],
+            signature: vec![],
+            rewards_address: dummy_address(),
+        }
+    }
+
+    /// Check whether self is newer than the target quote.
+    pub fn is_newer_than(&self, other: &Self) -> bool {
+        self.timestamp > other.timestamp
+    }
+
+    /// Check against a new quote, verify whether it is a valid one from self perspective.
+    /// Returns `true` to flag the `other` quote is valid, from self perspective.
+    pub fn historical_verify(&self, other: &Self) -> bool {
+        // There is a chance that an old quote got used later than a new quote
+        let self_is_newer = self.is_newer_than(other);
+        let (old_quote, new_quote) = if self_is_newer {
+            (other, self)
+        } else {
+            (self, other)
+        };
+
+        if new_quote.quoting_metrics.live_time < old_quote.quoting_metrics.live_time {
+            info!("Claimed live_time out of sequence");
+            return false;
+        }
+
+        let old_elapsed = if let Ok(elapsed) = old_quote.timestamp.elapsed() {
+            elapsed
+        } else {
+            info!("timestamp failure");
+            return false;
+        };
+        let new_elapsed = if let Ok(elapsed) = new_quote.timestamp.elapsed() {
+            elapsed
+        } else {
+            info!("timestamp failure");
+            return false;
+        };
+
+        let time_diff = old_elapsed.as_secs().saturating_sub(new_elapsed.as_secs());
+        let live_time_diff =
+            new_quote.quoting_metrics.live_time - old_quote.quoting_metrics.live_time;
+        // In theory these two should match; allow a LIVE_TIME_MARGIN to absorb system glitches
+        if live_time_diff > time_diff + LIVE_TIME_MARGIN {
+            info!("claimed live_time out of sync with the timestamp");
+            return false;
+        }
+
+        // There could be pruning to be undertaken, and the close range keeps changing as well,
+        // hence `close_records_stored` could be growing or shrinking.
+        // We currently don't check it, and just log it to observe the trend.
+ debug!( + "The new quote has {} close records stored, meanwhile old one has {}.", + new_quote.quoting_metrics.close_records_stored, + old_quote.quoting_metrics.close_records_stored + ); + + // TODO: Double check if this applies, as this will prevent a node restart with same ID + if new_quote.quoting_metrics.received_payment_count + < old_quote.quoting_metrics.received_payment_count + { + info!("claimed received_payment_count out of sequence"); + return false; + } + + true + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use libp2p::identity::Keypair; + use std::{thread::sleep, time::Duration}; + + #[test] + fn test_is_newer_than() { + let old_quote = PaymentQuote::zero(); + sleep(Duration::from_millis(100)); + let new_quote = PaymentQuote::zero(); + assert!(new_quote.is_newer_than(&old_quote)); + assert!(!old_quote.is_newer_than(&new_quote)); + } + + #[test] + fn test_is_signed_by_claimed_peer() { + let keypair = Keypair::generate_ed25519(); + let peer_id = keypair.public().to_peer_id(); + + let false_peer = PeerId::random(); + + let mut quote = PaymentQuote::zero(); + let bytes = quote.bytes_for_sig(); + let signature = if let Ok(sig) = keypair.sign(&bytes) { + sig + } else { + panic!("Cannot sign the quote!"); + }; + + // Check failed with both incorrect pub_key and signature + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check failed with correct pub_key but incorrect signature + quote.pub_key = keypair.public().encode_protobuf(); + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check succeed with correct pub_key and signature, + // and failed with incorrect claimed signer (peer) + quote.signature = signature; + assert!(quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + + // Check failed with incorrect pub_key but correct signature + quote.pub_key = Keypair::generate_ed25519().public().encode_protobuf(); + assert!(!quote.check_is_signed_by_claimed_peer(peer_id)); + assert!(!quote.check_is_signed_by_claimed_peer(false_peer)); + } + + #[test] + fn test_historical_verify() { + let mut old_quote = PaymentQuote::zero(); + sleep(Duration::from_millis(100)); + let mut new_quote = PaymentQuote::zero(); + + // historical_verify will swap quotes to compare based on timeline automatically + assert!(new_quote.historical_verify(&old_quote)); + assert!(old_quote.historical_verify(&new_quote)); + + // Out of sequence received_payment_count shall be detected + old_quote.quoting_metrics.received_payment_count = 10; + new_quote.quoting_metrics.received_payment_count = 9; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Reset to correct one + new_quote.quoting_metrics.received_payment_count = 11; + assert!(new_quote.historical_verify(&old_quote)); + assert!(old_quote.historical_verify(&new_quote)); + + // Out of sequence live_time shall be detected + new_quote.quoting_metrics.live_time = 10; + old_quote.quoting_metrics.live_time = 11; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Out of margin live_time shall be detected + new_quote.quoting_metrics.live_time = 11 + LIVE_TIME_MARGIN + 1; + assert!(!new_quote.historical_verify(&old_quote)); + assert!(!old_quote.historical_verify(&new_quote)); + // Reset live_time to be within the margin + 
new_quote.quoting_metrics.live_time = 11 + LIVE_TIME_MARGIN - 1;
+        assert!(new_quote.historical_verify(&old_quote));
+        assert!(old_quote.historical_verify(&new_quote));
+    }
+}
diff --git a/sn_evm/src/error.rs b/sn_evm/src/error.rs
new file mode 100644
index 0000000000..386683b5aa
--- /dev/null
+++ b/sn_evm/src/error.rs
@@ -0,0 +1,33 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::AttoTokens;
+use thiserror::Error;
+
+/// Specialisation of `std::Result`.
+pub type Result<T> = std::result::Result<T, EvmError>;
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Error, Debug, Clone, PartialEq)]
+#[non_exhaustive]
+/// Transfer errors
+pub enum EvmError {
+    #[error("Lost precision on the number of coins during parsing.")]
+    LossOfPrecision,
+    #[error("The token amount would exceed the maximum value")]
+    ExcessiveValue,
+    #[error("Failed to parse: {0}")]
+    FailedToParseAttoToken(String),
+    #[error("Overflow occurred while adding values")]
+    NumericOverflow,
+    #[error("Not enough balance, {0} available, {1} required")]
+    NotEnoughBalance(AttoTokens, AttoTokens),
+
+    #[error("Invalid quote public key")]
+    InvalidQuotePublicKey,
+}
diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs
new file mode 100644
index 0000000000..6f8edadb85
--- /dev/null
+++ b/sn_evm/src/evm.rs
@@ -0,0 +1,30 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use evmlib::common::TxHash;
+use libp2p::identity::PublicKey;
+use libp2p::PeerId;
+use serde::{Deserialize, Serialize};
+
+use crate::PaymentQuote;
+
+/// The proof of payment for a data payment
+#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub struct ProofOfPayment {
+    /// The Quote we're paying for
+    pub quote: PaymentQuote,
+    /// The transaction hash
+    pub tx_hash: TxHash,
+}
+
+impl ProofOfPayment {
+    pub fn to_peer_id_payee(&self) -> Option<PeerId> {
+        let pub_key = PublicKey::try_decode_protobuf(&self.quote.pub_key).ok()?;
+        Some(PeerId::from_public_key(&pub_key))
+    }
+}
diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs
new file mode 100644
index 0000000000..de3a62c1dc
--- /dev/null
+++ b/sn_evm/src/lib.rs
@@ -0,0 +1,29 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +#[macro_use] +extern crate tracing; + +pub use evmlib::common::Address as RewardsAddress; +pub use evmlib::common::{QuoteHash, TxHash}; +pub use evmlib::utils; +pub use evmlib::wallet::Wallet as EvmWallet; +pub use evmlib::CustomNetwork as EvmNetworkCustom; +pub use evmlib::Network as EvmNetwork; + +mod amount; +mod data_payments; +mod error; +mod evm; + +pub use data_payments::{PaymentQuote, QuotingMetrics}; +pub use evm::ProofOfPayment; + +/// Types used in the public API +pub use amount::{Amount, AttoTokens}; +pub use error::{EvmError, Result}; From 19dce9d8cd8b717ccf2041df114777aaddd39ea7 Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 26 Sep 2024 15:06:29 +0900 Subject: [PATCH 047/255] chore: appease clippy --- evmlib/tests/chunk_payments.rs | 1 + evmlib/tests/common/mod.rs | 1 + evmlib/tests/common/quote.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/evmlib/tests/chunk_payments.rs b/evmlib/tests/chunk_payments.rs index ce5e24ce33..1b0a283245 100644 --- a/evmlib/tests/chunk_payments.rs +++ b/evmlib/tests/chunk_payments.rs @@ -52,6 +52,7 @@ async fn setup() -> ( #[allow(clippy::unwrap_used)] #[allow(clippy::type_complexity)] +#[allow(dead_code)] async fn provider_with_gas_funded_wallet( anvil: &AnvilInstance, ) -> FillProvider< diff --git a/evmlib/tests/common/mod.rs b/evmlib/tests/common/mod.rs index cc82e5bf16..48fab70355 100644 --- a/evmlib/tests/common/mod.rs +++ b/evmlib/tests/common/mod.rs @@ -2,4 +2,5 @@ use alloy::primitives::{address, Address}; pub mod quote; +#[allow(dead_code)] pub const ROYALTIES_WALLET: Address = address!("385e7887E5b41750E3679Da787B943EC42f37d75"); diff --git a/evmlib/tests/common/quote.rs b/evmlib/tests/common/quote.rs index d3d4f574ca..21d05cf189 100644 --- a/evmlib/tests/common/quote.rs +++ b/evmlib/tests/common/quote.rs @@ -1,6 +1,7 @@ use evmlib::common::{Amount, QuotePayment}; use evmlib::utils::{dummy_address, dummy_hash}; +#[allow(dead_code)] pub fn random_quote_payment() -> QuotePayment { let quote_hash = dummy_hash(); let reward_address = dummy_address(); From 1147e6ede66e12fbc9c06eeee36e4b055c626b61 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 26 Sep 2024 09:24:00 +0200 Subject: [PATCH 048/255] chore: ignore DevSkim warnings for hex types --- evmlib/src/event.rs | 2 +- evmlib/src/transaction.rs | 12 ++++++------ evmlib/src/wallet.rs | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs index deca415998..9327eb98cd 100644 --- a/evmlib/src/event.rs +++ b/evmlib/src/event.rs @@ -4,7 +4,7 @@ use alloy::rpc::types::Log; // Should be updated when the smart contract changes! 
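// For downstream crates, the re-exports in sn_evm/src/lib.rs above mean all the
// common payment types come from a single path; a minimal, illustrative import
// (names exactly as re-exported above):
//
//     use sn_evm::{AttoTokens, EvmNetwork, EvmWallet, PaymentQuote, ProofOfPayment};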
pub(crate) const CHUNK_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> = - b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958"); + b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958"); // DevSkim: ignore DS173237 #[derive(thiserror::Error, Debug)] pub enum Error { diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs index b83d30f750..87798ba1d3 100644 --- a/evmlib/src/transaction.rs +++ b/evmlib/src/transaction.rs @@ -148,7 +148,7 @@ mod tests { async fn test_get_transaction_receipt_by_hash() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 assert!(get_transaction_receipt_by_hash(&network, tx_hash) .await @@ -161,9 +161,9 @@ mod tests { let network = Network::ArbitrumOne; let block_number: u64 = 250043261; - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 let amount = U256::from(200); - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 let logs = get_chunk_payment_event(&network, block_number, quote_hash, reward_address, amount) @@ -177,9 +177,9 @@ mod tests { async fn test_verify_chunk_payment() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 let amount = U256::from(200); let result = verify_chunk_payment( diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 7f01497fb5..69eb0d55b9 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -278,7 +278,7 @@ mod tests { #[tokio::test] async fn test_from_private_key() { - let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d"; + let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d"; // DevSkim: ignore DS117838 let wallet = from_private_key(private_key).unwrap(); let account = >::default_signer_address(&wallet); From dea6230c7b2f5066268d3fa05976a733be91e748 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 26 Sep 2024 10:17:24 +0200 Subject: [PATCH 049/255] fix(launchpad): nat mode only when first time automatic --- node-launchpad/src/components/status.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index d06e777953..7fa81b3412 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -183,7 +183,8 @@ impl Status { /// Only run NAT detection if we haven't determined the status yet and we haven't failed more than 3 times. 
fn should_we_run_nat_detection(&self) -> bool { - !self.is_nat_status_determined + self.connection_mode == ConnectionMode::Automatic + && !self.is_nat_status_determined && self.error_while_running_nat_detection < MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION } From 45129883839ec630bb24692065a278a506258bde Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 26 Sep 2024 17:46:12 +0900 Subject: [PATCH 050/255] feat: node side integration and sn_node_manager --- Cargo.lock | 9 +- sn_networking/Cargo.toml | 1 + sn_networking/src/cmd.rs | 17 +- sn_networking/src/driver.rs | 2 +- sn_networking/src/error.rs | 2 + sn_networking/src/event/mod.rs | 2 +- sn_networking/src/event/swarm.rs | 1 + sn_networking/src/lib.rs | 34 +- sn_networking/src/log_markers.rs | 4 +- sn_networking/src/metrics/mod.rs | 8 +- sn_networking/src/record_store.rs | 29 +- sn_networking/src/record_store_api.rs | 6 +- sn_node/Cargo.toml | 11 +- sn_node/src/bin/safenode/main.rs | 35 +- sn_node/src/bin/safenode/rpc_service.rs | 6 +- sn_node/src/bin/safenode/subcommands.rs | 41 + sn_node/src/error.rs | 15 +- sn_node/src/event.rs | 11 +- sn_node/src/lib.rs | 8 +- sn_node/src/metrics.rs | 4 +- sn_node/src/node.rs | 291 +--- sn_node/src/put_validation.rs | 212 +-- sn_node/src/quote.rs | 25 +- sn_node/tests/data_with_churn.rs | 3 +- sn_node/tests/double_spend.rs | 1366 ++++++++--------- sn_node/tests/sequential_transfers.rs | 108 +- sn_node/tests/storage_payments.rs | 755 ++++----- sn_node_manager/Cargo.toml | 2 +- sn_node_manager/src/add_services/tests.rs | 18 +- sn_node_manager/src/bin/cli/main.rs | 24 + .../src/bin/cli/subcommands/evm_network.rs | 41 + .../src/bin/cli/subcommands/mod.rs | 1 + sn_node_manager/src/cmd/local.rs | 9 + sn_node_manager/src/lib.rs | 71 +- sn_node_manager/src/local.rs | 100 +- sn_protocol/Cargo.toml | 1 + sn_protocol/src/messages.rs | 2 +- sn_protocol/src/messages/cmd.rs | 3 +- sn_protocol/src/messages/response.rs | 6 +- sn_protocol/src/version.rs | 6 +- sn_service_management/Cargo.toml | 2 +- sn_service_management/src/node.rs | 4 +- 42 files changed, 1583 insertions(+), 1713 deletions(-) create mode 100644 sn_node/src/bin/safenode/subcommands.rs create mode 100644 sn_node_manager/src/bin/cli/subcommands/evm_network.rs create mode 100644 sn_node_manager/src/bin/cli/subcommands/mod.rs diff --git a/Cargo.lock b/Cargo.lock index dcc9ca1559..b952004587 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8486,6 +8486,7 @@ dependencies = [ "service-manager", "sn-releases", "sn_build_info", + "sn_evm", "sn_logging", "sn_peers_acquisition", "sn_protocol", @@ -8498,7 +8499,6 @@ dependencies = [ "tracing", "users", "uuid", - "which 6.0.3", ] [[package]] @@ -8811,6 +8811,7 @@ dependencies = [ "rmp-serde", "serde", "sn_build_info", + "sn_evm", "sn_protocol", "sn_registers", "sn_transfers", @@ -8833,13 +8834,13 @@ name = "sn_node" version = "0.111.2" dependencies = [ "assert_fs", - "assert_matches", "async-trait", "blsttc", "bytes", "chrono", "clap", "color-eyre", + "const-hex", "crdts", "custom_debug", "dirs-next", @@ -8860,6 +8861,7 @@ dependencies = [ "serde_json", "sn_build_info", "sn_client", + "sn_evm", "sn_logging", "sn_networking", "sn_peers_acquisition", @@ -8945,6 +8947,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sn_build_info", + "sn_evm", "sn_registers", "sn_transfers", "thiserror", @@ -8986,9 +8989,9 @@ dependencies = [ "serde", "serde_json", "service-manager", + "sn_evm", "sn_logging", "sn_protocol", - "sn_transfers", "sysinfo", "thiserror", "tokio", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml 
index 09f61b0645..39831935b2 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -57,6 +57,7 @@ sn_build_info = { path="../sn_build_info", version = "0.1.13" } sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_registers = { path = "../sn_registers", version = "0.3.19" } +sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 133bd2abda..541a518ce5 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -21,12 +21,12 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics}; use sn_protocol::{ messages::{Cmd, Request, Response}, storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -91,12 +91,12 @@ pub enum LocalSwarmCmd { /// GetLocalStoreCost for this node GetLocalStoreCost { key: RecordKey, - sender: oneshot::Sender<(NanoTokens, QuotingMetrics)>, + sender: oneshot::Sender<(AttoTokens, QuotingMetrics)>, }, /// Notify the node received a payment. PaymentReceived, /// Put record to the local RecordStore - PutVerifiedLocalRecord { + PutLocalRecord { record: Record, }, /// Remove a local record from the RecordStore @@ -194,7 +194,7 @@ pub enum NetworkSwarmCmd { impl Debug for LocalSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - LocalSwarmCmd::PutVerifiedLocalRecord { record } => { + LocalSwarmCmd::PutLocalRecord { record } => { write!( f, "LocalSwarmCmd::PutLocalRecord {{ key: {:?} }}", @@ -561,7 +561,7 @@ impl SwarmDriver { .store_cost(&key); self.record_metrics(Marker::StoreCost { - cost: cost.as_nano(), + cost: cost.as_atto(), quoting_metrics: "ing_metrics, }); @@ -587,8 +587,8 @@ impl SwarmDriver { let _ = sender.send(record); } - LocalSwarmCmd::PutVerifiedLocalRecord { record } => { - cmd_string = "PutVerifiedLocalRecord"; + LocalSwarmCmd::PutLocalRecord { record } => { + cmd_string = "PutLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -719,6 +719,7 @@ impl SwarmDriver { } LocalSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; + #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self .swarm .behaviour_mut() @@ -735,7 +736,7 @@ impl SwarmDriver { if let Some(distance) = range.0.ilog2() { let peers_in_kbucket = kbucket .iter() - .map(|peer_entry| (*peer_entry.node.key).into_preimage()) + .map(|peer_entry| peer_entry.node.key.into_preimage()) .collect::>(); let _ = ilog2_kbuckets.insert(distance, peers_in_kbucket); } else { diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 4b39b80907..2ed9a7d1f8 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -47,6 +47,7 @@ use libp2p::{ }; #[cfg(feature = "open-metrics")] use prometheus_client::{metrics::info::Info, registry::Registry}; +use sn_evm::PaymentQuote; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::{try_deserialize_record, RetryStrategy}, @@ -57,7 +58,6 @@ use sn_protocol::{ NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; use 
sn_registers::SignedRegister; -use sn_transfers::PaymentQuote; use std::{ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, fmt::Debug, diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 2168bb892c..6da5a22d9a 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -105,6 +105,8 @@ pub enum NetworkError { Wallet(#[from] sn_transfers::WalletError), #[error("Transfer Error {0}")] Transfer(#[from] sn_transfers::TransferError), + #[error("Evm payment Error {0}")] + EvmPayment(#[from] sn_evm::EvmError), #[error("Failed to sign the message with the PeerId keypair")] SigningFailed(#[from] libp2p::identity::SigningError), diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 20f45ca2c8..2b8158f255 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -21,11 +21,11 @@ use libp2p::{ Multiaddr, PeerId, }; +use sn_evm::PaymentQuote; use sn_protocol::{ messages::{Query, Request, Response}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::PaymentQuote; use std::{ collections::BTreeSet, fmt::{Debug, Formatter}, diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index af74a1455e..3f650f0b5a 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -329,6 +329,7 @@ impl SwarmDriver { self.send_event(NetworkEvent::NewListenAddr(address.clone())); info!("Local node is listening {listener_id:?} on {address:?}"); + println!("Local node is listening on {address:?}"); // TODO: make it print only once } SwarmEvent::ListenerClosed { listener_id, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 0df7812ebb..8369665c12 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -59,13 +59,13 @@ use libp2p::{ Multiaddr, PeerId, }; use rand::Rng; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress}; use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, storage::{RecordType, RetryStrategy}, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, net::IpAddr, @@ -79,7 +79,7 @@ use tokio::sync::{ use tokio::time::Duration; /// The type of quote for a selected payee. -pub type PayeeQuote = (PeerId, MainPubkey, PaymentQuote); +pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote); /// The count of peers that will be considered as close to a record target, /// that a replication of the record shall be sent/accepted to/by the peer. @@ -378,8 +378,8 @@ impl Network { peer_address, }) => { // Check the quote itself is valid.
- if quote.cost - != calculate_cost_for_records(quote.quoting_metrics.close_records_stored) + if quote.cost + != AttoTokens::from_u64(calculate_cost_for_records(quote.quoting_metrics.close_records_stored)) { warn!("Received invalid quote from {peer_address:?}, {quote:?}"); continue; @@ -589,7 +589,7 @@ impl Network { pub async fn get_local_storecost( &self, key: RecordKey, - ) -> Result<(NanoTokens, QuotingMetrics)> { + ) -> Result<(AttoTokens, QuotingMetrics)> { let (sender, receiver) = oneshot::channel(); self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); @@ -751,7 +751,7 @@ impl Network { PrettyPrintRecordKey::from(&record.key), record.value.len() ); - self.send_local_swarm_cmd(LocalSwarmCmd::PutVerifiedLocalRecord { record }) + self.send_local_swarm_cmd(LocalSwarmCmd::PutLocalRecord { record }) } /// Returns true if a RecordKey is present locally in the RecordStore @@ -961,7 +961,7 @@ impl Network { /// Given `all_costs` it will return the closest / lowest cost /// Closest requiring it to be within CLOSE_GROUP nodes fn get_fees_from_store_cost_responses( - all_costs: Vec<(NetworkAddress, MainPubkey, PaymentQuote)>, + all_costs: Vec<(NetworkAddress, RewardsAddress, PaymentQuote)>, ) -> Result<PayeeQuote> { // Find the minimum cost using a linear scan with random tie break let mut rng = rand::thread_rng(); @@ -1114,7 +1114,7 @@ mod tests { use eyre::bail; use super::*; - use sn_transfers::PaymentQuote; + use sn_evm::PaymentQuote; #[test] fn test_get_fee_from_store_cost_responses() -> Result<()> { @@ -1122,18 +1122,18 @@ // ensure we return the CLOSE_GROUP / 2 indexed price let mut costs = vec![]; for i in 1..CLOSE_GROUP_SIZE { - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i as u64)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i as u64)), )); } - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = get_fees_from_store_cost_responses(costs)?; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); @@ -1148,18 +1148,18 @@ let responses_count = CLOSE_GROUP_SIZE as u64 - 1; let mut costs = vec![]; for i in 1..responses_count { - // push random MainPubkey and Nano - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + // push random addr and Nano + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i)), )); println!("price added {i}"); } // this should be the lowest price - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = match get_fees_from_store_cost_responses(costs) { Err(_) => bail!("Should not have errored as we have enough responses"), }; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); diff --git a/sn_networking/src/log_markers.rs b/sn_networking/src/log_markers.rs index 97ecb6c04b..38ec42c875 100644 --- a/sn_networking/src/log_markers.rs +++ b/sn_networking/src/log_markers.rs @@ -7,7 +7,7 @@ // permissions and
limitations relating to use of the SAFE Network Software. use libp2p::PeerId; -use sn_transfers::QuotingMetrics; +use sn_evm::{Amount, QuotingMetrics}; // this gets us to_string easily enough use strum::Display; @@ -22,7 +22,7 @@ pub enum Marker<'a> { /// Store cost StoreCost { /// Cost - cost: u64, + cost: Amount, quoting_metrics: &'a QuotingMetrics, }, /// The peer has been considered as bad diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a7fdfbeee1..ebb15a73fb 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -232,7 +232,13 @@ impl NetworkMetricsRecorder { cost, quoting_metrics, } => { - let _ = self.store_cost.set(cost as i64); + let _ = self.store_cost.set(cost.try_into().unwrap_or(i64::MAX)); + let _ = self.relevant_records.set( + quoting_metrics + .close_records_stored + .try_into() + .unwrap_or(i64::MAX), + ); let _ = self .relevant_records .set(quoting_metrics.close_records_stored as i64); diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 55183866b8..7ce96c2e41 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -30,11 +30,11 @@ use prometheus_client::metrics::gauge::Gauge; use rand::RngCore; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{ storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::collections::VecDeque; use std::{ borrow::Cow, @@ -651,7 +651,7 @@ impl NodeRecordStore { } /// Calculate the cost to store data for our current store state - pub(crate) fn store_cost(&self, key: &Key) -> (NanoTokens, QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &Key) -> (AttoTokens, QuotingMetrics) { let records_stored = self.records.len(); let record_keys_as_hashset: HashSet<&Key> = self.records.keys().collect(); @@ -685,7 +685,7 @@ impl NodeRecordStore { // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): info!("Cost is now {cost:?} for quoting_metrics {quoting_metrics:?}"); - (NanoTokens::from(cost), quoting_metrics) + (AttoTokens::from_u64(cost), quoting_metrics) } /// Notify the node received a payment. 
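Since `store_cost` above derives the quoted price deterministically from `calculate_cost_for_records`, a client holding the quote can re-derive and check it; a minimal sketch of that check (illustrative only, mirroring the validation added in sn_networking/src/lib.rs above, and assuming `PaymentQuote`, `AttoTokens`, and `calculate_cost_for_records` are in scope):

    fn quote_cost_matches_metrics(quote: &PaymentQuote) -> bool {
        // Re-derive the expected cost from the metrics the node reported.
        quote.cost
            == AttoTokens::from_u64(calculate_cost_for_records(
                quote.quoting_metrics.close_records_stored,
            ))
    }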
@@ -955,7 +955,6 @@ mod tests { use super::*; use bls::SecretKey; - use sn_protocol::storage::{try_deserialize_record, Scratchpad}; use xor_name::XorName; use bytes::Bytes; @@ -963,8 +962,9 @@ mod tests { use libp2p::kad::K_VALUE; use libp2p::{core::multihash::Multihash, kad::RecordKey}; use quickcheck::*; - use sn_protocol::storage::{try_serialize_record, Chunk, ChunkAddress}; - use sn_transfers::{MainPubkey, PaymentQuote}; + use sn_evm::utils::dummy_address; + use sn_evm::{PaymentQuote, RewardsAddress}; + use sn_protocol::storage::{try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, Scratchpad}; use std::collections::BTreeMap; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use tokio::runtime::Runtime; @@ -1562,7 +1562,7 @@ mod tests { struct PeerStats { address: NetworkAddress, - pk: MainPubkey, + rewards_addr: RewardsAddress, records_stored: AtomicUsize, nanos_earned: AtomicU64, payments_received: AtomicUsize, @@ -1590,7 +1590,7 @@ mod tests { records_stored: AtomicUsize::new(0), nanos_earned: AtomicU64::new(0), payments_received: AtomicUsize::new(0), - pk: MainPubkey::new(SecretKey::random().public_key()), + rewards_addr: dummy_address(), }) .collect(); @@ -1657,7 +1657,7 @@ mod tests { if peer_index == payee_index { peer.nanos_earned - .fetch_add(cost.as_nano(), Ordering::Relaxed); + .fetch_add(cost.as_atto().try_into().unwrap_or(u64::MAX), Ordering::Relaxed); peer.payments_received.fetch_add(1, Ordering::Relaxed); } } @@ -1758,7 +1758,7 @@ mod tests { fn pick_cheapest_payee( peers: &[PeerStats], close_group: &[usize], - ) -> eyre::Result<(usize, NanoTokens)> { + ) -> eyre::Result<(usize, AttoTokens)> { let mut costs_vec = Vec::with_capacity(close_group.len()); let mut address_to_index = BTreeMap::new(); @@ -1767,7 +1767,7 @@ mod tests { address_to_index.insert(peer.address.clone(), i); let close_records_stored = peer.records_stored.load(Ordering::Relaxed); - let cost = NanoTokens::from(calculate_cost_for_records(close_records_stored)); + let cost = AttoTokens::from(calculate_cost_for_records(close_records_stored)); let quote = PaymentQuote { content: XorName::default(), // unimportant for cost calc @@ -1779,11 +1779,12 @@ mod tests { received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, - pub_key: peer.pk.to_bytes().to_vec(), - signature: vec![], // unimportant for cost calc + pub_key: bls::SecretKey::random().public_key().to_bytes().to_vec(), + signature: vec![], + rewards_address: peer.rewards_addr, // unimportant for cost calc }; - costs_vec.push((peer.address.clone(), peer.pk, quote)); + costs_vec.push((peer.address.clone(), peer.rewards_addr, quote)); } // sort by address first diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index c61b8d7043..8e3bc67364 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -12,8 +12,8 @@ use libp2p::kad::{ store::{RecordStore, Result}, ProviderRecord, Record, RecordKey, }; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::{borrow::Cow, collections::HashMap}; pub enum UnifiedRecordStore { @@ -111,11 +111,11 @@ impl UnifiedRecordStore { } } - pub(crate) fn store_cost(&self, key: &RecordKey) -> (NanoTokens, QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &RecordKey) -> (AttoTokens, QuotingMetrics) { match self { Self::Client(_) => { warn!("Calling store cost calculation at 
Client. This should not happen"); - (NanoTokens::zero(), Default::default()) + (AttoTokens::zero(), Default::default()) } Self::Node(store) => store.store_cost(key), } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 99c6d3f273..bed23167bb 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -14,15 +14,14 @@ name = "safenode" path = "src/bin/safenode/main.rs" [features] -default = ["metrics", "upnp", "reward-forward", "open-metrics"] -encrypt-records = ["sn_networking/encrypt-records"] +default = ["metrics", "upnp", "open-metrics"] local-discovery = ["sn_networking/local-discovery"] +otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] nightly = [] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] -otlp = ["sn_logging/otlp"] -reward-forward = ["sn_transfers/reward-forward"] +encrypt-records = ["sn_networking/encrypt-records"] upnp = ["sn_networking/upnp"] [dependencies] @@ -34,6 +33,7 @@ clap = { version = "4.2.1", features = ["derive"] } crdts = { version = "7.3", default-features = false, features = ["merkle"] } chrono = "~0.4.19" custom_debug = "~0.6.1" +const-hex = "1.12.0" dirs-next = "~2.0.0" eyre = "0.6.8" file-rotate = "0.7.3" @@ -59,6 +59,7 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_registers = { path = "../sn_registers", version = "0.3.19" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_service_management = { path = "../sn_service_management", version = "0.3.12" } +sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -80,7 +81,6 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -assert_matches = "1.5.0" reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } @@ -92,6 +92,7 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ sn_transfers = { path = "../sn_transfers", version = "0.19.1", features = [ "test-utils", ] } +sn_evm = { path = "../sn_evm", version = "0.1.0" } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index c503504528..9d2211597b 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -10,10 +10,14 @@ extern crate tracing; mod rpc_service; +mod subcommands; use clap::{command, Parser}; use color_eyre::{eyre::eyre, Result}; +use crate::subcommands::EvmNetworkCommand; +use const_hex::traits::FromHex; use libp2p::{identity::Keypair, PeerId}; +use sn_evm::{EvmNetwork, RewardsAddress}; #[cfg(feature = "metrics")] use sn_logging::metrics::init_metrics; use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; @@ -68,6 +72,7 @@ pub fn parse_log_output(val: &str) -> Result { // They are used for inserting line breaks when the help menu is rendered in the UI. #[derive(Parser, Debug)] #[command(disable_version_flag = true)] +#[clap(name = "safenode cli", version = env!("CARGO_PKG_VERSION"))] struct Opt { /// Specify whether the node is operating from a home network and situated behind a NAT without port forwarding /// capabilities. Setting this to true, activates hole-punching to facilitate direct connections from other nodes. 
@@ -120,6 +125,19 @@ struct Opt { #[clap(long = "max_archived_log_files", verbatim_doc_comment)] max_compressed_log_files: Option<usize>, + /// Specify the rewards address. + /// The rewards address is the address that will receive the rewards for the node. + /// It should be a valid EVM address. + #[clap(long)] + rewards_address: String, + + /// Specify the EVM network to use. + /// The network can either be a pre-configured one or a custom network. + /// When setting a custom network, you must specify the RPC URL to a fully synced node and + /// the addresses of the network token and chunk payments contracts. + #[command(subcommand)] + evm_network: Option<EvmNetworkCommand>, + /// Specify the node's data directory. /// /// If not provided, the default location is platform specific: @@ -213,6 +231,8 @@ fn main() -> Result<()> { ); return Ok(()); } + // evm config + let rewards_address = RewardsAddress::from_hex(&opt.rewards_address)?; if opt.crate_version { println!("Crate version: {}", env!("CARGO_PKG_VERSION")); @@ -229,6 +249,12 @@ fn main() -> Result<()> { println!("Package version: {}", sn_build_info::package_version()); return Ok(()); } + let evm_network: EvmNetwork = opt + .evm_network + .as_ref() + .cloned() + .map(|v| v.into()) + .unwrap_or_default(); let node_socket_addr = SocketAddr::new(opt.ip, opt.port); let (root_dir, keypair) = get_root_dir_and_keypair(&opt.root_dir)?; @@ -246,6 +272,10 @@ fn main() -> Result<()> { info!("\n{}\n{}", msg, "=".repeat(msg.len())); sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR); + debug!( + "safenode built with git version: {}", + sn_build_info::git_info() + ); info!("Node started with initial_peers {bootstrap_peers:?}"); @@ -258,12 +288,12 @@ fn main() -> Result<()> { let restart_options = rt.block_on(async move { let mut node_builder = NodeBuilder::new( keypair, + rewards_address, + evm_network, node_socket_addr, bootstrap_peers, opt.local, root_dir, - opt.owner.clone(), - #[cfg(feature = "upnp")] opt.upnp, ); node_builder.is_behind_home_network = opt.home_network; @@ -462,6 +492,7 @@ fn init_logging(opt: &Opt, peer_id: PeerId) -> Result<(String, ReloadHandle, Opt ("sn_protocol".to_string(), Level::DEBUG), ("sn_registers".to_string(), Level::DEBUG), ("sn_transfers".to_string(), Level::DEBUG), + ("sn_evm".to_string(), Level::DEBUG), ]; let output_dest = match &opt.log_output_dest { diff --git a/sn_node/src/bin/safenode/rpc_service.rs b/sn_node/src/bin/safenode/rpc_service.rs index 6943221741..c42503f112 100644 --- a/sn_node/src/bin/safenode/rpc_service.rs +++ b/sn_node/src/bin/safenode/rpc_service.rs @@ -66,11 +66,7 @@ impl SafeNode for SafeNodeRpcService { pid: process::id(), bin_version: env!("CARGO_PKG_VERSION").to_string(), uptime_secs: self.started_instant.elapsed().as_secs(), - wallet_balance: self - .running_node - .get_node_wallet_balance() - .expect("Failed to get node wallet balance") - .as_nano(), + wallet_balance: 0, // NB TODO: Implement this using metrics data?
}); Ok(resp) diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs new file mode 100644 index 0000000000..3faada3562 --- /dev/null +++ b/sn_node/src/bin/safenode/subcommands.rs @@ -0,0 +1,41 @@ +use clap::Subcommand; +use sn_evm::{EvmNetwork, EvmNetworkCustom}; + +#[derive(Subcommand, Clone, Debug)] +pub(crate) enum EvmNetworkCommand { + /// Use the Arbitrum One network + EvmArbitrumOne, + + /// Use a custom network + EvmCustom { + /// The RPC URL for the custom network + #[arg(long)] + rpc_url: String, + + /// The payment token contract address + #[arg(long, short)] + payment_token_address: String, + + /// The chunk payments contract address + #[arg(long, short)] + chunk_payments_address: String, + }, +} + +#[allow(clippy::from_over_into)] +impl Into<EvmNetwork> for EvmNetworkCommand { + fn into(self) -> EvmNetwork { + match self { + Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmCustom { + rpc_url, + payment_token_address, + chunk_payments_address, + } => EvmNetwork::Custom(EvmNetworkCustom::new( + &rpc_url, + &payment_token_address, + &chunk_payments_address, + )), + } + } +} diff --git a/sn_node/src/error.rs b/sn_node/src/error.rs index 1c2bb23e16..a74ed00bc7 100644 --- a/sn_node/src/error.rs +++ b/sn_node/src/error.rs @@ -6,14 +6,16 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use sn_evm::AttoTokens; use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; -use sn_transfers::{NanoTokens, WalletError}; +use sn_transfers::WalletError; use thiserror::Error; pub(super) type Result<T> = std::result::Result<T, Error>; /// Internal error. #[derive(Debug, Error)] +#[allow(missing_docs)] pub enum Error { #[error("Network error {0}")] Network(#[from] sn_networking::NetworkError), @@ -28,7 +30,7 @@ pub enum Error { Wallet(#[from] WalletError), #[error("Transfers Error {0}")] - Transfers(#[from] sn_transfers::TransferError), + Transfers(#[from] sn_evm::EvmError), #[error("Failed to parse NodeEvent")] NodeEventParsingFailed, @@ -74,8 +76,8 @@ pub enum Error { /// The amount paid by payment proof is not the required for the received content #[error("The amount paid by payment proof is not the required for the received content, paid {paid}, expected {expected}")] PaymentProofInsufficientAmount { - paid: NanoTokens, - expected: NanoTokens, + paid: AttoTokens, + expected: AttoTokens, }, #[error("A payment we received contains cash notes already confirmed to be spent")] ReusedPayment, @@ -93,4 +95,9 @@ pub enum Error { /// Error occurred in an async thread #[error("Error occurred in async thread: {0}")] JoinErrorInAsyncThread(String), + + #[error("EVM Network error: {0}")] + EvmNetwork(String), + #[error("Invalid quote timestamp: {0}")] + InvalidQuoteTimestamp(String), } diff --git a/sn_node/src/event.rs b/sn_node/src/event.rs index c3e9857bad..6237e1d8bf 100644 --- a/sn_node/src/event.rs +++ b/sn_node/src/event.rs @@ -9,8 +9,11 @@ use crate::error::{Error, Result}; use serde::{Deserialize, Serialize}; -use sn_protocol::storage::{ChunkAddress, RegisterAddress}; -use sn_transfers::UniquePubkey; +use sn_evm::AttoTokens; +use sn_protocol::{ + storage::{ChunkAddress, RegisterAddress}, + NetworkAddress, +}; use tokio::sync::broadcast; const NODE_EVENT_CHANNEL_SIZE: usize = 500; @@ -62,8 +65,8 @@ pub enum NodeEvent { RegisterCreated(RegisterAddress), /// A Register edit operation has been applied in local storage RegisterEdited(RegisterAddress), -
/// A CashNote Spend has been stored in local storage - SpendStored(UniquePubkey), + /// A new reward was received + RewardReceived(AttoTokens, NetworkAddress), /// One of the sub event channel closed and unrecoverable. ChannelClosed, /// Terminates the node diff --git a/sn_node/src/lib.rs b/sn_node/src/lib.rs index 4f097a7724..7dbd88ce5e 100644 --- a/sn_node/src/lib.rs +++ b/sn_node/src/lib.rs @@ -48,7 +48,6 @@ use crate::error::{Error, Result}; use libp2p::PeerId; use sn_networking::{Network, SwarmLocalState}; use sn_protocol::{get_port_from_multiaddr, NetworkAddress}; -use sn_transfers::{HotWallet, NanoTokens}; use std::{ collections::{BTreeMap, HashSet}, path::PathBuf, @@ -80,12 +79,6 @@ impl RunningNode { self.network.root_dir_path().clone() } - /// Returns the wallet balance of the node - pub fn get_node_wallet_balance(&self) -> Result { - let wallet = HotWallet::load_from(self.network.root_dir_path())?; - Ok(wallet.balance()) - } - /// Returns a `SwarmLocalState` with some information obtained from swarm's local state. pub async fn get_swarm_local_state(&self) -> Result { let state = self.network.get_swarm_local_state().await?; @@ -110,6 +103,7 @@ impl RunningNode { /// Returns the list of all the RecordKeys held by the node pub async fn get_all_record_addresses(&self) -> Result> { + #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress let addresses: HashSet<_> = self .network .get_all_local_record_addresses() diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 4ba458448e..b2731e8dd5 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -36,7 +36,7 @@ pub(crate) struct NodeMetricsRecorder { // wallet pub(crate) current_reward_wallet_balance: Gauge, - pub(crate) total_forwarded_rewards: Gauge, + pub(crate) _total_forwarded_rewards: Gauge, // to track the uptime of the node. pub(crate) started_instant: Instant, @@ -130,7 +130,7 @@ impl NodeMetricsRecorder { peer_added_to_routing_table, peer_removed_from_routing_table, current_reward_wallet_balance, - total_forwarded_rewards, + _total_forwarded_rewards: total_forwarded_rewards, started_instant: Instant::now(), uptime, } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 0caeab2fa7..3ca3e015b6 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -7,10 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
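// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] How the new pieces above could be
// driven from inside the crate: the `Into<EvmNetwork>` impl from
// subcommands.rs feeds a parsed CLI choice into sn_evm, and a caller can
// watch for the new `NodeEvent::RewardReceived` variant from event.rs. The
// `Cli` struct and `watch_rewards` helper are hypothetical illustration
// names; the sketch assumes the events channel exposes a `receiver()`
// accessor over the underlying tokio broadcast channel.
//
// use clap::Parser;
//
// #[derive(Parser, Debug)]
// struct Cli {
//     #[command(subcommand)]
//     evm_network: EvmNetworkCommand,
// }
//
// fn chosen_network() -> EvmNetwork {
//     // e.g. `safenode evm-custom --rpc-url http://localhost:8545 -p 0x.. -c 0x..`
//     Cli::parse().evm_network.into()
// }
//
// async fn watch_rewards(events: NodeEventsChannel) {
//     let mut rx = events.receiver();
//     while let Ok(event) = rx.recv().await {
//         if let NodeEvent::RewardReceived(amount, addr) = event {
//             info!("reward of {amount} received for record at {addr:?}");
//         }
//     }
// }
// ---------------------------------------------------------------------------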
diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs
index 0caeab2fa7..3ca3e015b6 100644
--- a/sn_node/src/node.rs
+++ b/sn_node/src/node.rs
@@ -7,10 +7,7 @@
 // permissions and limitations relating to use of the SAFE Network Software.

 use super::{
-    error::{Error, Result},
-    event::NodeEventsChannel,
-    quote::quotes_verification,
-    Marker, NodeEvent,
+    error::Result, event::NodeEventsChannel, quote::quotes_verification, Marker, NodeEvent,
 };
 #[cfg(feature = "open-metrics")]
 use crate::metrics::NodeMetricsRecorder;
@@ -18,10 +15,11 @@ use crate::RunningNode;
 use bytes::Bytes;
 use libp2p::{identity::Keypair, Multiaddr, PeerId};
 #[cfg(feature = "open-metrics")]
-use prometheus_client::metrics::{gauge::Gauge, info::Info};
+use prometheus_client::metrics::info::Info;
 #[cfg(feature = "open-metrics")]
 use prometheus_client::registry::Registry;
 use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng};
+use sn_evm::{AttoTokens, RewardsAddress};
 use sn_networking::{
     close_group_majority, Instant, Network, NetworkBuilder, NetworkError, NetworkEvent, NodeIssue,
     SwarmDriver,
@@ -31,7 +29,6 @@ use sn_protocol::{
     messages::{ChunkProof, CmdResponse, Query, QueryResponse, Request, Response},
     NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
 };
-use sn_transfers::{HotWallet, MainPubkey, MainSecretKey, NanoTokens, PAYMENT_FORWARD_PK};
 use std::{
     net::SocketAddr,
     path::PathBuf,
@@ -46,12 +43,7 @@ use tokio::{
     task::{spawn, JoinHandle},
 };

-#[cfg(feature = "reward-forward")]
-use libp2p::kad::{Quorum, Record};
-#[cfg(feature = "reward-forward")]
-use sn_networking::PutRecordCfg;
-#[cfg(feature = "reward-forward")]
-use sn_protocol::storage::{try_serialize_record, RecordKind, SpendAddress};
+use sn_evm::EvmNetwork;

 /// Interval to trigger replication of all records to all peers.
 /// This is the max time it should take. Minimum interval at any node will be half this
@@ -61,10 +53,6 @@ pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45;
 /// This is the max time it should take. Minimum interval at any node will be half this
 const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 600;

-/// Interval to trigger reward forwarding.
-/// This is the max time it should take. Minimum interval at any node will be half this
-const PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S: u64 = 450;
-
 /// Max number of attempts that chunk proof verification will be carried out against certain target,
 /// before classifying peer as a bad peer.
 const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3;
@@ -72,10 +60,6 @@ const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3;
 /// Interval between chunk proof verification to be retired against the same target.
 const CHUNK_PROOF_VERIFY_RETRY_INTERVAL: Duration = Duration::from_secs(15);

-#[cfg(feature = "reward-forward")]
-/// Track the forward balance by storing the balance in a file. This is useful to restore the balance between restarts.
-const FORWARDED_BALANCE_FILE_NAME: &str = "forwarded_balance";
-
 /// Interval to update the nodes uptime metric
 const UPTIME_METRICS_UPDATE_INTERVAL: Duration = Duration::from_secs(10);

@@ -84,7 +68,9 @@ const UNRELEVANT_RECORDS_CLEANUP_INTERVAL: Duration = Duration::from_secs(3600);

 /// Helper to build and run a Node
 pub struct NodeBuilder {
-    keypair: Keypair,
+    identity_keypair: Keypair,
+    evm_address: RewardsAddress,
+    evm_network: EvmNetwork,
     addr: SocketAddr,
     initial_peers: Vec<Multiaddr>,
     local: bool,
     root_dir: PathBuf,
     #[cfg(feature = "open-metrics")]
     metrics_server_port: Option<u16>,
     /// Enable hole punching for nodes connecting from home networks.
     pub is_behind_home_network: bool,
-    owner: Option<String>,
     #[cfg(feature = "upnp")]
     upnp: bool,
 }

 impl NodeBuilder {
     /// Instantiate the builder
+    #[expect(clippy::too_many_arguments)]
     pub fn new(
-        keypair: Keypair,
+        identity_keypair: Keypair,
+        evm_address: RewardsAddress,
+        evm_network: EvmNetwork,
         addr: SocketAddr,
         initial_peers: Vec<Multiaddr>,
         local: bool,
         root_dir: PathBuf,
-        owner: Option<String>,
         #[cfg(feature = "upnp")] upnp: bool,
     ) -> Self {
         Self {
-            keypair,
+            identity_keypair,
+            evm_address,
+            evm_network,
             addr,
             initial_peers,
             local,
@@ -119,7 +108,6 @@ impl NodeBuilder {
             #[cfg(feature = "open-metrics")]
             metrics_server_port: None,
             is_behind_home_network: false,
-            owner,
             #[cfg(feature = "upnp")]
             upnp,
         }
     }
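// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] Calling the reworked constructor.
// With the wallet-derived reward key gone (see build_and_run below), the
// caller now supplies the EVM rewards address and network up front.
// `build_node` and all values are placeholders; the trailing `false` assumes
// the `upnp` feature is enabled (without it, `new` takes one argument fewer).
//
// use std::net::{IpAddr, Ipv4Addr, SocketAddr};
//
// fn build_node(identity: Keypair, rewards: RewardsAddress) -> Result<RunningNode> {
//     let builder = NodeBuilder::new(
//         identity,
//         rewards,
//         EvmNetwork::ArbitrumOne,
//         SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
//         vec![],                       // initial peers to bootstrap from
//         false,                        // local: not a LAN-only test network
//         PathBuf::from("./node-data"), // root dir for the record store
//         false,                        // upnp
//     );
//     builder.build_and_run()
// }
// ---------------------------------------------------------------------------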
@@ -144,21 +132,8 @@ impl NodeBuilder {
     ///
     /// Returns an error if there is a problem initializing the `SwarmDriver`.
     pub fn build_and_run(self) -> Result<RunningNode> {
-        // Using the signature as the seed of generating the reward_key
-        let sig_vec = match self.keypair.sign(b"generate reward seed") {
-            Ok(sig) => sig,
-            Err(_err) => return Err(Error::FailedToGenerateRewardKey),
-        };
-        let mut rng = sn_transfers::rng::from_vec(&sig_vec);
-
-        let reward_key = MainSecretKey::random_from_rng(&mut rng);
-        let reward_address = reward_key.main_pubkey();
-
-        let mut wallet = HotWallet::load_from_main_key(&self.root_dir, reward_key)?;
-        // store in case it's a fresh wallet created if none was found
-        wallet.deposit_and_store_to_disk(&vec![])?;
-
-        let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir);
+        let mut network_builder =
+            NetworkBuilder::new(self.identity_keypair, self.local, self.root_dir);

         #[cfg(feature = "open-metrics")]
         let node_metrics = if self.metrics_server_port.is_some() {
@@ -201,10 +176,10 @@ impl NodeBuilder {
             network: network.clone(),
             events_channel: node_events_channel.clone(),
             initial_peers: self.initial_peers,
-            reward_address,
+            reward_address: self.evm_address,
             #[cfg(feature = "open-metrics")]
             node_metrics,
-            owner: self.owner,
+            evm_network: self.evm_network,
         };
         let node = Node {
             inner: Arc::new(node),
@@ -238,10 +213,8 @@ struct NodeInner {
     network: Network,
     #[cfg(feature = "open-metrics")]
     node_metrics: Option<NodeMetricsRecorder>,
-    /// Node owner's discord username, in readable format
-    /// If not set, there will be no payment forward to be undertaken
-    owner: Option<String>,
-    reward_address: MainPubkey,
+    reward_address: RewardsAddress,
+    evm_network: EvmNetwork,
 }

 impl Node {
@@ -266,37 +239,21 @@ impl Node {
         self.inner.node_metrics.as_ref()
     }

-    /// Returns the owner of the node
-    pub(crate) fn owner(&self) -> Option<&String> {
-        self.inner.owner.as_ref()
-    }
-
     /// Returns the reward address of the node
-    pub(crate) fn reward_address(&self) -> &MainPubkey {
+    pub(crate) fn reward_address(&self) -> &RewardsAddress {
         &self.inner.reward_address
     }

+    pub(crate) fn evm_network(&self) -> &EvmNetwork {
+        &self.inner.evm_network
+    }
+
     /// Runs the provided `SwarmDriver` and spawns a task to process for `NetworkEvents`
     fn run(self, swarm_driver: SwarmDriver, mut network_event_receiver: Receiver<NetworkEvent>) {
         let mut rng = StdRng::from_entropy();

         let peers_connected = Arc::new(AtomicUsize::new(0));

-        // read the forwarded balance from the file and set the metric.
-        // This is done initially because reward forwarding takes a while to kick in
-        #[cfg(all(feature = "reward-forward", feature = "open-metrics"))]
-        let node_copy = self.clone();
-        #[cfg(all(feature = "reward-forward", feature = "open-metrics"))]
-        let _handle = spawn(async move {
-            let root_dir = node_copy.network().root_dir_path().clone();
-            let balance_file_path = root_dir.join(FORWARDED_BALANCE_FILE_NAME);
-            let balance = read_forwarded_balance_value(&balance_file_path);
-
-            if let Some(node_metrics) = node_copy.node_metrics() {
-                let _ = node_metrics.total_forwarded_rewards.set(balance as i64);
-            }
-        });
-
         let _handle = spawn(swarm_driver.run());
         let _handle = spawn(async move {
             // use a random inactivity timeout to ensure that the nodes do not sync when messages
@@ -323,19 +280,6 @@ impl Node {

             let mut rolling_index = 0;

-            // use a random timeout to ensure not sync when transmit messages.
-            let balance_forward_interval: u64 = rng.gen_range(
-                PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S / 2..PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S,
-            );
-            let balance_forward_time = Duration::from_secs(balance_forward_interval);
-            debug!(
-                "BalanceForward interval set to {balance_forward_time:?} to: {:?}",
-                PAYMENT_FORWARD_PK.to_hex(),
-            );
-
-            let mut balance_forward_interval = tokio::time::interval(balance_forward_time);
-            let _ = balance_forward_interval.tick().await; // first tick completes immediately
-
             let mut uptime_metrics_update_interval =
                 tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL);
             let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately
@@ -395,36 +339,6 @@ impl Node {
                         rolling_index += 1;
                     }
                 }
-                // runs every balance_forward_interval time
-                _ = balance_forward_interval.tick() => {
-                    if cfg!(feature = "reward-forward") {
-                        if let Some(owner) = self.owner() {
-                            let start = Instant::now();
-                            debug!("Periodic balance forward triggered");
-                            let network = self.network().clone();
-                            let forwarding_reason = owner.clone();
-
-                            #[cfg(feature = "open-metrics")]
-                            let total_forwarded_rewards = self.node_metrics().map(|metrics|metrics.total_forwarded_rewards.clone());
-                            #[cfg(feature = "open-metrics")]
-                            let current_reward_wallet_balance = self.node_metrics().map(|metrics|metrics.current_reward_wallet_balance.clone());
-
-                            let _handle = spawn(async move {
-
-                                #[cfg(feature = "open-metrics")]
-                                if let Err(err) = Self::try_forward_balance(network, forwarding_reason, total_forwarded_rewards,current_reward_wallet_balance) {
-                                    error!("Error while trying to forward balance: {err:?}");
-                                }
-                                #[cfg(not(feature = "open-metrics"))]
-                                if let Err(err) = Self::try_forward_balance(network, forwarding_reason) {
-                                    error!("Error while trying to forward balance: {err:?}");
-                                }
-                                info!("Periodic balance forward took {:?}", start.elapsed());
-                            });
-                        }
-
-                    }
-                }
                 _ = uptime_metrics_update_interval.tick() => {
                     #[cfg(feature = "open-metrics")]
                     if let Some(node_metrics) = self.node_metrics() {
@@ -694,7 +608,7 @@ impl Node {
     async fn handle_query(
         network: &Network,
         query: Query,
-        payment_address: MainPubkey,
+        payment_address: RewardsAddress,
     ) -> Response {
         let resp: QueryResponse = match query {
             Query::GetStoreCost(address) => {
@@ -706,7 +620,7 @@ impl Node {
                 match store_cost {
                     Ok((cost, quoting_metrics)) => {
-                        if cost == NanoTokens::zero() {
+                        if cost == AttoTokens::zero() {
                             QueryResponse::GetStoreCost {
                                 quote: Err(ProtocolError::RecordExists(
                                     PrettyPrintRecordKey::from(&record_key).into_owned(),
@@ -721,6 +635,7 @@ impl Node {
                                 cost,
                                 &address,
                                 &quoting_metrics,
+                                &payment_address,
                             ),
payment_address, peer_address: NetworkAddress::from_peer(self_id), @@ -862,130 +777,6 @@ impl Node { } } } - - /// Forward received rewards to another address - fn try_forward_balance( - network: Network, - forward_reason: String, - #[cfg(feature = "open-metrics")] forwarded_balance_metric: Option, - #[cfg(feature = "open-metrics")] current_reward_wallet_balance: Option, - ) -> Result<()> { - let mut spend_requests = vec![]; - { - // load wallet - let mut wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = wallet.balance(); - - if !balance.is_zero() { - let payee = vec![(balance, *PAYMENT_FORWARD_PK)]; - spend_requests.extend(wallet.prepare_forward_signed_spend(payee, forward_reason)?); - } - } - let total_forwarded_amount = spend_requests - .iter() - .map(|s| s.amount().as_nano()) - .sum::(); - - let record_kind = RecordKind::Spend; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: None, - use_put_record_to: None, - verification: None, - }; - - info!( - "Reward forwarding sending {} spends in this iteration. Total forwarded amount: {total_forwarded_amount}", - spend_requests.len() - ); - - for spend_request in spend_requests { - let network_clone = network.clone(); - let put_cfg_clone = put_cfg.clone(); - - // Sent out spend in separate thread to avoid blocking the main one - let _handle = spawn(async move { - let unique_pubkey = *spend_request.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let record_key = network_address.to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&record_key); - - debug!("Reward forwarding in spend {pretty_key:?}: {spend_request:#?}"); - - let value = if let Ok(value) = try_serialize_record(&[spend_request], record_kind) { - value - } else { - error!("Reward forwarding: Failed to serialise spend {pretty_key:?}"); - return; - }; - - let record = Record { - key: record_key.clone(), - value: value.to_vec(), - publisher: None, - expires: None, - }; - - let result = network_clone.put_record(record, &put_cfg_clone).await; - - match result { - Ok(_) => info!("Reward forwarding completed sending spend {pretty_key:?}"), - Err(err) => { - info!("Reward forwarding: sending spend {pretty_key:?} failed with {err:?}") - } - } - }); - - std::thread::sleep(Duration::from_millis(500)); - } - - // write the balance to a file - let balance_file_path = network.root_dir_path().join(FORWARDED_BALANCE_FILE_NAME); - let old_balance = read_forwarded_balance_value(&balance_file_path); - let updated_balance = old_balance + total_forwarded_amount; - debug!("Updating forwarded balance to {updated_balance}"); - write_forwarded_balance_value(&balance_file_path, updated_balance)?; - - #[cfg(feature = "open-metrics")] - { - if let Some(forwarded_balance_metric) = forwarded_balance_metric { - let _ = forwarded_balance_metric.set(updated_balance as i64); - } - - let wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = wallet.balance(); - if let Some(current_reward_wallet_balance) = current_reward_wallet_balance { - let _ = current_reward_wallet_balance.set(balance.as_nano() as i64); - } - } - - Ok(()) - } -} - -fn read_forwarded_balance_value(balance_file_path: &PathBuf) -> u64 { - debug!("Reading forwarded balance from file {balance_file_path:?}"); - match std::fs::read_to_string(balance_file_path) { - Ok(balance) => balance.parse::().unwrap_or_else(|_| { - debug!("The balance from file is 
not a valid number"); - 0 - }), - Err(_) => { - debug!("Error while reading to string, setting the balance to 0. This can happen at node init."); - 0 - } - } -} - -fn write_forwarded_balance_value(balance_file_path: &PathBuf, balance: u64) -> Result<()> { - if let Err(err) = std::fs::write(balance_file_path, balance.to_string()) { - error!( - "Failed to write the updated balance to the file {balance_file_path:?} with {err:?}" - ); - } - Ok(()) } async fn chunk_proof_verify_peer( @@ -1052,29 +843,3 @@ fn received_valid_chunk_proof( None } } - -#[cfg(test)] -mod tests { - - use crate::node::{read_forwarded_balance_value, write_forwarded_balance_value}; - use color_eyre::Result; - use tempfile::tempdir; - #[test] - fn read_and_write_reward_to_file() -> Result<()> { - let dir = tempdir()?; - let balance_file_path = dir.path().join("forwarded_balance"); - - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 0); - - write_forwarded_balance_value(&balance_file_path, balance + 10)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 10); - - write_forwarded_balance_value(&balance_file_path, balance + 100)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 110); - - Ok(()) - } -} diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 8839c8d631..f78d0990fa 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -6,34 +6,32 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result}; +use crate::{node::Node, Error, Marker, Result}; use libp2p::kad::{Record, RecordKey}; +use sn_evm::ProofOfPayment; use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ storage::{ - try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, - Scratchpad, SpendAddress, + try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, Scratchpad, SpendAddress }, NetworkAddress, PrettyPrintRecordKey, }; use sn_registers::SignedRegister; -use sn_transfers::{ - calculate_royalties_fee, CashNote, CashNoteRedemption, HotWallet, NanoTokens, Payment, - SignedSpend, Transfer, TransferError, UniquePubkey, WalletError, NETWORK_ROYALTIES_PK, -}; +use sn_transfers::{SignedSpend, TransferError, UniquePubkey, QUOTE_EXPIRATION_SECS}; use std::collections::BTreeSet; +use std::time::{Duration, UNIX_EPOCH}; use tokio::task::JoinSet; use xor_name::XorName; impl Node { - /// Validate a record and it's payment, and store the record to the RecordStore + /// Validate a record and its payment, and store the record to the RecordStore pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> { let record_header = RecordHeader::from_record(&record)?; match record_header.kind { RecordKind::ChunkWithPayment => { let record_key = record.key.clone(); - let (payment, chunk) = try_deserialize_record::<(Payment, Chunk)>(&record)?; + let (payment, chunk) = try_deserialize_record::<(ProofOfPayment, Chunk)>(&record)?; let already_exists = self .validate_key_and_existence(&chunk.network_address(), &record_key) .await?; @@ -97,7 +95,7 @@ impl Node { RecordKind::ScratchpadWithPayment => { let record_key = record.key.clone(); let (payment, scratchpad) = - 
try_deserialize_record::<(Payment, Scratchpad)>(&record)?; + try_deserialize_record::<(ProofOfPayment, Scratchpad)>(&record)?; let _already_exists = self .validate_key_and_existence(&scratchpad.network_address(), &record_key) .await?; @@ -209,7 +207,7 @@ impl Node { } RecordKind::RegisterWithPayment => { let (payment, register) = - try_deserialize_record::<(Payment, SignedRegister)>(&record)?; + try_deserialize_record::<(ProofOfPayment, SignedRegister)>(&record)?; // check if the deserialized value's RegisterAddress matches the record's key let net_addr = NetworkAddress::from_register_address(*register.address()); @@ -573,160 +571,68 @@ impl Node { Ok(()) } - /// Gets CashNotes out of Transfers, this includes network verifications of the Transfers - /// Rewraps the royalties transfers into encrypted Transfers ready to be sent directly to the beneficiary - async fn cash_notes_from_transfers( - &self, - transfers: Vec, - wallet: &HotWallet, - pretty_key: PrettyPrintRecordKey<'static>, - ) -> Result<(NanoTokens, Vec, Vec)> { - let royalties_pk = *NETWORK_ROYALTIES_PK; - let mut cash_notes = vec![]; - let mut royalties_cash_notes_r = vec![]; - let mut received_fee = NanoTokens::zero(); - - for transfer in transfers { - match transfer { - Transfer::Encrypted(_) => match self - .network() - .verify_and_unpack_transfer(&transfer, wallet) - .await - { - // transfer not for us - Err(NetworkError::Wallet(WalletError::FailedToDecypherTransfer)) => continue, - // transfer invalid - Err(e) => return Err(e.into()), - // transfer ok, add to cash_notes and continue as more transfers might be ours - Ok(cns) => cash_notes.extend(cns), - }, - Transfer::NetworkRoyalties(cashnote_redemptions) => { - match self - .network() - .verify_cash_notes_redemptions(royalties_pk, &cashnote_redemptions) - .await - { - Ok(cash_notes) => { - let received_royalties = total_cash_notes_amount(&cash_notes)?; - debug!( - "{} network royalties payment cash notes found for record {pretty_key} for a total value of {received_royalties:?}", - cash_notes.len() - ); - royalties_cash_notes_r.extend(cashnote_redemptions); - received_fee = received_fee - .checked_add(received_royalties) - .ok_or_else(|| Error::NumericOverflow)?; - } - Err(e) => { - warn!( - "Invalid network royalties payment for record {pretty_key}: {e:?}" - ); - } - } - } - } - } - - if cash_notes.is_empty() { - Err(Error::NoPaymentToOurNode(pretty_key)) - } else { - let received_fee_to_our_node = total_cash_notes_amount(&cash_notes)?; - info!( - "{} cash note/s (for a total of {received_fee_to_our_node:?}) are for us for {pretty_key}", - cash_notes.len() - ); - received_fee = received_fee - .checked_add(received_fee_to_our_node) - .ok_or_else(|| Error::NumericOverflow)?; - - Ok((received_fee, cash_notes, royalties_cash_notes_r)) - } - } - /// Perform validations on the provided `Record`. 
async fn payment_for_us_exists_and_is_still_valid( &self, address: &NetworkAddress, - payment: Payment, + payment: ProofOfPayment, ) -> Result<()> { let key = address.to_record_key(); let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); debug!("Validating record payment for {pretty_key}"); - // load wallet - let mut wallet = HotWallet::load_from(self.network().root_dir_path())?; - let old_balance = wallet.balance().as_nano(); - - // unpack transfer - debug!("Unpacking incoming Transfers for record {pretty_key}"); - let (received_fee, mut cash_notes, royalties_cash_notes_r) = self - .cash_notes_from_transfers(payment.transfers, &wallet, pretty_key.clone()) - .await?; - - // check for cash notes that we have already spent - // this can happen in cases where the client retries a failed PUT after we have already used the cash note - cash_notes.retain(|cash_note| { - let already_present = wallet.cash_note_presents(&cash_note.unique_pubkey()); - if already_present { - return !already_present; - } - - let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - !wallet.has_confirmed_spend(spend_addr) - }); - if cash_notes.is_empty() { - info!("All incoming cash notes were already received, no need to further process"); - return Err(Error::ReusedPayment); + // check if the quote is valid + let storecost = payment.quote.cost; + let self_peer_id = self.network().peer_id(); + if !payment.quote.check_is_signed_by_claimed_peer(self_peer_id) { + warn!("Payment quote signature is not valid for record {pretty_key}"); + return Err(Error::InvalidRequest(format!( + "Payment quote signature is not valid for record {pretty_key}" + ))); } - - debug!("Received payment of {received_fee:?} for {pretty_key}"); + debug!("Payment quote signature is valid for record {pretty_key}"); + + // verify quote timestamp + let quote_timestamp = payment.quote.timestamp; + let quote_expiration_time = quote_timestamp + Duration::from_secs(QUOTE_EXPIRATION_SECS); + let quote_expiration_time_in_secs = quote_expiration_time + .duration_since(UNIX_EPOCH) + .map_err(|e| { + Error::InvalidRequest(format!( + "Payment quote timestamp is invalid for record {pretty_key}: {e}" + )) + })? + .as_secs(); + + // check if payment is valid on chain + debug!("Verifying payment for record {pretty_key}"); + self.evm_network() + .verify_chunk_payment( + payment.tx_hash, + payment.quote.hash(), + *self.reward_address(), + storecost.as_atto(), + quote_expiration_time_in_secs, + ) + .await + .map_err(|e| Error::EvmNetwork(format!("Failed to verify chunk payment: {e}")))?; + debug!("Payment is valid for record {pretty_key}"); // Notify `record_store` that the node received a payment. self.network().notify_payment_received(); - // deposit the CashNotes in our wallet - wallet.deposit_and_store_to_disk(&cash_notes)?; - let new_balance = wallet.balance().as_nano(); - info!( - "The new wallet balance is {new_balance}, after earning {}", - new_balance - old_balance - ); - #[cfg(feature = "open-metrics")] if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics + let _prev = node_metrics .current_reward_wallet_balance - .set(new_balance as i64); - } - - if royalties_cash_notes_r.is_empty() { - warn!("No network royalties payment found for record {pretty_key}"); - return Err(Error::NoNetworkRoyaltiesPayment(pretty_key.into_owned())); + .inc_by(storecost.as_atto().try_into().unwrap_or(i64::MAX)); // TODO maybe metrics should be in u256 too? 
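// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] The timestamp arithmetic above in
// isolation: a quote stays usable until `timestamp + QUOTE_EXPIRATION_SECS`
// (imported from sn_transfers above), and that deadline is shipped to the EVM
// contract as seconds since the Unix epoch; the chain, not the node, performs
// the final freshness check. Helper names here are hypothetical.
//
// use std::time::{Duration, SystemTime, UNIX_EPOCH};
//
// fn quote_expiry_unix_secs(quote_timestamp: SystemTime) -> Option<u64> {
//     let expiry = quote_timestamp + Duration::from_secs(QUOTE_EXPIRATION_SECS);
//     // fails only if the expiry somehow predates the epoch, mirroring the
//     // InvalidRequest error path above
//     expiry.duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs())
// }
//
// fn quote_already_expired(quote_timestamp: SystemTime) -> bool {
//     quote_timestamp + Duration::from_secs(QUOTE_EXPIRATION_SECS) < SystemTime::now()
// }
// ---------------------------------------------------------------------------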
         }
+        self.events_channel()
+            .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone()));

-        // check if the quote is valid
-        let storecost = payment.quote.cost;
-        verify_quote_for_storecost(self.network(), payment.quote, address)?;
-        debug!("Payment quote valid for record {pretty_key}");
-
-        // Let's check payment is sufficient both for our store cost and for network royalties
-        // Since the storage payment is made to a single node, we can calculate the royalties fee based on that single payment.
-        let expected_royalties_fee = calculate_royalties_fee(storecost);
-        let expected_fee = storecost
-            .checked_add(expected_royalties_fee)
-            .ok_or(Error::NumericOverflow)?;
-
-        // finally, (after we accept any payments to us as they are ours now anyway)
-        // lets check they actually paid enough
-        if received_fee < expected_fee {
-            debug!("Payment insufficient for record {pretty_key}. {received_fee:?} is less than {expected_fee:?}");
-            return Err(Error::PaymentProofInsufficientAmount {
-                paid: received_fee,
-                expected: expected_fee,
-            });
-        }
+        // NB TODO: tell happybeing about the AttoToken change
         // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
-        info!("Total payment of {received_fee:?} nanos accepted for record {pretty_key}");
+        info!("Total payment of {storecost:?} nanos accepted for record {pretty_key}");

         Ok(())
     }
@@ -1004,19 +910,3 @@ impl Node {
             }
         }
     }
-
-// Helper to calculate total amout of tokens received in a given set of CashNotes
-fn total_cash_notes_amount<'a, I>(cash_notes: I) -> Result<NanoTokens>
-where
-    I: IntoIterator<Item = &'a CashNote>,
-{
-    let mut received_fee = NanoTokens::zero();
-    for cash_note in cash_notes {
-        let amount = cash_note.value();
-        received_fee = received_fee
-            .checked_add(amount)
-            .ok_or(Error::NumericOverflow)?;
-    }
-
-    Ok(received_fee)
-}
diff --git a/sn_node/src/quote.rs b/sn_node/src/quote.rs
index 2020a2995d..42079b1d0c 100644
--- a/sn_node/src/quote.rs
+++ b/sn_node/src/quote.rs
@@ -8,21 +8,28 @@

 use crate::{node::Node, Error, Result};
 use libp2p::PeerId;
+use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress};
 use sn_networking::{calculate_cost_for_records, Network, NodeIssue};
 use sn_protocol::{error::Error as ProtocolError, storage::ChunkAddress, NetworkAddress};
-use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics};
 use std::time::Duration;

 impl Node {
     pub(crate) fn create_quote_for_storecost(
         network: &Network,
-        cost: NanoTokens,
+        cost: AttoTokens,
         address: &NetworkAddress,
         quoting_metrics: &QuotingMetrics,
+        payment_address: &RewardsAddress,
     ) -> Result<PaymentQuote> {
         let content = address.as_xorname().unwrap_or_default();
         let timestamp = std::time::SystemTime::now();
-        let bytes = PaymentQuote::bytes_for_signing(content, cost, timestamp, quoting_metrics);
+        let bytes = PaymentQuote::bytes_for_signing(
+            content,
+            cost,
+            timestamp,
+            quoting_metrics,
+            payment_address,
+        );

         let Ok(signature) = network.sign(&bytes) else {
             return Err(ProtocolError::QuoteGenerationFailed);
@@ -34,6 +41,7 @@ impl Node {
             timestamp,
             quoting_metrics: quoting_metrics.clone(),
             pub_key: network.get_pub_key(),
+            rewards_address: *payment_address,
             signature,
         };
@@ -60,12 +68,7 @@ pub(crate) fn verify_quote_for_storecost(
     }

     // check sig
-    let bytes = PaymentQuote::bytes_for_signing(
-        quote.content,
-        quote.cost,
-        quote.timestamp,
-        &quote.quoting_metrics,
-    );
+    let bytes = quote.bytes_for_sig();
     let signature = quote.signature;
     if !network.verify(&bytes, &signature) {
         return Err(Error::InvalidQuoteSignature);
@@ -96,7 +99,7 @@ pub(crate)
async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, .filter(|(peer_id, quote)| { let is_same_target = quote.content == self_quote.content; let is_not_self = *peer_id != network.peer_id(); - let is_not_zero_quote = quote.cost != NanoTokens::zero(); + let is_not_zero_quote = quote.cost != AttoTokens::zero(); let time_gap = Duration::from_secs(10); let is_around_same_time = if quote.timestamp > self_quote.timestamp { @@ -119,7 +122,7 @@ pub(crate) async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, quotes_for_nodes_duty.retain(|(peer_id, quote)| { let cost = calculate_cost_for_records(quote.quoting_metrics.close_records_stored); - let is_same_as_expected = quote.cost == NanoTokens::from(cost); + let is_same_as_expected = quote.cost == AttoTokens::from_u64(cost); if !is_same_as_expected { info!("Quote from {peer_id:?} using a different quoting_metrics to achieve the claimed cost. Quote {quote:?} can only result in cost {cost:?}"); diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index baba07c851..36626b920d 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -23,8 +23,7 @@ use sn_protocol::{ NetworkAddress, }; use sn_registers::Permissions; -use sn_transfers::HotWallet; -use sn_transfers::{CashNote, MainSecretKey, NanoTokens}; +use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; use std::{ collections::{BTreeMap, VecDeque}, fmt, diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 1352a24659..8d06a87187 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -1,683 +1,683 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
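// ---------------------------------------------------------------------------
// [Editor's sketch, not part of this patch] Tied to the quote.rs hunk above:
// the quote signature now commits to the node's rewards address, so a quote
// cannot be replayed with a different payout target. Both signer and verifier
// must serialize the same fields; this is roughly the byte string that
// `quote.bytes_for_sig()` has to reconstruct on the verifying side, assuming
// `bytes_for_signing` returns the serialized bytes as a `Vec<u8>`:
//
// fn bytes_to_verify(quote: &PaymentQuote) -> Vec<u8> {
//     PaymentQuote::bytes_for_signing(
//         quote.content,
//         quote.cost,
//         quote.timestamp,
//         &quote.quoting_metrics,
//         &quote.rewards_address,
//     )
// }
// ---------------------------------------------------------------------------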
- -mod common; - -use assert_fs::TempDir; -use assert_matches::assert_matches; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::{bail, Result}; -use itertools::Itertools; -use sn_logging::LogBuilder; -use sn_networking::NetworkError; -use sn_transfers::{ - get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, - WalletError, GENESIS_CASHNOTE, -}; -use std::time::Duration; -use tracing::*; - -#[tokio::test] -async fn cash_note_transfer_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - // create 1 wallet add money from faucet - let first_wallet_dir = TempDir::new()?; - - let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - // create wallet 2 and 3 to receive money from 1 - let second_wallet_dir = TempDir::new()?; - let second_wallet = get_wallet(second_wallet_dir.path()); - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - let third_wallet_dir = TempDir::new()?; - let third_wallet = get_wallet(third_wallet_dir.path()); - assert_eq!(third_wallet.balance(), NanoTokens::zero()); - - // manually forge two transfers of the same source - let amount = NanoTokens::from(first_wallet_balance / 3); - let to1 = first_wallet.address(); - let to2 = second_wallet.address(); - let to3 = third_wallet.address(); - - let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; - let same_cash_notes = some_cash_notes.clone(); - - let mut rng = rng::thread_rng(); - - let reason = SpendReason::default(); - let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); - - let transfer_to_2 = SignedTransaction::new( - some_cash_notes, - vec![to2_unique_key], - to1, - reason.clone(), - first_wallet.key(), - )?; - let transfer_to_3 = SignedTransaction::new( - same_cash_notes, - vec![to3_unique_key], - to1, - reason, - first_wallet.key(), - )?; - - // send both transfers to the network - // upload won't error out, only error out during verification. - info!("Sending both transfers to the network..."); - let res = client.send_spends(transfer_to_2.spends.iter(), false).await; - assert!(res.is_ok()); - let res = client.send_spends(transfer_to_3.spends.iter(), false).await; - assert!(res.is_ok()); - - // we wait 5s to ensure that the double spend attempt is detected and accumulated - info!("Verifying the transfers from first wallet... 
Sleeping for 10 seconds."); - tokio::time::sleep(Duration::from_secs(10)).await; - - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - - // check the CashNotes, it should fail - let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; - let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; - info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); - assert!(should_err1.is_err() && should_err2.is_err()); - assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - Ok(()) -} - -#[tokio::test] -async fn genesis_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - - // create a client and an unused wallet to make sure some money already exists in the system - let first_wallet_dir = TempDir::new()?; - let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_addr = first_wallet.address(); - - // create a new genesis wallet with the intention to spend genesis again - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; - second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; - let genesis_amount = GENESIS_CASHNOTE.value(); - let second_wallet_addr = second_wallet.address(); - - // create a transfer from the second wallet to the first wallet - // this will spend Genesis (again) and transfer its value to the first wallet - let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; - let mut rng = rng::thread_rng(); - let recipient = ( - genesis_amount, - first_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let change_addr = second_wallet_addr; - let reason = SpendReason::default(); - let transfer = SignedTransaction::new( - genesis_cashnote, - vec![recipient], - change_addr, - reason, - second_wallet.key(), - )?; - - // send the transfer to the network which will mark genesis as a double spent - // making its direct descendants unspendable - let res = client.send_spends(transfer.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert!(res.is_ok()); - - // put the bad cashnote in the first wallet - first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; - - // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) - let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; - let recipient = ( - genesis_amount, - second_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let bad_genesis_descendant = genesis_cashnote_and_others - .iter() - .find(|cn| cn.value() == genesis_amount) - .unwrap() - .clone(); - let change_addr = first_wallet_addr; - let reason = SpendReason::default(); - let transfer2 = SignedTransaction::new( - vec![bad_genesis_descendant], - vec![recipient], - change_addr, - reason, - first_wallet.key(), - )?; - - // send the transfer to the network which should reject it - let res = 
client.send_spends(transfer2.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); - - Ok(()) -} - -#[tokio::test] -async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_1 = TempDir::new()?; - - let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; - let balance_1 = wallet_1.balance().as_nano(); - let amount = NanoTokens::from(balance_1 / 2); - let to1 = wallet_1.address(); - - // Send from 1 -> 2 - let wallet_dir_2 = TempDir::new()?; - let mut wallet_2 = get_wallet(wallet_dir_2.path()); - assert_eq!(wallet_2.balance(), NanoTokens::zero()); - - let to2 = wallet_2.address(); - let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; - let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let transfer_to_2 = SignedTransaction::new( - cash_notes_1.clone(), - vec![to_2_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; - - info!("Sending 1->2 to the network..."); - client - .send_spends(transfer_to_2.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 1 -> 2 wallet..."); - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_2[0]).await?; - wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 - - // Send from 2 -> 22 - let wallet_dir_22 = TempDir::new()?; - let mut wallet_22 = get_wallet(wallet_dir_22.path()); - assert_eq!(wallet_22.balance(), NanoTokens::zero()); - - let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; - assert!(!cash_notes_2.is_empty()); - let to_22_unique_key = ( - wallet_2.balance(), - wallet_22.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_22 = SignedTransaction::new( - cash_notes_2, - vec![to_22_unique_key], - to2, - reason.clone(), - wallet_2.key(), - )?; - - client - .send_spends(transfer_to_22.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 2 -> 22 wallet..."); - let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_22[0]).await?; - wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 - - // Try to double spend from 1 -> 3 - let wallet_dir_3 = TempDir::new()?; - let wallet_3 = get_wallet(wallet_dir_3.path()); - assert_eq!(wallet_3.balance(), NanoTokens::zero()); - - let to_3_unique_key = ( - amount, - wallet_3.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_3 = SignedTransaction::new( - cash_notes_1, - vec![to_3_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_3.spends.iter(), false) - .await?; - info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned - info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); - assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned - - // The old spend has been poisoned, but spends from 22 -> 222 should still work - let wallet_dir_222 = TempDir::new()?; - let wallet_222 = get_wallet(wallet_dir_222.path()); - assert_eq!(wallet_222.balance(), NanoTokens::zero()); - - let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; - assert!(!cash_notes_22.is_empty()); - let to_222_unique_key = ( - wallet_22.balance(), - wallet_222.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_222 = SignedTransaction::new( - cash_notes_22, - vec![to_222_unique_key], - wallet_22.address(), - reason, - wallet_22.key(), - )?; - client - .send_spends(transfer_to_222.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 22 -> 222 wallet..."); - let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_222[0]).await?; - - // finally assert that we have a double spend attempt error here - // we wait 1s to ensure that the double spend attempt is detected and accumulated - tokio::time::sleep(Duration::from_secs(5)).await; - - match client.verify_cashnote(&cash_notes_for_2[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - - match client.verify_cashnote(&cash_notes_for_3[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - Ok(()) -} - -#[tokio::test] -/// When A -> B -> C where C is the UTXO cashnote, then double spending A and then double spending B should lead to C -/// being invalid. 
-async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_a = TempDir::new()?; - - let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; - let balance_a = wallet_a.balance().as_nano(); - let amount = NanoTokens::from(balance_a / 2); - - // Send from A -> B - let wallet_dir_b = TempDir::new()?; - let mut wallet_b = get_wallet(wallet_dir_b.path()); - assert_eq!(wallet_b.balance(), NanoTokens::zero()); - - let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; - let to_b_unique_key = ( - amount, - wallet_b.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_b = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_b_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; - - info!("Sending A->B to the network..."); - client - .send_spends(transfer_to_b.spends.iter(), false) - .await?; - - info!("Verifying the transfers from A -> B wallet..."); - let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_b[0]).await?; - wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B - - // Send from B -> C - let wallet_dir_c = TempDir::new()?; - let mut wallet_c = get_wallet(wallet_dir_c.path()); - assert_eq!(wallet_c.balance(), NanoTokens::zero()); - - let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; - assert!(!cash_notes_b.is_empty()); - let to_c_unique_key = ( - wallet_b.balance(), - wallet_c.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_c = SignedTransaction::new( - cash_notes_b.clone(), - vec![to_c_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; - - info!("spend B to C: {:?}", transfer_to_c.spends); - client - .send_spends(transfer_to_c.spends.iter(), false) - .await?; - - info!("Verifying the transfers from B -> C wallet..."); - let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_c[0]).await?; - wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c - - // Try to double spend from A -> X - let wallet_dir_x = TempDir::new()?; - let wallet_x = get_wallet(wallet_dir_x.path()); - assert_eq!(wallet_x.balance(), NanoTokens::zero()); - - let to_x_unique_key = ( - amount, - wallet_x.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_x = SignedTransaction::new( - cash_notes_a, - vec![to_x_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_x.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> X wallet... 
It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(10)).await; - - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); // poisoned - - // Try to double spend from B -> Y - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_b, - vec![to_y_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; // reuse the old cash notes - - info!("spend B to Y: {:?}", transfer_to_y.spends); - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); - let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; - info!("B spends: {b_spends:?}"); - - info!("Verifying the transfers from B -> Y wallet... It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(30)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from B -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of A -> B"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - info!("Got result while verifying the original spend from A -> B: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of B -> C"); - let result = client.verify_cashnote(&cash_notes_for_c[0]).await; - info!("Got result while verifying the original spend from B -> C: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - 
- Ok(()) -} - -#[tokio::test] -/// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over -/// should not lead to the original A disappearing and B becoming orphan -async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_a = TempDir::new()?; - - let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; - let balance_a = wallet_a.balance().as_nano(); - let amount = NanoTokens::from(balance_a / 2); - - // Send from A -> B - let wallet_dir_b = TempDir::new()?; - let mut wallet_b = get_wallet(wallet_dir_b.path()); - assert_eq!(wallet_b.balance(), NanoTokens::zero()); - - let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; - let to_b_unique_key = ( - amount, - wallet_b.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_b = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_b_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; - - info!("Sending A->B to the network..."); - client - .send_spends(transfer_to_b.spends.iter(), false) - .await?; - - // save original A spend - let vec_of_spends = transfer_to_b.spends.into_iter().collect::>(); - let original_a_spend = if let [spend] = vec_of_spends.as_slice() { - spend - } else { - panic!("Expected to have one spend here!"); - }; - - info!("Verifying the transfers from A -> B wallet..."); - let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_b[0]).await?; - wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B - - // Send from B -> C - let wallet_dir_c = TempDir::new()?; - let mut wallet_c = get_wallet(wallet_dir_c.path()); - assert_eq!(wallet_c.balance(), NanoTokens::zero()); - - let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; - assert!(!cash_notes_b.is_empty()); - let to_c_unique_key = ( - wallet_b.balance(), - wallet_c.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_c = SignedTransaction::new( - cash_notes_b.clone(), - vec![to_c_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; - - client - .send_spends(transfer_to_c.spends.iter(), false) - .await?; - - info!("Verifying the transfers from B -> C wallet..."); - let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_c[0]).await?; - wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c - - // Try to double spend from A -> X - let wallet_dir_x = TempDir::new()?; - let wallet_x = get_wallet(wallet_dir_x.path()); - assert_eq!(wallet_x.balance(), NanoTokens::zero()); - - let to_x_unique_key = ( - amount, - wallet_x.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_x = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_x_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_x.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> X wallet... 
It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(15)).await; - - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - - // Try to double spend A -> n different random keys - for _ in 0..20 { - info!("Spamming double spends on A"); - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_y_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> Y wallet... It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_millis(500)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from A -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - } - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use assert_fs::TempDir; +// use assert_matches::assert_matches; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::{bail, Result}; +// use itertools::Itertools; +// use sn_transfers::{ +// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, +// SpendReason, WalletError, GENESIS_CASHNOTE, +// }; +// use sn_logging::LogBuilder; +// use sn_networking::NetworkError; +// use std::time::Duration; +// use tracing::*; + +// #[tokio::test] +// async fn cash_note_transfer_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// // create 1 wallet add money from faucet +// let first_wallet_dir = TempDir::new()?; + +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance = first_wallet.balance().as_nano(); + +// // create wallet 2 and 3 to receive money from 1 +// let second_wallet_dir = TempDir::new()?; +// let second_wallet = get_wallet(second_wallet_dir.path()); +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); +// let third_wallet_dir = TempDir::new()?; +// let third_wallet = get_wallet(third_wallet_dir.path()); +// assert_eq!(third_wallet.balance(), NanoTokens::zero()); + +// // manually forge two transfers of the same source +// let amount = first_wallet_balance / 3; +// let to1 = first_wallet.address(); +// let to2 = second_wallet.address(); +// let to3 = third_wallet.address(); + +// let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; +// let same_cash_notes = some_cash_notes.clone(); + +// let mut rng = rng::thread_rng(); + +// let reason = SpendReason::default(); +// let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); + +// let transfer_to_2 = SignedTransaction::new( +// some_cash_notes, +// vec![to2_unique_key], +// to1, +// reason.clone(), +// first_wallet.key(), +// )?; +// let transfer_to_3 = SignedTransaction::new( +// same_cash_notes, +// vec![to3_unique_key], +// to1, +// reason, +// first_wallet.key(), +// )?; + +// // send both transfers to the network +// // upload won't error out, only error out during verification. +// info!("Sending both transfers to the network..."); +// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; +// assert!(res.is_ok()); +// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; +// assert!(res.is_ok()); + +// // we wait 5s to ensure that the double spend attempt is detected and accumulated +// info!("Verifying the transfers from first wallet... 
Sleeping for 10 seconds."); +// tokio::time::sleep(Duration::from_secs(10)).await; + +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); + +// // check the CashNotes, it should fail +// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// assert!(should_err1.is_err() && should_err2.is_err()); +// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); +// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// Ok(()) +// } + +// #[tokio::test] +// async fn genesis_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); + +// // create a client and an unused wallet to make sure some money already exists in the system +// let first_wallet_dir = TempDir::new()?; +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_addr = first_wallet.address(); + +// // create a new genesis wallet with the intention to spend genesis again +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; +// second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; +// let genesis_amount = GENESIS_CASHNOTE.value(); +// let second_wallet_addr = second_wallet.address(); + +// // create a transfer from the second wallet to the first wallet +// // this will spend Genesis (again) and transfer its value to the first wallet +// let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; +// let mut rng = rng::thread_rng(); +// let recipient = ( +// genesis_amount, +// first_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let change_addr = second_wallet_addr; +// let reason = SpendReason::default(); +// let transfer = SignedTransaction::new( +// genesis_cashnote, +// vec![recipient], +// change_addr, +// reason, +// second_wallet.key(), +// )?; + +// // send the transfer to the network which will mark genesis as a double spent +// // making its direct descendants unspendable +// let res = client.send_spends(transfer.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert!(res.is_ok()); + +// // put the bad cashnote in the first wallet +// first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; + +// // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) +// let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; +// let recipient = ( +// genesis_amount, +// second_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let bad_genesis_descendant = genesis_cashnote_and_others +// .iter() +// .find(|cn| cn.value() == genesis_amount) +// .unwrap() +// .clone(); +// let change_addr = first_wallet_addr; +// let reason = SpendReason::default(); +// let transfer2 = SignedTransaction::new( +// vec![bad_genesis_descendant], +// 
vec![recipient], +// change_addr, +// reason, +// first_wallet.key(), +// )?; + +// // send the transfer to the network which should reject it +// let res = client.send_spends(transfer2.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); + +// Ok(()) +// } + +// #[tokio::test] +// async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_1 = TempDir::new()?; + +// let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; +// let balance_1 = wallet_1.balance(); +// let amount = balance_1 / 2; +// let to1 = wallet_1.address(); + +// // Send from 1 -> 2 +// let wallet_dir_2 = TempDir::new()?; +// let mut wallet_2 = get_wallet(wallet_dir_2.path()); +// assert_eq!(wallet_2.balance(), NanoTokens::zero()); + +// let to2 = wallet_2.address(); +// let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; +// let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let transfer_to_2 = SignedTransaction::new( +// cash_notes_1.clone(), +// vec![to_2_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; + +// info!("Sending 1->2 to the network..."); +// client +// .send_spends(transfer_to_2.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 1 -> 2 wallet..."); +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_2[0]).await?; +// wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 + +// // Send from 2 -> 22 +// let wallet_dir_22 = TempDir::new()?; +// let mut wallet_22 = get_wallet(wallet_dir_22.path()); +// assert_eq!(wallet_22.balance(), NanoTokens::zero()); + +// let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; +// assert!(!cash_notes_2.is_empty()); +// let to_22_unique_key = ( +// wallet_2.balance(), +// wallet_22.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_22 = SignedTransaction::new( +// cash_notes_2, +// vec![to_22_unique_key], +// to2, +// reason.clone(), +// wallet_2.key(), +// )?; + +// client +// .send_spends(transfer_to_22.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 2 -> 22 wallet..."); +// let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_22[0]).await?; +// wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 + +// // Try to double spend from 1 -> 3 +// let wallet_dir_3 = TempDir::new()?; +// let wallet_3 = get_wallet(wallet_dir_3.path()); +// assert_eq!(wallet_3.balance(), NanoTokens::zero()); + +// let to_3_unique_key = ( +// amount, +// wallet_3.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_3 = SignedTransaction::new( +// cash_notes_1, +// vec![to_3_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_3.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from 1 -> 3 wallet... 
It should error out."); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); +// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned +// info!("Verifying the original transfers from 1 -> 2 wallet... It should error out."); +// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// // The old spend has been poisoned, but spends from 22 -> 222 should still work +// let wallet_dir_222 = TempDir::new()?; +// let wallet_222 = get_wallet(wallet_dir_222.path()); +// assert_eq!(wallet_222.balance(), NanoTokens::zero()); + +// let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; +// assert!(!cash_notes_22.is_empty()); +// let to_222_unique_key = ( +// wallet_22.balance(), +// wallet_222.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_222 = SignedTransaction::new( +// cash_notes_22, +// vec![to_222_unique_key], +// wallet_22.address(), +// reason, +// wallet_22.key(), +// )?; +// client +// .send_spends(transfer_to_222.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 22 -> 222 wallet..."); +// let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_222[0]).await?; + +// // finally assert that we have a double spend attempt error here +// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// tokio::time::sleep(Duration::from_secs(5)).await; + +// match client.verify_cashnote(&cash_notes_for_2[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } + +// match client.verify_cashnote(&cash_notes_for_3[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } +// Ok(()) +// } + +// #[tokio::test] +// /// When A -> B -> C where C is the UTXO cashnote, then double spending A and then double spending B should lead to C +// /// being invalid. 
+// async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_a = TempDir::new()?; + +// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = balance_a / 2; + +// // Send from A -> B +// let wallet_dir_b = TempDir::new()?; +// let mut wallet_b = get_wallet(wallet_dir_b.path()); +// assert_eq!(wallet_b.balance(), NanoTokens::zero()); + +// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; +// let to_b_unique_key = ( +// amount, +// wallet_b.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_b = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_b_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; + +// info!("Sending A->B to the network..."); +// client +// .send_spends(transfer_to_b.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from A -> B wallet..."); +// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_b[0]).await?; +// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + +// // Send from B -> C +// let wallet_dir_c = TempDir::new()?; +// let mut wallet_c = get_wallet(wallet_dir_c.path()); +// assert_eq!(wallet_c.balance(), NanoTokens::zero()); + +// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; +// assert!(!cash_notes_b.is_empty()); +// let to_c_unique_key = ( +// wallet_b.balance(), +// wallet_c.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_c = SignedTransaction::new( +// cash_notes_b.clone(), +// vec![to_c_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; + +// info!("spend B to C: {:?}", transfer_to_c.spends); +// client +// .send_spends(transfer_to_c.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from B -> C wallet..."); +// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_c[0]).await?; +// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + +// // Try to double spend from A -> X +// let wallet_dir_x = TempDir::new()?; +// let wallet_x = get_wallet(wallet_dir_x.path()); +// assert_eq!(wallet_x.balance(), NanoTokens::zero()); + +// let to_x_unique_key = ( +// amount, +// wallet_x.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_x = SignedTransaction::new( +// cash_notes_a, +// vec![to_x_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... 
It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(10)).await; + +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // poisoned + +// // Try to double spend from B -> Y +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_b, +// vec![to_y_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; // reuse the old cash notes + +// info!("spend B to Y: {:?}", transfer_to_y.spends); +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); +// let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; +// info!("B spends: {b_spends:?}"); + +// info!("Verifying the transfers from B -> Y wallet... It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(30)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from B -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of A -> B"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// info!("Got result while verifying the original spend from A -> B: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of B -> C"); +// let result = client.verify_cashnote(&cash_notes_for_c[0]).await; +// info!("Got result while verifying the original spend from B -> C: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) 
attempt was detected"), "Expected double spend, but got {str}");
+// }, "result should be verify error, it was {result:?}");
+
+// Ok(())
+// }
+
+// #[tokio::test]
+// /// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over
+// /// should not lead to the original A disappearing and B becoming orphan
+// async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> {
+// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true);
+// let mut rng = rng::thread_rng();
+// let reason = SpendReason::default();
+// // create 1 wallet add money from faucet
+// let wallet_dir_a = TempDir::new()?;
+
+// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?;
+// let balance_a = wallet_a.balance();
+// let amount = balance_a / 2;
+
+// // Send from A -> B
+// let wallet_dir_b = TempDir::new()?;
+// let mut wallet_b = get_wallet(wallet_dir_b.path());
+// assert_eq!(wallet_b.balance(), NanoTokens::zero());
+
+// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?;
+// let to_b_unique_key = (
+// amount,
+// wallet_b.address(),
+// DerivationIndex::random(&mut rng),
+// false,
+// );
+// let transfer_to_b = SignedTransaction::new(
+// cash_notes_a.clone(),
+// vec![to_b_unique_key],
+// wallet_a.address(),
+// reason.clone(),
+// wallet_a.key(),
+// )?;
+
+// info!("Sending A->B to the network...");
+// client
+// .send_spends(transfer_to_b.spends.iter(), false)
+// .await?;
+
+// // save original A spend
+// let vec_of_spends = transfer_to_b.spends.into_iter().collect::<Vec<_>>();
+// let original_a_spend = if let [spend] = vec_of_spends.as_slice() {
+// spend
+// } else {
+// panic!("Expected to have one spend here!");
+// };
+
+// info!("Verifying the transfers from A -> B wallet...");
+// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone();
+// client.verify_cashnote(&cash_notes_for_b[0]).await?;
+// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B
+
+// // Send from B -> C
+// let wallet_dir_c = TempDir::new()?;
+// let mut wallet_c = get_wallet(wallet_dir_c.path());
+// assert_eq!(wallet_c.balance(), NanoTokens::zero());
+
+// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?;
+// assert!(!cash_notes_b.is_empty());
+// let to_c_unique_key = (
+// wallet_b.balance(),
+// wallet_c.address(),
+// DerivationIndex::random(&mut rng),
+// false,
+// );
+// let transfer_to_c = SignedTransaction::new(
+// cash_notes_b.clone(),
+// vec![to_c_unique_key],
+// wallet_b.address(),
+// reason.clone(),
+// wallet_b.key(),
+// )?;
+
+// client
+// .send_spends(transfer_to_c.spends.iter(), false)
+// .await?;
+
+// info!("Verifying the transfers from B -> C wallet...");
+// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone();
+// client.verify_cashnote(&cash_notes_for_c[0]).await?;
+// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c
+
+// // Try to double spend from A -> X
+// let wallet_dir_x = TempDir::new()?;
+// let wallet_x = get_wallet(wallet_dir_x.path());
+// assert_eq!(wallet_x.balance(), NanoTokens::zero());
+
+// let to_x_unique_key = (
+// amount,
+// wallet_x.address(),
+// DerivationIndex::random(&mut rng),
+// false,
+// );
+// let transfer_to_x = SignedTransaction::new(
+// cash_notes_a.clone(),
+// vec![to_x_unique_key],
+// wallet_a.address(),
+// reason.clone(),
+// wallet_a.key(),
+// )?; // reuse the old cash notes
+// client
+//
.send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(15)).await; + +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } + +// // Try to double spend A -> n different random keys +// for _ in 0..20 { +// info!("Spamming double spends on A"); +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_y_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> Y wallet... It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_millis(500)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from A -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } +// } + +// Ok(()) +// } diff --git a/sn_node/tests/sequential_transfers.rs b/sn_node/tests/sequential_transfers.rs index 66d69337c8..d6906e37d1 100644 --- a/sn_node/tests/sequential_transfers.rs +++ b/sn_node/tests/sequential_transfers.rs @@ -1,54 +1,54 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod common; - -use assert_fs::TempDir; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::Result; -use sn_client::send; -use sn_logging::LogBuilder; -use sn_transfers::NanoTokens; -use tracing::info; - -#[tokio::test] -async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); - - let first_wallet_dir = TempDir::new()?; - - let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - let second_wallet_balance = NanoTokens::from(first_wallet_balance / 2); - info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = get_wallet(second_wallet_dir.path()); - - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - - let tokens = send( - first_wallet, - second_wallet_balance, - second_wallet.address(), - &client, - true, - ) - .await?; - info!("Verifying the transfer from first wallet..."); - - client.verify_cashnote(&tokens).await?; - second_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(second_wallet.balance(), second_wallet_balance); - info!("CashNotes deposited to second wallet: {second_wallet_balance}."); - - let first_wallet = get_wallet(&first_wallet_dir); - assert!(second_wallet_balance.as_nano() == first_wallet.balance().as_nano()); - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
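The rewritten (and likewise commented-out) version of this test below changes only units — balance arithmetic moves from as_nano()/u64 maths to token division, and the closing comparison to as_atto() — while the transfer flow itself is unchanged. That flow, condensed into a sketch assembled from the same calls the test uses (sn_client::send, verify_cashnote, deposit_and_store_to_disk) and the tests' own `common` helpers; the function name and the balance division are assumptions carried over from the commented code, not a verified build:

use assert_fs::TempDir;
use sn_client::send;
use sn_transfers::NanoTokens;

// Outline only: get_client_and_funded_wallet / get_wallet are assumed in scope.
async fn sequential_transfer_outline() -> eyre::Result<()> {
    let first_dir = TempDir::new()?;
    let (client, first_wallet) = get_client_and_funded_wallet(first_dir.path()).await?;
    let half = first_wallet.balance() / 2;

    let second_dir = TempDir::new()?;
    let mut second_wallet = get_wallet(second_dir.path());
    assert_eq!(second_wallet.balance(), NanoTokens::zero());

    // `send` consumes the sending wallet, uploads the spend, and returns the
    // CashNote addressed to the recipient; `true` asks it to verify storage.
    let cash_note = send(first_wallet, half, second_wallet.address(), &client, true).await?;
    client.verify_cashnote(&cash_note).await?;
    second_wallet.deposit_and_store_to_disk(&vec![cash_note])?;
    assert_eq!(second_wallet.balance(), half);
    Ok(())
}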
+ +// mod common; + +// use assert_fs::TempDir; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::Result; +// use sn_client::send; +// use sn_logging::LogBuilder; +// use sn_transfers::NanoTokens; +// use tracing::info; + +// #[tokio::test] +// async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); + +// let first_wallet_dir = TempDir::new()?; + +// let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance:NanoTokens = first_wallet.balance(); + +// let second_wallet_balance = first_wallet_balance / 2; +// info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = get_wallet(second_wallet_dir.path()); + +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); + +// let tokens = send( +// first_wallet, +// second_wallet_balance, +// second_wallet.address(), +// &client, +// true, +// ) +// .await?; +// info!("Verifying the transfer from first wallet..."); + +// client.verify_cashnote(&tokens).await?; +// second_wallet.deposit_and_store_to_disk(&vec![tokens])?; +// assert_eq!(second_wallet.balance(), second_wallet_balance); +// info!("CashNotes deposited to second wallet: {second_wallet_balance}."); + +// let first_wallet = get_wallet(&first_wallet_dir); +// assert!(second_wallet_balance.as_atto() == first_wallet.balance().as_atto()); + +// Ok(()) +// } diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 57e63f05b6..6e11295cbd 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -1,399 +1,404 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
-
-mod common;
-
-use crate::common::{client::get_client_and_funded_wallet, random_content};
-use assert_fs::TempDir;
-use eyre::{eyre, Result};
-use libp2p::PeerId;
-use rand::Rng;
-use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient};
-use sn_logging::LogBuilder;
-use sn_networking::{GetRecordError, NetworkError};
-use sn_protocol::{
- error::Error as ProtocolError,
- storage::{ChunkAddress, RegisterAddress},
- NetworkAddress,
-};
-use sn_registers::Permissions;
-use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote};
-use std::collections::BTreeMap;
-use tokio::time::{sleep, Duration};
-use tracing::info;
-use xor_name::XorName;
-
-#[tokio::test]
-async fn storage_payment_succeeds() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
-
- let paying_wallet_dir = TempDir::new()?;
-
- let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
-
- let balance_before = paying_wallet.balance();
- let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
-
- // generate a random number (between 50 and 100) of random addresses
- let mut rng = rand::thread_rng();
- let random_content_addrs = (0..rng.gen_range(50..100))
- .map(|_| {
- sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng)))
- })
- .collect::<Vec<_>>();
- info!(
- "Paying for {} random addresses...",
- random_content_addrs.len()
- );
-
- let _cost = wallet_client
- .pay_for_storage(random_content_addrs.clone().into_iter())
- .await?;
-
- info!("Verifying balance has been paid from the wallet...");
-
- let paying_wallet = wallet_client.into_wallet();
- assert!(
- paying_wallet.balance() < balance_before,
- "balance should have decreased after payment"
- );
-
- Ok(())
-}
-
-#[tokio::test]
-async fn storage_payment_fails_with_insufficient_money() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
-
- let paying_wallet_dir: TempDir = TempDir::new()?;
- let chunks_dir = TempDir::new()?;
-
- let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
-
- let (files_api, content_bytes, _random_content_addrs, chunks) =
- random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?;
-
- let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
- let subset_len = chunks.len() / 3;
- let _storage_cost = wallet_client
- .pay_for_storage(
- chunks
- .clone()
- .into_iter()
- .take(subset_len)
- .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))),
- )
- .await?;
-
- // now let's request to upload all addresses, even that we've already paid for a subset of them
- let verify_store = false;
- let res = files_api
- .upload_test_bytes(content_bytes.clone(), verify_store)
- .await;
- assert!(
- res.is_err(),
- "Should have failed to store as we didnt pay for everything"
- );
- Ok(())
-}
-
-// TODO: reenable
-#[ignore = "Currently we do not cache the proofs in the wallet"]
-#[tokio::test]
-async fn storage_payment_proofs_cached_in_wallet() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
-
- let paying_wallet_dir: TempDir = TempDir::new()?;
-
- let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
- let wallet_original_balance = paying_wallet.balance().as_nano();
- let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
-
- // generate a random number (between 50 and 100) of random addresses
- let mut rng = rand::thread_rng();
- let random_content_addrs = (0..rng.gen_range(50..100))
- .map(|_| {
- sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng)))
- })
- .collect::<Vec<_>>();
-
- // let's first pay only for a subset of the addresses
- let subset_len = random_content_addrs.len() / 3;
- info!("Paying for {subset_len} random addresses...",);
- let storage_payment_result = wallet_client
- .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len))
- .await?;
-
- let total_cost = storage_payment_result
- .storage_cost
- .checked_add(storage_payment_result.royalty_fees)
- .ok_or(eyre!("Total storage cost exceed possible token amount"))?;
-
- // check we've paid only for the subset of addresses, 1 nano per addr
- let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano());
- info!("Verifying new balance on paying wallet is {new_balance} ...");
- let paying_wallet = wallet_client.into_wallet();
- assert_eq!(paying_wallet.balance(), new_balance);
-
- // let's verify payment proofs for the subset have been cached in the wallet
- assert!(random_content_addrs
- .iter()
- .take(subset_len)
- .all(|name| paying_wallet
- .api()
- .get_recent_payment(&name.as_xorname().unwrap())
- .is_ok()));
-
- // now let's request to pay for all addresses, even that we've already paid for a subset of them
- let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
- let storage_payment_result = wallet_client
- .pay_for_storage(random_content_addrs.clone().into_iter())
- .await?;
- let total_cost = storage_payment_result
- .storage_cost
- .checked_add(storage_payment_result.royalty_fees)
- .ok_or(eyre!("Total storage cost exceed possible token amount"))?;
-
- // check we've paid only for addresses we haven't previously paid for, 1 nano per addr
- let new_balance = NanoTokens::from(
- wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()),
- );
- println!("Verifying new balance on paying wallet is now {new_balance} ...");
- let paying_wallet = wallet_client.into_wallet();
- assert_eq!(paying_wallet.balance(), new_balance);
-
- // let's verify payment proofs now for all addresses have been cached in the wallet
- // assert!(random_content_addrs
- // .iter()
- // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name)));
-
- Ok(())
-}
-
-#[tokio::test]
-async fn storage_payment_chunk_upload_succeeds() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
-
- let paying_wallet_dir = TempDir::new()?;
- let chunks_dir = TempDir::new()?;
-
- let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
- let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
-
- let (files_api, _content_bytes, file_addr, chunks) =
- random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?;
-
- info!("Paying for {} random addresses...", chunks.len());
-
- let _cost = wallet_client
- .pay_for_storage(
- chunks
- .iter()
- .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))),
- )
- .await?;
-
- let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf());
- uploader.set_show_holders(true);
- uploader.insert_chunk_paths(chunks);
- let _upload_stats = uploader.start_upload().await?;
-
- let mut files_download = FilesDownload::new(files_api);
- let _ =
files_download.download_file(file_addr, None).await?; - - Ok(()) -} - -#[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] -#[tokio::test] -async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let (files_api, content_bytes, content_addr, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - let mut no_data_payments = BTreeMap::default(); - for (chunk_name, _) in chunks.iter() { - no_data_payments.insert( - *chunk_name, - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), - PeerId::random().to_bytes(), - ), - ); - } - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - sleep(Duration::from_secs(5)).await; - - files_api - .upload_test_bytes(content_bytes.clone(), false) - .await?; +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
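storage_payments.rs is disabled mid-migration from the sn_transfers money types (NanoTokens, MainPubkey) to the sn_evm ones (AttoTokens, Amount, dummy addresses), with the balance assertions left as TODOs in the commented version below. The core flow the suite exercises — pay for a batch of addresses and check the wallet balance drops — as a sketch assembled from the suite's own calls (WalletClient::new, pay_for_storage, into_wallet); the function name and batch handling are illustrative, not a verified build:

use assert_fs::TempDir;
use rand::Rng;
use sn_client::WalletClient;
use sn_protocol::{storage::ChunkAddress, NetworkAddress};
use xor_name::XorName;

// Outline only: get_client_and_funded_wallet is assumed from the `common` module.
async fn storage_payment_outline() -> eyre::Result<()> {
    let dir = TempDir::new()?;
    let (client, paying_wallet) = get_client_and_funded_wallet(dir.path()).await?;
    let balance_before = paying_wallet.balance();
    let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);

    // Pay for a batch of random chunk addresses; the quoted cost is drawn
    // from the wallet, so its balance must strictly decrease.
    let mut rng = rand::thread_rng();
    let addrs = (0..rng.gen_range(50..100))
        .map(|_| NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))))
        .collect::<Vec<_>>();
    let _cost = wallet_client.pay_for_storage(addrs.into_iter()).await?;

    let paying_wallet = wallet_client.into_wallet();
    assert!(paying_wallet.balance() < balance_before);
    Ok(())
}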
+
+// mod common;
+
+// use crate::common::{client::get_client_and_funded_wallet, random_content};
+// use assert_fs::TempDir;
+// use eyre::{eyre, Result};
+// use libp2p::PeerId;
+// use rand::Rng;
+// use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient};
+// use sn_evm::{Amount, AttoTokens, PaymentQuote};
+// use sn_logging::LogBuilder;
+// use sn_networking::{GetRecordError, NetworkError};
+// use sn_protocol::{
+// error::Error as ProtocolError,
+// storage::{ChunkAddress, RegisterAddress},
+// NetworkAddress,
+// };
+// use sn_registers::Permissions;
+// use std::collections::BTreeMap;
+// use tokio::time::{sleep, Duration};
+// use tracing::info;
+// use xor_name::XorName;
+
+// #[tokio::test]
+// async fn storage_payment_succeeds() -> Result<()> {
+// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
+
+// let paying_wallet_dir = TempDir::new()?;
+
+// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
+
+// let balance_before = paying_wallet.balance();
+// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
+
+// // generate a random number (between 50 and 100) of random addresses
+// let mut rng = rand::thread_rng();
+// let random_content_addrs = (0..rng.gen_range(50..100))
+// .map(|_| {
+// sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng)))
+// })
+// .collect::<Vec<_>>();
+// info!(
+// "Paying for {} random addresses...",
+// random_content_addrs.len()
+// );
+
+// let _cost = wallet_client
+// .pay_for_storage(random_content_addrs.clone().into_iter())
+// .await?;
+
+// info!("Verifying balance has been paid from the wallet...");
+
+// let paying_wallet = wallet_client.into_wallet();
+// assert!(
+// paying_wallet.balance() < balance_before,
+// "balance should have decreased after payment"
+// );
+
+// Ok(())
+// }
+
+// #[tokio::test]
+// async fn storage_payment_fails_with_insufficient_money() -> Result<()> {
+// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
+
+// let paying_wallet_dir: TempDir = TempDir::new()?;
+// let chunks_dir = TempDir::new()?;
+
+// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
+
+// let (files_api, content_bytes, _random_content_addrs, chunks) =
+// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?;
+
+// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
+// let subset_len = chunks.len() / 3;
+// let _storage_cost = wallet_client
+// .pay_for_storage(
+// chunks
+// .clone()
+// .into_iter()
+// .take(subset_len)
+// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))),
+// )
+// .await?;
+
+// // now let's request to upload all addresses, even that we've already paid for a subset of them
+// let verify_store = false;
+// let res = files_api
+// .upload_test_bytes(content_bytes.clone(), verify_store)
+// .await;
+// assert!(
+// res.is_err(),
+// "Should have failed to store as we didnt pay for everything"
+// );
+// Ok(())
+// }
+
+// // TODO: reenable
+// #[ignore = "Currently we do not cache the proofs in the wallet"]
+// #[tokio::test]
+// async fn storage_payment_proofs_cached_in_wallet() -> Result<()> {
+// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
+
+// let paying_wallet_dir: TempDir = TempDir::new()?;
+
+// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
+// let wallet_original_balance = paying_wallet.balance().as_atto();
+// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
+
+// // generate a random number (between 50 and 100) of random addresses
+// let mut rng = rand::thread_rng();
+// let random_content_addrs = (0..rng.gen_range(50..100))
+// .map(|_| {
+// sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng)))
+// })
+// .collect::<Vec<_>>();
+
+// // let's first pay only for a subset of the addresses
+// let subset_len = random_content_addrs.len() / 3;
+// info!("Paying for {subset_len} random addresses...",);
+// let storage_payment_result = wallet_client
+// .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len))
+// .await?;
+
+// let total_cost = storage_payment_result
+// .storage_cost
+// .checked_add(storage_payment_result.royalty_fees)
+// .ok_or(eyre!("Total storage cost exceed possible token amount"))?;
+
+// // check we've paid only for the subset of addresses, 1 nano per addr
+// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto());
+// info!("Verifying new balance on paying wallet is {new_balance} ...");
+// let paying_wallet = wallet_client.into_wallet();
+// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm
+
+// // let's verify payment proofs for the subset have been cached in the wallet
+// assert!(random_content_addrs
+// .iter()
+// .take(subset_len)
+// .all(|name| paying_wallet
+// .api()
+// .get_recent_payment(&name.as_xorname().unwrap())
+// .is_ok()));
+
+// // now let's request to pay for all addresses, even that we've already paid for a subset of them
+// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
+// let storage_payment_result = wallet_client
+// .pay_for_storage(random_content_addrs.clone().into_iter())
+// .await?;
+// let total_cost = storage_payment_result
+// .storage_cost
+// .checked_add(storage_payment_result.royalty_fees)
+// .ok_or(eyre!("Total storage cost exceed possible token amount"))?;
+
+// // check we've paid only for addresses we haven't previously paid for, 1 nano per addr
+// let new_balance = AttoTokens::from_atto(
+// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()),
+// );
+// println!("Verifying new balance on paying wallet is now {new_balance} ...");
+// let paying_wallet = wallet_client.into_wallet();
+// // TODO adapt to evm
+// // assert_eq!(paying_wallet.balance(), new_balance);
+
+// // let's verify payment proofs now for all addresses have been cached in the wallet
+// // assert!(random_content_addrs
+// // .iter()
+// // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name)));
+
+// Ok(())
+// }
+
+// #[tokio::test]
+// async fn storage_payment_chunk_upload_succeeds() -> Result<()> {
+// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true);
+
+// let paying_wallet_dir = TempDir::new()?;
+// let chunks_dir = TempDir::new()?;
+
+// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?;
+// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet);
+
+// let (files_api, _content_bytes, file_addr, chunks) =
+// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?;
+
+// info!("Paying for {} random addresses...", chunks.len());
+
+// let _cost = wallet_client
+//
.pay_for_storage( +// chunks +// .iter() +// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), +// ) +// .await?; + +// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf()); +// uploader.set_show_holders(true); +// uploader.insert_chunk_paths(chunks); +// let _upload_stats = uploader.start_upload().await?; + +// let mut files_download = FilesDownload::new(files_api); +// let _ = files_download.download_file(file_addr, None).await?; + +// Ok(()) +// } + +// #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] +// #[tokio::test] +// async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let (files_api, content_bytes, content_addr, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// let mut no_data_payments = BTreeMap::default(); +// for (chunk_name, _) in chunks.iter() { +// no_data_payments.insert( +// *chunk_name, +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// PeerId::random().to_bytes(), +// ), +// ); +// } + +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; + +// sleep(Duration::from_secs(5)).await; + +// files_api +// .upload_test_bytes(content_bytes.clone(), false) +// .await?; - info!("Reading {content_addr:?} expected to fail"); - let mut files_download = FilesDownload::new(files_api); - assert!( - matches!( - files_download.download_file(content_addr, None).await, - Err(ClientError::Network(NetworkError::GetRecordError( - GetRecordError::RecordNotFound - ))) - ), - "read bytes should fail as we didn't store them" - ); +// info!("Reading {content_addr:?} expected to fail"); +// let mut files_download = FilesDownload::new(files_api); +// assert!( +// matches!( +// files_download.download_file(content_addr, None).await, +// Err(ClientError::Network(NetworkError::GetRecordError( +// GetRecordError::RecordNotFound +// ))) +// ), +// "read bytes should fail as we didn't store them" +// ); - Ok(()) -} +// Ok(()) +// } -#[tokio::test] -async fn storage_payment_register_creation_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// #[tokio::test] +// async fn storage_payment_register_creation_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - let paying_wallet_dir = TempDir::new()?; +// let paying_wallet_dir = TempDir::new()?; - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_addr = NetworkAddress::from_register_address(address); - info!("Paying for random 
Register address {net_addr:?} ..."); +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_addr = NetworkAddress::from_register_address(address); +// info!("Paying for random Register address {net_addr:?} ..."); - let _cost = wallet_client - .pay_for_storage(std::iter::once(net_addr)) - .await?; +// let _cost = wallet_client +// .pay_for_storage(std::iter::once(net_addr)) +// .await?; - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) - .await?; +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// .await?; - println!("Newly created register has {} ops", register.read().len()); +// println!("Newly created register has {} ops", register.read().len()); + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(register.read(), retrieved_reg.read()); + +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; + +// println!( +// "Register has {} ops after first write", +// register.read().len() +// ); + +// register.sync(&mut wallet_client, true, None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); + +// assert_eq!(retrieved_reg.read().len(), 1); + +// for index in 1..10 { +// println!("current index is {index}"); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; +// register.sync(&mut wallet_client, true, None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); + +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops.len() +// ); +// println!("current local cached ops length is {}", register.ops.len()); + +// assert_eq!(retrieved_reg.read().len(), register.read().len()); - let retrieved_reg = client.get_register(address).await?; +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); - assert_eq!(register.read(), retrieved_reg.read()); +// println!("Current fetched register is {:?}", retrieved_reg.register); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.register.log_update_history() +// ); - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - - println!( - "Register has {} ops after first write", - register.read().len() - ); +// std::thread::sleep(std::time::Duration::from_millis(1000)); +// } - register.sync(&mut wallet_client, true, None).await?; +// Ok(()) +// } - let retrieved_reg = client.get_register(address).await?; +// #[tokio::test] +// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... 
need to check if this test is valid"] +// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// let paying_wallet_dir = TempDir::new()?; - assert_eq!(retrieved_reg.read().len(), 1); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - for index in 1..10 { - println!("current index is {index}"); - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - register.sync(&mut wallet_client, true, None).await?; +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_address = +// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - let retrieved_reg = client.get_register(address).await?; +// let mut no_data_payments = BTreeMap::default(); +// no_data_payments.insert( +// net_address +// .as_xorname() +// .expect("RegisterAddress should convert to XorName"), +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// vec![], +// ), +// ); - println!( - "current retrieved register entry length is {}", - retrieved_reg.read().len() - ); - println!("current expected entry length is {}", register.read().len()); - - println!( - "current retrieved register ops length is {}", - retrieved_reg.ops.len() - ); - println!("current local cached ops length is {}", register.ops.len()); +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; - assert_eq!(retrieved_reg.read().len(), register.read().len()); +// // this should fail to store as the amount paid is not enough +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// .await?; - assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// client.get_register(address).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); - println!("Current fetched register is {:?}", retrieved_reg.register); - println!( - "Fetched register has update history of {}", - retrieved_reg.register.log_update_history() - ); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// register.write(&random_entry)?; - std::thread::sleep(std::time::Duration::from_millis(1000)); - } +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// register.sync(&mut wallet_client, false, None).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); - Ok(()) -} - -#[tokio::test] -#[ignore = "Test currently invalid as we always try to pay and upload registers if none found... 
need to check if this test is valid"] -async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_address = - NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - - let mut no_data_payments = BTreeMap::default(); - no_data_payments.insert( - net_address - .as_xorname() - .expect("RegisterAddress should convert to XorName"), - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(xor_name, NanoTokens::from(0)), - vec![], - ), - ); - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - // this should fail to store as the amount paid is not enough - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) - .await?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - client.get_register(address).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - register.write(&random_entry)?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - register.sync(&mut wallet_client, false, None).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - Ok(()) -} +// Ok(()) +// } diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 6dfd50bd04..78d5c5b9c4 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -52,13 +52,13 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_service_management = { path = "../sn_service_management", version = "0.3.12" } sn-releases = "0.2.6" sn_transfers = { path = "../sn_transfers", version = "0.19.1" } +sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } tracing = { version = "~0.1.26" } tonic = { version = "0.6.2" } uuid = { version = "1.5.0", features = ["v4"] } -which = "6.0.1" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dependencies] nix = { version = "0.27.1", features = ["fs", "user"] } diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index ed10be31cf..ab0ba5fd03 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -23,12 +23,12 @@ use libp2p::Multiaddr; use mockall::{mock, predicate::*, Sequence}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; +use sn_evm::AttoTokens; use sn_service_management::{auditor::AuditorServiceData, control::ServiceControl}; use sn_service_management::{error::Result as ServiceControlResult, NatDetectionStatus}; use sn_service_management::{ DaemonServiceData, FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -234,7 +234,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n pid: 
None, peer_id: None, owner: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), status: ServiceStatus::Added, safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -896,7 +896,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1494,7 +1494,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1587,7 +1587,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2156,7 +2156,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2250,7 +2250,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2550,7 +2550,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2644,7 +2644,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index b827e3f6a4..e1cf5faf6c 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -6,9 +6,13 @@ 
// KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +mod subcommands; + +use crate::subcommands::evm_network::EvmNetworkCommand; use clap::{Parser, Subcommand}; use color_eyre::{eyre::eyre, Result}; use libp2p::Multiaddr; +use sn_evm::RewardsAddress; use sn_logging::{LogBuilder, LogFormat}; use sn_node_manager::{ add_services::config::PortRange, @@ -870,6 +874,12 @@ pub enum LocalSubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] rpc_port: Option<PortRange>, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, + /// Optionally specify what EVM network to use for payments. + #[command(subcommand)] + evm_network: Option<EvmNetworkCommand>, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -987,6 +997,12 @@ pub enum LocalSubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] rpc_port: Option<PortRange>, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, + /// Optionally specify what EVM network to use for payments. + #[command(subcommand)] + evm_network: Option<EvmNetworkCommand>, /// Set to skip the network validation process #[clap(long)] skip_validation: bool, @@ -1203,6 +1219,8 @@ async fn main() -> Result<()> { owner_prefix, peers, rpc_port, + rewards_address, + evm_network, skip_validation: _, } => { cmd::local::join( @@ -1221,6 +1239,8 @@ owner_prefix, peers, rpc_port, + rewards_address, + evm_network.map(|v| v.into()), true, verbosity, ) @@ -1243,6 +1263,8 @@ owner, owner_prefix, rpc_port, + rewards_address, + evm_network, skip_validation: _, } => { cmd::local::run( @@ -1261,6 +1283,8 @@ owner, owner_prefix, rpc_port, + rewards_address, + evm_network.map(|v| v.into()), true, verbosity, ) diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs new file mode 100644 index 0000000000..89c39a16f6 --- /dev/null +++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs @@ -0,0 +1,41 @@ +use clap::Subcommand; +use sn_evm::{EvmNetwork, EvmNetworkCustom}; + +#[derive(Subcommand, Clone, Debug)] +pub enum EvmNetworkCommand { + /// Use the Arbitrum One network + EvmArbitrumOne, + + /// Use a custom network + EvmCustom { + /// The RPC URL for the custom network + #[arg(long)] + rpc_url: String, + + /// The payment token contract address + #[arg(long, short)] + payment_token_address: String, + + /// The chunk payments contract address + #[arg(long, short)] + chunk_payments_address: String, + }, +} + +#[allow(clippy::from_over_into)] +impl Into<EvmNetwork> for EvmNetworkCommand { + fn into(self) -> EvmNetwork { + match self { + Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmCustom { + rpc_url, + payment_token_address, + chunk_payments_address, + } => EvmNetwork::Custom(EvmNetworkCustom::new( + &rpc_url, + &payment_token_address, + &chunk_payments_address, + )), + } + } +} diff --git a/sn_node_manager/src/bin/cli/subcommands/mod.rs b/sn_node_manager/src/bin/cli/subcommands/mod.rs new file mode 100644 index 0000000000..80b95f1ea5 --- /dev/null +++ b/sn_node_manager/src/bin/cli/subcommands/mod.rs @@ -0,0 +1 @@ +pub mod evm_network;
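For orientation, a minimal sketch of what the conversion above yields at a call site (illustrative values, not part of the patch; it assumes `EvmNetworkCustom::new` accepts the three strings exactly as the impl passes them):

    use sn_evm::EvmNetwork;
    // The CLI selection, as clap would construct it from `evm-custom --rpc-url ...`
    let cmd = EvmNetworkCommand::EvmCustom {
        rpc_url: "http://localhost:8545".to_string(),
        payment_token_address: "0x0000000000000000000000000000000000000000".to_string(),
        chunk_payments_address: "0x0000000000000000000000000000000000000000".to_string(),
    };
    // `Into<EvmNetwork>` is the only bridge between the CLI surface and sn_evm.
    let network: EvmNetwork = cmd.into();
    assert!(matches!(network, EvmNetwork::Custom(_)));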
diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index 5be4ef15b6..699495d291 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -15,6 +15,7 @@ use crate::{ print_banner, status_report, VerbosityLevel, }; use color_eyre::{eyre::eyre, Help, Report, Result}; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -39,6 +40,8 @@ pub async fn join( owner_prefix: Option<String>, peers_args: PeersArgs, rpc_port: Option<PortRange>, + rewards_address: RewardsAddress, + evm_network: Option<EvmNetwork>, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { @@ -107,6 +110,8 @@ pub async fn join( safenode_bin_path, skip_validation, log_format, + rewards_address, + evm_network, }; run_network(options, &mut local_node_registry, &ServiceController {}).await?; Ok(()) @@ -145,6 +150,8 @@ pub async fn run( owner: Option<String>, owner_prefix: Option<String>, rpc_port: Option<PortRange>, + rewards_address: RewardsAddress, + evm_network: Option<EvmNetwork>, skip_validation: bool, verbosity: VerbosityLevel, ) -> Result<(), Report> { @@ -219,6 +226,8 @@ pub async fn run( safenode_bin_path, skip_validation, log_format, + rewards_address, + evm_network, }; run_network(options, &mut local_node_registry, &ServiceController {}).await?; diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 36a452819a..5ee8d4c5d7 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -41,6 +41,7 @@ impl From<u8> for VerbosityLevel { use crate::error::{Error, Result}; use colored::Colorize; use semver::Version; +use sn_evm::AttoTokens; use sn_service_management::rpc::RpcActions; use sn_service_management::{ control::ServiceControl, error::Error as ServiceError, rpc::RpcClient, NodeRegistry, @@ -555,7 +556,7 @@ pub async fn refresh_node_registry( // exists. 
match HotWallet::try_load_from(&node.data_dir_path) { Ok(wallet) => { - node.reward_balance = Some(wallet.balance()); + node.reward_balance = Some(AttoTokens::from_u64(wallet.balance().as_nano())); trace!( "Wallet balance for node {}: {}", node.service_name, @@ -672,6 +673,7 @@ mod tests { use mockall::{mock, predicate::*}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; + use sn_evm::AttoTokens; use sn_logging::LogFormat; use sn_service_management::{ error::{Error as ServiceControlError, Result as ServiceControlResult}, @@ -679,7 +681,6 @@ mod tests { rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, UpgradeOptions, UpgradeResult, }; - use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -780,7 +781,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -884,7 +885,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -951,7 +952,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1061,7 +1062,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1139,7 +1140,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1229,7 +1230,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1318,7 +1319,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1379,7 +1380,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: 
Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1426,7 +1427,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1475,7 +1476,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1523,7 +1524,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1588,7 +1589,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1714,7 +1715,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1802,7 +1803,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1935,7 +1936,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2080,7 +2081,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2220,7 +2221,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), 
safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2361,7 +2362,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2532,7 +2533,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2686,7 +2687,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2843,7 +2844,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2997,7 +2998,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3154,7 +3155,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3308,7 +3309,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3465,7 +3466,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3622,7 +3623,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3779,7 +3780,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + 
reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3935,7 +3936,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4005,7 +4006,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, @@ -4064,7 +4065,7 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4137,7 +4138,7 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4200,7 +4201,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4263,7 +4264,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 58d650cf67..863cc748d9 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; @@ -18,13 +18,13 @@ use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; #[cfg(test)] use mockall::automock; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, - FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, + NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, @@ -37,6 +37,7 @@ use sysinfo::{Pid, System}; pub trait Launcher { fn get_safenode_path(&self) -> PathBuf; fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result<u32>; + #[allow(clippy::too_many_arguments)] fn 
launch_node( &self, bootstrap_peers: Vec<Multiaddr>, @@ -45,6 +46,8 @@ node_port: Option<u16>, owner: Option<String>, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option<EvmNetwork>, ) -> Result<()>; fn wait(&self, delay: u64); } @@ -90,6 +93,8 @@ impl Launcher for LocalSafeLauncher { node_port: Option<u16>, owner: Option<String>, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option<EvmNetwork>, ) -> Result<()> { let mut args = Vec::new(); @@ -126,6 +131,22 @@ impl Launcher for LocalSafeLauncher { args.push("--rpc".to_string()); args.push(rpc_socket_addr.to_string()); + args.push("--rewards-address".to_string()); + args.push(rewards_address.to_string()); + + if let Some(network) = evm_network { + args.push(format!("evm-{}", network.identifier())); + + if let EvmNetwork::Custom(custom) = network { + args.push("--rpc-url".to_string()); + args.push(custom.rpc_url_http.to_string()); + args.push("--payment-token-address".to_string()); + args.push(custom.payment_token_address.to_string()); + args.push("--chunk-payments-address".to_string()); + args.push(custom.chunk_payments_address.to_string()); + } + } + Command::new(self.safenode_bin_path.clone()) .args(args) .stdout(Stdio::inherit()) @@ -197,13 +218,21 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res if !keep_directories { // At this point we don't allow path overrides, so deleting the data directory will clear // the log directory also. - std::fs::remove_dir_all(&node.data_dir_path)?; - debug!("Removed node data directory: {:?}", node.data_dir_path); - println!( - " {} Removed {}", - "✓".green(), - node.data_dir_path.to_string_lossy() - ); + if let Err(e) = std::fs::remove_dir_all(&node.data_dir_path) { + error!("Failed to remove node data directory: {:?}", e); + println!( + " {} Failed to remove {}: {e}", + "✗".red(), + node.data_dir_path.to_string_lossy() + ); + } else { + debug!("Removed node data directory: {:?}", node.data_dir_path); + println!( + " {} Removed {}", + "✓".green(), + node.data_dir_path.to_string_lossy() + ); + } } } @@ -225,6 +254,8 @@ pub struct LocalNetworkOptions { pub safenode_bin_path: PathBuf, pub skip_validation: bool, pub log_format: Option<LogFormat>, + pub rewards_address: RewardsAddress, + pub evm_network: Option<EvmNetwork>, } pub async fn run_network( @@ -301,6 +332,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -348,6 +381,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -374,22 +409,23 @@ pub async fn run_network( validate_network(node_registry, bootstrap_peers.clone()).await?; } - if !options.join { - println!("Launching the faucet server..."); - let version = get_bin_version(&options.faucet_bin_path)?; - let pid = launcher.launch_faucet(&bootstrap_peers[0])?; - let faucet = FaucetServiceData { - faucet_path: options.faucet_bin_path, - local: true, - log_dir_path: get_faucet_data_dir(), - pid: Some(pid), - service_name: "faucet".to_string(), - status: ServiceStatus::Running, - user: get_username()?, - version, - }; - node_registry.faucet = Some(faucet); - } + // TODO: re-enable faucet when it can do EVM payments or when we switch back to native payments + // if !options.join { + // println!("Launching the faucet 
server..."); + // let pid = launcher.launch_faucet(&bootstrap_peers[0])?; + // let version = get_bin_version(&options.faucet_bin_path)?; + // let faucet = FaucetServiceData { + // faucet_path: options.faucet_bin_path, + // local: true, + // log_dir_path: get_faucet_data_dir(), + // pid: Some(pid), + // service_name: "faucet".to_string(), + // status: ServiceStatus::Running, + // user: get_username()?, + // version, + // }; + // node_registry.faucet = Some(faucet); + // } Ok(()) } @@ -404,6 +440,8 @@ pub struct RunNodeOptions { pub number: u16, pub owner: Option, pub rpc_socket_addr: SocketAddr, + pub rewards_address: RewardsAddress, + pub evm_network: Option, pub version: String, } @@ -421,6 +459,8 @@ pub async fn run_node( run_options.node_port, run_options.owner.clone(), run_options.rpc_socket_addr, + run_options.rewards_address, + run_options.evm_network, )?; launcher.wait(run_options.interval); @@ -532,6 +572,7 @@ mod tests { use libp2p_identity::PeerId; use mockall::mock; use mockall::predicate::*; + use sn_evm::utils::dummy_address; use sn_service_management::{ error::Result as RpcResult, rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, @@ -557,6 +598,7 @@ mod tests { async fn run_node_should_launch_the_genesis_node() -> Result<()> { let mut mock_launcher = MockLauncher::new(); let mut mock_rpc_client = MockRpcClient::new(); + let rewards_address = dummy_address(); let peer_id = PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?; let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 13000); @@ -569,9 +611,11 @@ mod tests { eq(None), eq(None), eq(rpc_socket_addr), + eq(rewards_address), + eq(None), ) .times(1) - .returning(|_, _, _, _, _, _| Ok(())); + .returning(|_, _, _, _, _, _, _, _| Ok(())); mock_launcher .expect_wait() .with(eq(100)) @@ -617,6 +661,8 @@ mod tests { number: 1, owner: None, rpc_socket_addr, + rewards_address, + evm_network: None, version: "0.100.12".to_string(), }, &mock_launcher, diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 178bf6671c..2cd4de4c17 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -31,6 +31,7 @@ sha2 = "0.10.7" sn_build_info = { path = "../sn_build_info", version = "0.1.13" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_registers = { path = "../sn_registers", version = "0.3.19" } +sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/src/messages.rs b/sn_protocol/src/messages.rs index 1cdab98f2e..cbef76ab90 100644 --- a/sn_protocol/src/messages.rs +++ b/sn_protocol/src/messages.rs @@ -16,7 +16,7 @@ mod response; pub use self::{ chunk_proof::{ChunkProof, Nonce}, - cmd::{Cmd, Hash}, + cmd::Cmd, node_id::NodeId, query::Query, register::RegisterCmd, diff --git a/sn_protocol/src/messages/cmd.rs b/sn_protocol/src/messages/cmd.rs index 094d93cae4..a9618ba3f8 100644 --- a/sn_protocol/src/messages/cmd.rs +++ b/sn_protocol/src/messages/cmd.rs @@ -9,8 +9,7 @@ use crate::{storage::RecordType, NetworkAddress}; use serde::{Deserialize, Serialize}; -// TODO: remove this dependency and define these types herein. -pub use sn_transfers::{Hash, PaymentQuote}; +pub use sn_evm::PaymentQuote; /// Data and CashNote cmds - recording spends or creating, updating, and removing data. 
diff --git a/sn_protocol/src/messages/response.rs b/sn_protocol/src/messages/response.rs index 28fb8035f3..17c986f581 100644 --- a/sn_protocol/src/messages/response.rs +++ b/sn_protocol/src/messages/response.rs @@ -12,7 +12,7 @@ use super::ChunkProof; use bytes::Bytes; use core::fmt; use serde::{Deserialize, Serialize}; -use sn_transfers::{MainPubkey, PaymentQuote}; +use sn_evm::{PaymentQuote, RewardsAddress}; use std::fmt::Debug; /// The response to a query, containing the query result. @@ -26,8 +26,8 @@ pub enum QueryResponse { GetStoreCost { /// The store cost quote for storing the next record. quote: Result<PaymentQuote, ProtocolError>, - /// The cash_note MainPubkey to pay this node's store cost to. - payment_address: MainPubkey, + /// The rewards address to pay this node's store cost to. + payment_address: RewardsAddress, /// Node's Peer Address peer_address: NetworkAddress, }, diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index ee88185752..04921730ef 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use lazy_static::lazy_static; -use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK, PAYMENT_FORWARD_PK}; +use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK}; lazy_static! { /// The node version used during Identify Behaviour. @@ -65,7 +65,5 @@ fn get_key_version_str() -> String { let _ = g_k_str.split_off(6); let mut n_k_str = NETWORK_ROYALTIES_PK.to_hex(); let _ = n_k_str.split_off(6); - let mut p_k_str = PAYMENT_FORWARD_PK.to_hex(); - let _ = p_k_str.split_off(6); - format!("{f_k_str}_{g_k_str}_{n_k_str}_{p_k_str}") + format!("{f_k_str}_{g_k_str}_{n_k_str}") } diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 46c6d80d26..d5a9119a46 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -23,7 +23,7 @@ sn_logging = { path = "../sn_logging", version = "0.2.34" } sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.1" } +sn_evm = { path = "../sn_evm", version = "0.1.0" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index ffd6af0742..2cc7060d33 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -11,9 +11,9 @@ use async_trait::async_trait; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use service_manager::{ServiceInstallCtx, ServiceLabel}; +use sn_evm::AttoTokens; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{Ipv4Addr, SocketAddr}, @@ -282,7 +282,7 @@ pub struct NodeServiceData { )] pub peer_id: Option<PeerId>, pub pid: Option<u32>, - pub reward_balance: Option<NanoTokens>, + pub reward_balance: Option<AttoTokens>, pub rpc_socket_addr: SocketAddr, pub safenode_path: PathBuf, pub service_name: String,
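Before the next commit, one detail in the patch above is worth pausing on: `refresh_node_registry` bridges the legacy wallet balance into the new type with `AttoTokens::from_u64(wallet.balance().as_nano())`. A minimal sketch of the assumption that encodes (the raw u64 is wrapped as-is; whether a nano-to-atto rescaling belongs here is not addressed by this diff):

    // Illustrative only: the numeric value carries over unchanged, only the wrapper type changes.
    let nanos: u64 = 1_500; // e.g. HotWallet::balance().as_nano()
    let reward = AttoTokens::from_u64(nanos);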
From 0ac232fdbce417f5e50b2b039ed8f34f217936d9 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Thu, 26 Sep 2024 09:24:00 +0200 Subject: [PATCH 051/255] chore: ignore DevSkim warnings for hex types --- evmlib/src/event.rs | 2 +- evmlib/src/transaction.rs | 12 ++++++------ evmlib/src/wallet.rs | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs index deca415998..9327eb98cd 100644 --- a/evmlib/src/event.rs +++ b/evmlib/src/event.rs @@ -4,7 +4,7 @@ use alloy::rpc::types::Log; // Should be updated when the smart contract changes! pub(crate) const CHUNK_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> = - b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958"); + b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958"); // DevSkim: ignore DS173237 #[derive(thiserror::Error, Debug)] pub enum Error { diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs index b83d30f750..87798ba1d3 100644 --- a/evmlib/src/transaction.rs +++ b/evmlib/src/transaction.rs @@ -148,7 +148,7 @@ mod tests { async fn test_get_transaction_receipt_by_hash() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 assert!(get_transaction_receipt_by_hash(&network, tx_hash) .await .is_ok()); @@ -161,9 +161,9 @@ let network = Network::ArbitrumOne; let block_number: u64 = 250043261; - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 let amount = U256::from(200); - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 let logs = get_chunk_payment_event(&network, block_number, quote_hash, reward_address, amount) .await; @@ -177,9 +177,9 @@ async fn test_verify_chunk_payment() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); + let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 + let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 + let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 let amount = U256::from(200); let result = verify_chunk_payment( diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 7f01497fb5..69eb0d55b9 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -278,7 +278,7 @@ mod tests { #[tokio::test] async fn test_from_private_key() { - let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d"; + let private_key = "bf210844fa5463e373974f3d6fbedf451350c3e72b81b3c5b1718cb91f49c33d"; // DevSkim: ignore DS117838 let wallet = from_private_key(private_key).unwrap(); let account = <EthereumWallet as NetworkWallet<Ethereum>>::default_signer_address(&wallet); From d8153bb1b3c77b27d2b2e72af252a6d62c3d7139 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 24 Sep 2024 14:42:32 +0200 Subject: [PATCH 052/255] feat(launchpad): new stats with layout and interaction --- Cargo.lock | 11 + node-launchpad/Cargo.toml | 1 + node-launchpad/src/components/status.rs | 302 ++++++++++++++++++------ node-launchpad/src/node_stats.rs | 136 ++++++++++- 4 files changed, 367 insertions(+), 83 deletions(-) diff --git a/Cargo.lock 
b/Cargo.lock index 5abdf34a9c..710bef0512 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4770,6 +4770,7 @@ dependencies = [ "strum", "sysinfo", "tempfile", + "throbber-widgets-tui", "tokio", "tokio-util 0.7.12", "tracing", @@ -7866,6 +7867,16 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "throbber-widgets-tui" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fad9e055cadd9da8b4a67662b962e3e67e96af491ae9cec7e88aaff92e7c3666" +dependencies = [ + "rand 0.8.5", + "ratatui", +] + [[package]] name = "time" version = "0.3.36" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index f1b006bd67..5269250ce7 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -68,6 +68,7 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter", "serde"] } tui-input = "0.8.0" which = "6.0.1" faccess = "0.2.4" +throbber-widgets-tui = "0.7.0" [build-dependencies] vergen = { version = "8.2.6", features = ["build", "git", "gitoxide", "cargo"] } diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index d06e777953..cf56567079 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -19,6 +19,7 @@ use crate::connection_mode::ConnectionMode; use crate::error::ErrorPopup; use crate::node_mgmt::MaintainNodesArgs; use crate::node_mgmt::{PORT_MAX, PORT_MIN}; +use crate::style::{COOL_GREY, INDIGO}; use crate::tui::Event; use crate::{ action::{Action, StatusActions}, @@ -39,6 +40,7 @@ use sn_peers_acquisition::PeersArgs; use sn_service_management::{ control::ServiceController, NodeRegistry, NodeServiceData, ServiceStatus, }; +use std::collections::HashMap; use std::{ path::PathBuf, time::{Duration, Instant}, @@ -48,6 +50,8 @@ use tokio::sync::mpsc::UnboundedSender; use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes}; +use throbber_widgets_tui::{self, ThrobberState}; + const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. 
const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; @@ -60,6 +64,7 @@ pub struct Status { config: Config, // state node_services: Vec<NodeServiceData>, + node_services_throttle_state: HashMap<String, ThrobberState>, is_nat_status_determined: bool, error_while_running_nat_detection: usize, node_stats: NodeStats, @@ -111,6 +116,7 @@ impl Status { config: Default::default(), active: true, node_services: Default::default(), + node_services_throttle_state: HashMap::new(), is_nat_status_determined: false, error_while_running_nat_detection: 0, node_stats: NodeStats::default(), @@ -256,6 +262,9 @@ impl Component for Status { match action { Action::Tick => { self.try_update_node_stats(false)?; + for (_spinner_key, spinner_state) in self.node_services_throttle_state.iter_mut() { + spinner_state.calc_next(); // Assuming calc_next() is a method of ThrobberState + } } Action::SwitchScene(scene) => match scene { Scene::Status | Scene::StatusBetaProgrammePopUp => { @@ -528,13 +537,13 @@ impl Component for Status { Cell::new("Storage Allocated".to_string()).fg(GHOST_WHITE), Cell::new(format!("{} GB", self.nodes_to_start * GB_PER_NODE)).fg(GHOST_WHITE), ]); - let memory_use_val = if self.node_stats.memory_usage_mb as f64 / 1024_f64 > 1.0 { + let memory_use_val = if self.node_stats.total_memory_usage_mb as f64 / 1024_f64 > 1.0 { format!( "{:.2} GB", - self.node_stats.memory_usage_mb as f64 / 1024_f64 + self.node_stats.total_memory_usage_mb as f64 / 1024_f64 ) } else { - format!("{} MB", self.node_stats.memory_usage_mb) + format!("{} MB", self.node_stats.total_memory_usage_mb) }; let memory_use_row = Row::new(vec![ @@ -586,7 +595,7 @@ impl Component for Status { let total_nanos_earned_and_discord_row = Row::new(vec![ Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), - Cell::new(self.node_stats.forwarded_rewards.to_string()) + Cell::new(self.node_stats.total_forwarded_rewards.to_string()) .fg(VIVID_SKY_BLUE) .bold(), Cell::new( @@ -625,24 +634,98 @@ impl Component for Status { // ==== Node Status ===== + // Widths + const NODE_WIDTH: usize = 10; + const VERSION_WIDTH: usize = 7; + const NANOS_WIDTH: usize = 5; + const MEMORY_WIDTH: usize = 7; + const MBPS_WIDTH: usize = 15; + const RECORDS_WIDTH: usize = 4; + const PEERS_WIDTH: usize = 5; + const CONNS_WIDTH: usize = 5; + const STATUS_WIDTH: usize = 8; + const SPINNER_WIDTH: usize = 1; + let node_rows: Vec<_> = self .node_services .iter() .filter_map(|n| { - let peer_id = n.peer_id; if n.status == ServiceStatus::Removed { return None; } - let peer_id = peer_id.map(|p| p.to_string()).unwrap_or("-".to_string()); - let status = format!("{:?}", n.status); - let version = format!("v{}", n.version); - let row = vec![n.service_name.clone(), peer_id, version, status]; + let mut status = format!("{:?}", n.status); + if let Some(LockRegistryState::StartingNodes) = self.lock_registry { + status = "Starting".to_string(); + } + let connected_peers = match n.connected_peers { + Some(ref peers) => format!("{:?}", peers.len()), + None => "0".to_string(), + }; + + let mut nanos = "-".to_string(); + let mut memory = "-".to_string(); + let mut mbps = " -".to_string(); + let mut records = "-".to_string(); + let mut connections = "-".to_string(); + + let individual_stats = self + .node_stats + .individual_stats + .iter() + .find(|s| s.service_name == n.service_name); + if let Some(stats) = individual_stats { + nanos = stats.forwarded_rewards.to_string(); + memory = stats.memory_usage_mb.to_string(); + mbps = format!( + "↓{:06.2} ↑{:06.2}", + stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), 
stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) + ); + records = stats.max_records.to_string(); + connections = stats.connections.to_string(); + } + + // Create a row vector + let row = vec![ + n.service_name.clone().to_string(), + n.version.to_string(), + format!( + "{}{}", + " ".repeat(NANOS_WIDTH.saturating_sub(nanos.len())), + nanos.to_string() + ), + format!( + "{}{} MB", + " ".repeat(MEMORY_WIDTH.saturating_sub(memory.len() + 4)), + memory.to_string() + ), + mbps.to_string(), + format!( + "{}{}", + " ".repeat(RECORDS_WIDTH.saturating_sub(records.len())), + records.to_string() + ), + format!( + "{}{}", + " ".repeat(PEERS_WIDTH.saturating_sub(connected_peers.len())), + connected_peers.to_string() + ), + format!( + "{}{}", + " ".repeat(CONNS_WIDTH.saturating_sub(connections.len())), + connections.to_string() + ), + status.to_string(), + ]; + + // Create a styled row let row_style = if n.status == ServiceStatus::Running { Style::default().fg(EUCALYPTUS) } else { Style::default().fg(GHOST_WHITE) }; + Some(Row::new(row).style(row_style)) }) .collect(); @@ -681,31 +764,112 @@ impl Component for Status { layout[2], ); } else { + // Node/s block + let block_nodes = Block::default() + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + format!(" ({}) ", self.nodes_to_start), + Style::default().fg(LIGHT_PERIWINKLE), + ), + ])) + .padding(Padding::new(1, 1, 0, 0)) + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(EUCALYPTUS)); + + // Create a layout to arrange the header and table vertically + let inner_layout = Layout::new( + Direction::Vertical, + vec![Constraint::Length(1), Constraint::Min(0)], + ); + + // Split the inner area of the combined block + let inner_area = block_nodes.inner(layout[2]); + let inner_chunks = inner_layout.split(inner_area); + + // Column Widths let node_widths = [ - Constraint::Length(11), - Constraint::Fill(1), - Constraint::Length(9), - Constraint::Length(8), + Constraint::Min(NODE_WIDTH as u16), + Constraint::Min(VERSION_WIDTH as u16), + Constraint::Min(NANOS_WIDTH as u16), + Constraint::Min(MEMORY_WIDTH as u16), + Constraint::Min(MBPS_WIDTH as u16), + Constraint::Min(RECORDS_WIDTH as u16), + Constraint::Min(PEERS_WIDTH as u16), + Constraint::Min(CONNS_WIDTH as u16), + Constraint::Min(STATUS_WIDTH as u16), + Constraint::Max(SPINNER_WIDTH as u16), ]; + + // Header + let header_row = Row::new(vec![ + Cell::new("Node").fg(COOL_GREY), + Cell::new("Version").fg(COOL_GREY), + Cell::new("Nanos").fg(COOL_GREY), + Cell::new("Memory").fg(COOL_GREY), + Cell::new( + format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps").fg(COOL_GREY), + ), + Cell::new("Recs").fg(COOL_GREY), + Cell::new("Peers").fg(COOL_GREY), + Cell::new("Conns").fg(COOL_GREY), + Cell::new("Status").fg(COOL_GREY), + Cell::new(" ").fg(COOL_GREY), // Spinner + ]); + + let header = Table::new(vec![header_row.clone()], node_widths) + .style(Style::default().add_modifier(Modifier::BOLD)); + + // Table items let table = Table::new(node_rows.clone(), node_widths) - .column_spacing(2) - .highlight_style(Style::new().reversed()) - .block( - Block::default() - .title(Line::from(vec![ - Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), - Span::styled( - format!(" ({}) ", self.nodes_to_start), - Style::default().fg(LIGHT_PERIWINKLE), - ), - ])) - .padding(Padding::new(2, 2, 1, 1)) - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - 
.border_style(Style::default().fg(EUCALYPTUS)), - ) - .highlight_symbol("*"); - f.render_stateful_widget(table, layout[2], &mut self.node_table_state); + .column_spacing(1) + .highlight_style(Style::default().bg(INDIGO)) + .highlight_spacing(HighlightSpacing::Always); + + f.render_stateful_widget(header, inner_chunks[0], &mut self.node_table_state); + f.render_stateful_widget(table, inner_chunks[1], &mut self.node_table_state); + + // Render the throbber in the last column for running nodes + for (i, node) in self.node_services.iter().enumerate() { + let mut throbber = throbber_widgets_tui::Throbber::default() + .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE); + match node.status { + ServiceStatus::Running => { + throbber = throbber + .throbber_style( + Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), + ) + .use_type(throbber_widgets_tui::WhichUse::Spin); + } + ServiceStatus::Stopped => { + throbber = throbber + .throbber_style( + Style::default() + .fg(GHOST_WHITE) + .add_modifier(Modifier::BOLD), + ) + .use_type(throbber_widgets_tui::WhichUse::Full); + } + _ => {} + } + if let Some(LockRegistryState::StartingNodes) = self.lock_registry { + throbber = throbber + .throbber_style( + Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::BOX_DRAWING) + .use_type(throbber_widgets_tui::WhichUse::Spin); + } + let throbber_area = + Rect::new(inner_chunks[1].width, inner_chunks[1].y + i as u16, 1, 1); + let throttle_state = self + .node_services_throttle_state + .entry(node.service_name.clone()) + .or_default(); + f.render_stateful_widget(throbber, throbber_area, throttle_state); + } + f.render_widget(block_nodes, layout[2]); } // ==== Footer ===== @@ -735,19 +899,6 @@ impl Component for Status { // Status Popup if let Some(registry_state) = &self.lock_registry { - let popup_area = centered_rect_fixed(50, 12, area); - clear_area(f, popup_area); - - let popup_border = Paragraph::new("").block( - Block::default() - .borders(Borders::ALL) - .title(" Manage Nodes ") - .bold() - .title_style(Style::new().fg(VIVID_SKY_BLUE)) - .padding(Padding::uniform(2)) - .border_style(Style::new().fg(GHOST_WHITE)), - ); - let popup_text = match registry_state { LockRegistryState::StartingNodes => { if self.should_we_run_nat_detection() { @@ -759,12 +910,8 @@ impl Component for Status { Line::raw("This may take a couple minutes."), ] } else { - vec![ - Line::raw(""), - Line::raw(""), - Line::raw(""), - Line::raw("Starting nodes..."), - ] + // We avoid rendering the popup as we have status lines now + return Ok(()); } } LockRegistryState::StoppingNodes => { @@ -784,26 +931,41 @@ impl Component for Status { ] } }; - let centred_area = Layout::new( - Direction::Vertical, - vec![ - // border - Constraint::Length(2), - // our text goes here - Constraint::Min(5), - // border - Constraint::Length(1), - ], - ) - .split(popup_area); - let text = Paragraph::new(popup_text) - .block(Block::default().padding(Padding::horizontal(2))) - .wrap(Wrap { trim: false }) - .alignment(Alignment::Center) - .fg(EUCALYPTUS); - f.render_widget(text, centred_area[1]); - - f.render_widget(popup_border, popup_area); + if !popup_text.is_empty() { + let popup_area = centered_rect_fixed(50, 12, area); + clear_area(f, popup_area); + + let popup_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Manage Nodes ") + .bold() + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + 
.border_style(Style::new().fg(GHOST_WHITE)), + ); + + let centred_area = Layout::new( + Direction::Vertical, + vec![ + // border + Constraint::Length(2), + // our text goes here + Constraint::Min(5), + // border + Constraint::Length(1), + ], + ) + .split(popup_area); + let text = Paragraph::new(popup_text) + .block(Block::default().padding(Padding::horizontal(2))) + .wrap(Wrap { trim: false }) + .alignment(Alignment::Center) + .fg(EUCALYPTUS); + f.render_widget(text, centred_area[1]); + + f.render_widget(popup_border, popup_area); + } } Ok(()) } diff --git a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs index c43d868eef..0cbebcc815 100644 --- a/node-launchpad/src/node_stats.rs +++ b/node-launchpad/src/node_stats.rs @@ -16,17 +16,49 @@ use tokio::sync::mpsc::UnboundedSender; use crate::action::{Action, StatusActions}; #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct NodeStats { +pub struct IndividualNodeStats { + pub service_name: String, pub forwarded_rewards: u64, pub memory_usage_mb: usize, + pub bandwidth_inbound: usize, + pub bandwidth_outbound: usize, + pub max_records: usize, + pub peers: usize, + pub connections: usize, +} + +#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct NodeStats { + pub total_forwarded_rewards: u64, + pub total_memory_usage_mb: usize, + pub individual_stats: Vec<IndividualNodeStats>, } impl NodeStats { - fn merge(&mut self, other: &NodeStats) { - self.forwarded_rewards += other.forwarded_rewards; - self.memory_usage_mb += other.memory_usage_mb; + fn merge(&mut self, other: &IndividualNodeStats) { + self.total_forwarded_rewards += other.forwarded_rewards; + self.total_memory_usage_mb += other.memory_usage_mb; + self.individual_stats.push(other.clone()); // Store individual stats } + /// Fetches statistics from all running nodes and sends the aggregated stats via the action sender. + /// + /// This method iterates over the provided list of `NodeServiceData` instances, filters out nodes that are not running, + /// and for each running node, it checks if a metrics port is available. If a metrics port is found, the node's details + /// (service name, metrics port, and data directory path) are collected. If no metrics port is found, a debug message + /// is logged indicating that the node's stats will not be fetched. + /// + /// If there are any nodes with available metrics ports, this method spawns a local task to asynchronously fetch + /// statistics from these nodes using `fetch_all_node_stats_inner`. The aggregated statistics are then sent via the + /// provided `action_sender`. + /// + /// If no running nodes with metrics ports are found, a debug message is logged indicating that there are no running nodes + /// to fetch stats from. + /// + /// # Parameters + /// + /// * `nodes`: A slice of `NodeServiceData` instances representing the nodes to fetch statistics from. + /// * `action_sender`: An unbounded sender of `Action` instances used to send the aggregated node statistics. pub fn fetch_all_node_stats(nodes: &[NodeServiceData], action_sender: UnboundedSender<Action>) { let node_details = nodes .iter()
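For context on the hunks that follow: `fetch_stat_per_node` scrapes each node's local metrics port over HTTP and matches on metric names, so its input is plain Prometheus exposition text. Roughly this shape (the metric names are the ones matched in the code; values and labels are illustrative):

    sn_node_total_forwarded_rewards 1024
    sn_networking_process_memory_used_mb 87
    libp2p_bandwidth_bytes_total{direction="Inbound"} 2097152
    libp2p_bandwidth_bytes_total{direction="Outbound"} 1048576
    sn_networking_records_stored 42
    sn_networking_peers_in_routing_table 12
    sn_networking_open_connections 5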
@@ -60,6 +92,24 @@ impl NodeStats { } } + /// This method is an inner function used to fetch statistics from all nodes. + /// It takes a vector of node details (service name, metrics port, and data directory path) and an unbounded sender of `Action` instances. + /// For each entry it scrapes the node's metrics endpoint via `fetch_stat_per_node`, merges the per-node results + /// into an aggregate `NodeStats`, and sends the aggregated statistics via the provided `action_sender`. + /// Individual fetch failures are logged and skipped rather than aborting the whole run. + /// + /// # Parameters + /// + /// * `node_details`: A vector of tuples, each containing the service name, metrics port, and data directory path of a node. + /// * `action_sender`: An unbounded sender of `Action` instances used to send the aggregated node statistics. async fn fetch_all_node_stats_inner( node_details: Vec<(String, u16, PathBuf)>, action_sender: UnboundedSender<Action>, @@ -78,7 +128,17 @@ impl NodeStats { while let Some((result, service_name)) = stream.next().await { match result { Ok(stats) => { - all_node_stats.merge(&stats); + let individual_stats = IndividualNodeStats { + service_name: service_name.clone(), + forwarded_rewards: stats.forwarded_rewards, + memory_usage_mb: stats.memory_usage_mb, + bandwidth_inbound: stats.bandwidth_inbound, + bandwidth_outbound: stats.bandwidth_outbound, + max_records: stats.max_records, + peers: stats.peers, + connections: stats.connections, + }; + all_node_stats.merge(&individual_stats); } Err(err) => { error!("Error while fetching stats from {service_name:?}: {err:?}"); @@ -93,7 +153,10 @@ impl NodeStats { } } - async fn fetch_stat_per_node(metrics_port: u16, _data_dir: PathBuf) -> Result<NodeStats> { + async fn fetch_stat_per_node( + metrics_port: u16, + _data_dir: PathBuf, + ) -> Result<IndividualNodeStats> { let now = Instant::now(); let body = reqwest::get(&format!("http://localhost:{metrics_port}/metrics")) .await? .text() .await?; let lines: Vec<_> = body.lines().map(|s| Ok(s.to_owned())).collect(); let all_metrics = prometheus_parse::Scrape::parse(lines.into_iter())?; - let mut stats = NodeStats { - memory_usage_mb: 0, - forwarded_rewards: 0, - }; + let mut stats = IndividualNodeStats::default(); + for sample in all_metrics.samples.iter() { - if sample.metric == "sn_networking_process_memory_used_mb" { + debug!(sample.metric); + if sample.metric == "sn_node_total_forwarded_rewards" { + // Nanos + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.forwarded_rewards = val as u64; + } + _ => {} + } + } else if sample.metric == "sn_networking_process_memory_used_mb" { + // Memory match sample.value { prometheus_parse::Value::Counter(val) | prometheus_parse::Value::Gauge(val) | prometheus_parse::Value::Untyped(val) => { stats.memory_usage_mb = val as usize; } _ => {} } - } else if sample.metric == "sn_node_total_forwarded_rewards" { + } else if sample.metric == "libp2p_bandwidth_bytes_total" { + // Mbps match sample.value { prometheus_parse::Value::Counter(val) | prometheus_parse::Value::Gauge(val) | prometheus_parse::Value::Untyped(val) => { - stats.forwarded_rewards = val as u64; + if let Some(direction) = sample.labels.get("direction") { if 
direction == "Inbound" { + stats.bandwidth_inbound += val as usize; + } else if direction == "Outbound" { + stats.bandwidth_outbound += val as usize; + } + } + } + _ => {} + } + } else if sample.metric == "sn_networking_records_stored" { + // Records + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.max_records = val as usize; + } + _ => {} + } + } else if sample.metric == "sn_networking_peers_in_routing_table" { + // Peers + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.peers = val as usize; + } + _ => {} + } + } else if sample.metric == "sn_networking_open_connections" { + // Connections + match sample.value { + prometheus_parse::Value::Counter(val) + | prometheus_parse::Value::Gauge(val) + | prometheus_parse::Value::Untyped(val) => { + stats.connections = val as usize; } _ => {} } From ee6f8f40b4937abb1bf7389dd7c8567a0df06251 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 25 Sep 2024 16:10:07 +0200 Subject: [PATCH 053/255] feat(launchpad): mbps calculation and display --- node-launchpad/src/components/status.rs | 6 +++--- node-launchpad/src/node_stats.rs | 28 ++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index cf56567079..b9f80449e7 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -52,7 +52,7 @@ use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes} use throbber_widgets_tui::{self, ThrobberState}; -const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); +pub const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. 
const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; @@ -639,7 +639,7 @@ impl Component for Status { const VERSION_WIDTH: usize = 7; const NANOS_WIDTH: usize = 5; const MEMORY_WIDTH: usize = 7; - const MBPS_WIDTH: usize = 15; + const MBPS_WIDTH: usize = 13; const RECORDS_WIDTH: usize = 4; const PEERS_WIDTH: usize = 5; const CONNS_WIDTH: usize = 5; @@ -678,7 +678,7 @@ impl Component for Status { nanos = stats.forwarded_rewards.to_string(); memory = stats.memory_usage_mb.to_string(); mbps = format!( - "↓{:06.2} ↑{:06.2}", + "↓{:05.2} ↑{:05.2}", stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) ); diff --git a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs index 0cbebcc815..788ccfb94b 100644 --- a/node-launchpad/src/node_stats.rs +++ b/node-launchpad/src/node_stats.rs @@ -13,6 +13,8 @@ use sn_service_management::{NodeServiceData, ServiceStatus}; use std::{path::PathBuf, time::Instant}; use tokio::sync::mpsc::UnboundedSender; +use super::components::status::NODE_STAT_UPDATE_INTERVAL; + use crate::action::{Action, StatusActions}; #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -22,6 +24,10 @@ pub struct IndividualNodeStats { pub memory_usage_mb: usize, pub bandwidth_inbound: usize, pub bandwidth_outbound: usize, + pub prev_bandwidth_inbound: usize, + pub prev_bandwidth_outbound: usize, + pub bandwidth_inbound_rate: usize, + pub bandwidth_outbound_rate: usize, pub max_records: usize, pub peers: usize, pub connections: usize, @@ -137,6 +143,10 @@ impl NodeStats { max_records: stats.max_records, peers: stats.peers, connections: stats.connections, + prev_bandwidth_inbound: stats.prev_bandwidth_inbound, + prev_bandwidth_outbound: stats.prev_bandwidth_outbound, + bandwidth_inbound_rate: stats.bandwidth_inbound_rate, + bandwidth_outbound_rate: stats.bandwidth_outbound_rate, }; all_node_stats.merge(&individual_stats); } @@ -169,7 +179,6 @@ impl NodeStats { let mut stats = IndividualNodeStats::default(); for sample in all_metrics.samples.iter() { - debug!(sample.metric); if sample.metric == "sn_node_total_forwarded_rewards" { // Nanos match sample.value { @@ -198,9 +207,22 @@ impl NodeStats { | prometheus_parse::Value::Untyped(val) => { if let Some(direction) = sample.labels.get("direction") { if direction == "Inbound" { - stats.bandwidth_inbound += val as usize; + let current_inbound = val as usize; + stats.bandwidth_inbound = current_inbound; + let rate = (current_inbound as f64 + - stats.prev_bandwidth_inbound as f64) + / NODE_STAT_UPDATE_INTERVAL.as_secs_f64(); + stats.bandwidth_inbound_rate = rate as usize; + stats.prev_bandwidth_inbound = current_inbound; } else if direction == "Outbound" { - stats.bandwidth_outbound += val as usize; + let current_outbound = val as usize; + stats.bandwidth_outbound = current_outbound; + let rate = (current_outbound as f64 + - stats.prev_bandwidth_outbound as f64) + / NODE_STAT_UPDATE_INTERVAL.as_secs_f64(); + stats.bandwidth_outbound_rate = rate as usize; + stats.prev_bandwidth_outbound = current_outbound; + // Update previous value } } } From 1ff86588a15dc0f95c475b8257d787a969886295 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 26 Sep 2024 11:11:54 +0200 Subject: [PATCH 054/255] feat(launchpad): less variables to calculate rate --- node-launchpad/src/node_stats.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/node-launchpad/src/node_stats.rs b/node-launchpad/src/node_stats.rs index 
788ccfb94b..a68d0d1404 100644 --- a/node-launchpad/src/node_stats.rs +++ b/node-launchpad/src/node_stats.rs @@ -24,8 +24,6 @@ pub struct IndividualNodeStats { pub memory_usage_mb: usize, pub bandwidth_inbound: usize, pub bandwidth_outbound: usize, - pub prev_bandwidth_inbound: usize, - pub prev_bandwidth_outbound: usize, pub bandwidth_inbound_rate: usize, pub bandwidth_outbound_rate: usize, pub max_records: usize, @@ -143,8 +141,6 @@ impl NodeStats { max_records: stats.max_records, peers: stats.peers, connections: stats.connections, - prev_bandwidth_inbound: stats.prev_bandwidth_inbound, - prev_bandwidth_outbound: stats.prev_bandwidth_outbound, bandwidth_inbound_rate: stats.bandwidth_inbound_rate, bandwidth_outbound_rate: stats.bandwidth_outbound_rate, }; @@ -208,21 +204,18 @@ impl NodeStats { if let Some(direction) = sample.labels.get("direction") { if direction == "Inbound" { let current_inbound = val as usize; - stats.bandwidth_inbound = current_inbound; let rate = (current_inbound as f64 - - stats.prev_bandwidth_inbound as f64) + - stats.bandwidth_inbound as f64) / NODE_STAT_UPDATE_INTERVAL.as_secs_f64(); stats.bandwidth_inbound_rate = rate as usize; - stats.prev_bandwidth_inbound = current_inbound; + stats.bandwidth_inbound = current_inbound; } else if direction == "Outbound" { let current_outbound = val as usize; - stats.bandwidth_outbound = current_outbound; let rate = (current_outbound as f64 - - stats.prev_bandwidth_outbound as f64) + - stats.bandwidth_outbound as f64) / NODE_STAT_UPDATE_INTERVAL.as_secs_f64(); stats.bandwidth_outbound_rate = rate as usize; - stats.prev_bandwidth_outbound = current_outbound; - // Update previous value + stats.bandwidth_outbound = current_outbound; } } } From 913e44feee1d985e63f84d1c6ed61a6175893b17 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Thu, 26 Sep 2024 13:35:12 +0200 Subject: [PATCH 055/255] chore(evmlib): use alloy without openssl --- Cargo.lock | 143 ---------------------------------------------- evmlib/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83175a5cf0..07bda779e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3091,21 +3091,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -4361,22 +4346,6 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.8" @@ -5829,23 +5798,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -6226,50 +6178,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.20.0" @@ -7556,13 +7464,11 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.3", - "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -7575,7 +7481,6 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tokio-native-tls", "tokio-rustls 0.26.0", "tower-service", "url", @@ -7984,15 +7889,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" -dependencies = [ - "windows-sys 0.59.0", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -8102,29 +7998,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "self_encryption" version = "0.29.2" @@ -9528,16 +9401,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -10185,12 +10048,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "vergen" version = "8.3.2" diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index bd7fa723ae..ea795b69c8 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.1.0" [dependencies] -alloy = { version = "0.2", features = ["full", "provider-anvil-node"] } +alloy = { version = "0.2", default-features = false, features = ["std", "full", "provider-anvil-node", "reqwest-rustls-tls"] } serde = "1.0" thiserror = "1.0" tokio = "1.38.0" From 3784e312aebd5f47fcd532ed40a0f4c2b1bc2cb5 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 26 Sep 2024 16:58:48 +0200 Subject: [PATCH 056/255] fix(launchpad): disable node selection --- node-launchpad/src/components/status.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index dd3f3b6691..82d12c968c 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -181,7 +181,8 @@ impl Status { ); if !self.node_services.is_empty() && self.node_table_state.selected().is_none() { - self.node_table_state.select(Some(0)); + // self.node_table_state.select(Some(0)); + self.node_table_state.select(None); } Ok(()) @@ -207,7 +208,7 @@ impl Status { .collect() } - fn select_next_table_item(&mut self) { + fn _select_next_table_item(&mut self) { let i = match self.node_table_state.selected() { Some(i) => { if i >= self.node_services.len() - 1 { @@ -221,7 +222,7 @@ impl Status { self.node_table_state.select(Some(i)); } - fn select_previous_table_item(&mut self) { + fn _select_previous_table_item(&mut self) { let i = match self.node_table_state.selected() { Some(i) => { if i == 0 { @@ -411,10 +412,10 @@ impl Component for Status { return Ok(Some(Action::SwitchScene(Scene::ManageNodesPopUp))); } StatusActions::PreviousTableItem => { - self.select_previous_table_item(); + // self.select_previous_table_item(); } StatusActions::NextTableItem => { - self.select_next_table_item(); + // self.select_next_table_item(); } StatusActions::StartNodes => { debug!("Got action to start nodes"); From 347f0f89b63878901ea251e36b2055a2f111b50a Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 26 Sep 2024 17:46:12 +0900 Subject: [PATCH 057/255] feat: node side integration and sn_node_manager --- Cargo.lock | 9 +- sn_networking/Cargo.toml | 1 + sn_networking/src/cmd.rs | 17 +- sn_networking/src/driver.rs | 2 +- sn_networking/src/error.rs | 2 + sn_networking/src/event/mod.rs | 2 +- sn_networking/src/event/swarm.rs | 1 + sn_networking/src/lib.rs | 34 +- sn_networking/src/log_markers.rs | 4 +- sn_networking/src/metrics/mod.rs | 8 +- sn_networking/src/record_store.rs | 29 +- sn_networking/src/record_store_api.rs | 6 +- sn_node/Cargo.toml | 11 +- sn_node/src/bin/safenode/main.rs | 35 +- sn_node/src/bin/safenode/rpc_service.rs | 6 +- sn_node/src/bin/safenode/subcommands.rs | 41 + sn_node/src/error.rs | 15 +- sn_node/src/event.rs | 11 +- sn_node/src/lib.rs | 8 +- sn_node/src/metrics.rs | 4 +- sn_node/src/node.rs | 291 +--- sn_node/src/put_validation.rs | 212 +-- sn_node/src/quote.rs | 25 +- sn_node/tests/data_with_churn.rs | 3 +- sn_node/tests/double_spend.rs | 1366 ++++++++--------- 
sn_node/tests/sequential_transfers.rs | 108 +- sn_node/tests/storage_payments.rs | 755 ++++----- sn_node_manager/Cargo.toml | 2 +- sn_node_manager/src/add_services/tests.rs | 18 +- sn_node_manager/src/bin/cli/main.rs | 24 + .../src/bin/cli/subcommands/evm_network.rs | 41 + .../src/bin/cli/subcommands/mod.rs | 1 + sn_node_manager/src/cmd/local.rs | 9 + sn_node_manager/src/lib.rs | 71 +- sn_node_manager/src/local.rs | 100 +- sn_protocol/Cargo.toml | 1 + sn_protocol/src/messages.rs | 2 +- sn_protocol/src/messages/cmd.rs | 3 +- sn_protocol/src/messages/response.rs | 6 +- sn_protocol/src/version.rs | 6 +- sn_service_management/Cargo.toml | 2 +- sn_service_management/src/node.rs | 4 +- 42 files changed, 1583 insertions(+), 1713 deletions(-) create mode 100644 sn_node/src/bin/safenode/subcommands.rs create mode 100644 sn_node_manager/src/bin/cli/subcommands/evm_network.rs create mode 100644 sn_node_manager/src/bin/cli/subcommands/mod.rs diff --git a/Cargo.lock b/Cargo.lock index c0495910f6..dc24863112 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8362,6 +8362,7 @@ dependencies = [ "service-manager", "sn-releases", "sn_build_info", + "sn_evm", "sn_logging", "sn_peers_acquisition", "sn_protocol", @@ -8374,7 +8375,6 @@ dependencies = [ "tracing", "users", "uuid", - "which 6.0.3", ] [[package]] @@ -8687,6 +8687,7 @@ dependencies = [ "rmp-serde", "serde", "sn_build_info", + "sn_evm", "sn_protocol", "sn_registers", "sn_transfers", @@ -8709,13 +8710,13 @@ name = "sn_node" version = "0.111.2" dependencies = [ "assert_fs", - "assert_matches", "async-trait", "blsttc", "bytes", "chrono", "clap", "color-eyre", + "const-hex", "crdts", "custom_debug", "dirs-next", @@ -8736,6 +8737,7 @@ dependencies = [ "serde_json", "sn_build_info", "sn_client", + "sn_evm", "sn_logging", "sn_networking", "sn_peers_acquisition", @@ -8821,6 +8823,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sn_build_info", + "sn_evm", "sn_registers", "sn_transfers", "thiserror", @@ -8862,9 +8865,9 @@ dependencies = [ "serde", "serde_json", "service-manager", + "sn_evm", "sn_logging", "sn_protocol", - "sn_transfers", "sysinfo", "thiserror", "tokio", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 09f61b0645..39831935b2 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -57,6 +57,7 @@ sn_build_info = { path="../sn_build_info", version = "0.1.13" } sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_registers = { path = "../sn_registers", version = "0.3.19" } +sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 133bd2abda..541a518ce5 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -21,12 +21,12 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics}; use sn_protocol::{ messages::{Cmd, Request, Response}, storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -91,12 +91,12 @@ pub enum LocalSwarmCmd { /// GetLocalStoreCost for this node GetLocalStoreCost { key: RecordKey, - sender: oneshot::Sender<(NanoTokens, QuotingMetrics)>, + sender: oneshot::Sender<(AttoTokens, QuotingMetrics)>, }, 
/// Notify the node received a payment. PaymentReceived, /// Put record to the local RecordStore - PutVerifiedLocalRecord { + PutLocalRecord { record: Record, }, /// Remove a local record from the RecordStore @@ -194,7 +194,7 @@ pub enum NetworkSwarmCmd { impl Debug for LocalSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - LocalSwarmCmd::PutVerifiedLocalRecord { record } => { + LocalSwarmCmd::PutLocalRecord { record } => { write!( f, "LocalSwarmCmd::PutLocalRecord {{ key: {:?} }}", @@ -561,7 +561,7 @@ impl SwarmDriver { .store_cost(&key); self.record_metrics(Marker::StoreCost { - cost: cost.as_nano(), + cost: cost.as_atto(), quoting_metrics: "ing_metrics, }); @@ -587,8 +587,8 @@ impl SwarmDriver { let _ = sender.send(record); } - LocalSwarmCmd::PutVerifiedLocalRecord { record } => { - cmd_string = "PutVerifiedLocalRecord"; + LocalSwarmCmd::PutLocalRecord { record } => { + cmd_string = "PutLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -719,6 +719,7 @@ impl SwarmDriver { } LocalSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; + #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self .swarm .behaviour_mut() @@ -735,7 +736,7 @@ impl SwarmDriver { if let Some(distance) = range.0.ilog2() { let peers_in_kbucket = kbucket .iter() - .map(|peer_entry| (*peer_entry.node.key).into_preimage()) + .map(|peer_entry| peer_entry.node.key.into_preimage()) .collect::>(); let _ = ilog2_kbuckets.insert(distance, peers_in_kbucket); } else { diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 4b39b80907..2ed9a7d1f8 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -47,6 +47,7 @@ use libp2p::{ }; #[cfg(feature = "open-metrics")] use prometheus_client::{metrics::info::Info, registry::Registry}; +use sn_evm::PaymentQuote; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::{try_deserialize_record, RetryStrategy}, @@ -57,7 +58,6 @@ use sn_protocol::{ NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; use sn_registers::SignedRegister; -use sn_transfers::PaymentQuote; use std::{ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, fmt::Debug, diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 2168bb892c..6da5a22d9a 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -105,6 +105,8 @@ pub enum NetworkError { Wallet(#[from] sn_transfers::WalletError), #[error("Transfer Error {0}")] Transfer(#[from] sn_transfers::TransferError), + #[error("Evm payment Error {0}")] + EvmPaymemt(#[from] sn_evm::EvmError), #[error("Failed to sign the message with the PeerId keypair")] SigningFailed(#[from] libp2p::identity::SigningError), diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 20f45ca2c8..2b8158f255 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -21,11 +21,11 @@ use libp2p::{ Multiaddr, PeerId, }; +use sn_evm::PaymentQuote; use sn_protocol::{ messages::{Query, Request, Response}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::PaymentQuote; use std::{ collections::BTreeSet, fmt::{Debug, Formatter}, diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index af74a1455e..3f650f0b5a 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -329,6 +329,7 @@ impl SwarmDriver { 
self.send_event(NetworkEvent::NewListenAddr(address.clone())); info!("Local node is listening {listener_id:?} on {address:?}"); + println!("Local node is listening on {address:?}"); // TODO: make it print only once } SwarmEvent::ListenerClosed { listener_id, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 0df7812ebb..8369665c12 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -59,13 +59,13 @@ use libp2p::{ Multiaddr, PeerId, }; use rand::Rng; +use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress}; use sn_protocol::{ error::Error as ProtocolError, messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, storage::{RecordType, RetryStrategy}, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, net::IpAddr, @@ -79,7 +79,7 @@ use tokio::sync::{ use tokio::time::Duration; /// The type of quote for a selected payee. -pub type PayeeQuote = (PeerId, MainPubkey, PaymentQuote); +pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote); /// The count of peers that will be considered as close to a record target, /// that a replication of the record shall be sent/accepted to/by the peer. @@ -378,8 +378,8 @@ impl Network { peer_address, }) => { // Check the quote itself is valid. - if quote.cost.as_nano() - != calculate_cost_for_records(quote.quoting_metrics.close_records_stored) + if quote.cost + != AttoTokens::from_u64(calculate_cost_for_records(quote.quoting_metrics.close_records_stored)) { warn!("Received invalid quote from {peer_address:?}, {quote:?}"); continue; @@ -589,7 +589,7 @@ impl Network { pub async fn get_local_storecost( &self, key: RecordKey, - ) -> Result<(NanoTokens, QuotingMetrics)> { + ) -> Result<(AttoTokens, QuotingMetrics)> { let (sender, receiver) = oneshot::channel(); self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); @@ -751,7 +751,7 @@ impl Network { PrettyPrintRecordKey::from(&record.key), record.value.len() ); - self.send_local_swarm_cmd(LocalSwarmCmd::PutVerifiedLocalRecord { record }) + self.send_local_swarm_cmd(LocalSwarmCmd::PutLocalRecord { record }) } /// Returns true if a RecordKey is present locally in the RecordStore @@ -961,7 +961,7 @@ impl Network { /// Given `all_costs` it will return the closest / lowest cost /// Closest requiring it to be within CLOSE_GROUP nodes fn get_fees_from_store_cost_responses( - all_costs: Vec<(NetworkAddress, MainPubkey, PaymentQuote)>, + all_costs: Vec<(NetworkAddress, RewardsAddress, PaymentQuote)>, ) -> Result { // Find the minimum cost using a linear scan with random tie break let mut rng = rand::thread_rng(); @@ -1114,7 +1114,7 @@ mod tests { use eyre::bail; use super::*; - use sn_transfers::PaymentQuote; + use sn_evm::PaymentQuote; #[test] fn test_get_fee_from_store_cost_responses() -> Result<()> { @@ -1122,18 +1122,18 @@ mod tests { // ensure we return the CLOSE_GROUP / 2 indexed price let mut costs = vec![]; for i in 1..CLOSE_GROUP_SIZE { - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i as u64)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i as u64)), )); } - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = 
costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = get_fees_from_store_cost_responses(costs)?; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); @@ -1148,18 +1148,18 @@ mod tests { let responses_count = CLOSE_GROUP_SIZE as u64 - 1; let mut costs = vec![]; for i in 1..responses_count { - // push random MainPubkey and Nano - let addr = MainPubkey::new(bls::SecretKey::random().public_key()); + // push random addr and Nano + let addr = sn_evm::utils::dummy_address(); costs.push(( NetworkAddress::from_peer(PeerId::random()), addr, - PaymentQuote::test_dummy(Default::default(), NanoTokens::from(i)), + PaymentQuote::test_dummy(Default::default(), AttoTokens::from_u64(i)), )); println!("price added {i}"); } // this should be the lowest price - let expected_price = costs[0].2.cost.as_nano(); + let expected_price = costs[0].2.cost.as_atto(); let (_peer_id, _key, price) = match get_fees_from_store_cost_responses(costs) { Err(_) => bail!("Should not have errored as we have enough responses"), @@ -1167,7 +1167,7 @@ mod tests { }; assert_eq!( - price.cost.as_nano(), + price.cost.as_atto(), expected_price, "price should be {expected_price}" ); diff --git a/sn_networking/src/log_markers.rs b/sn_networking/src/log_markers.rs index 97ecb6c04b..38ec42c875 100644 --- a/sn_networking/src/log_markers.rs +++ b/sn_networking/src/log_markers.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use libp2p::PeerId; -use sn_transfers::QuotingMetrics; +use sn_evm::{Amount, QuotingMetrics}; // this gets us to_string easily enough use strum::Display; @@ -22,7 +22,7 @@ pub enum Marker<'a> { /// Store cost StoreCost { /// Cost - cost: u64, + cost: Amount, quoting_metrics: &'a QuotingMetrics, }, /// The peer has been considered as bad diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a7fdfbeee1..ebb15a73fb 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -232,7 +232,13 @@ impl NetworkMetricsRecorder { cost, quoting_metrics, } => { - let _ = self.store_cost.set(cost as i64); + let _ = self.store_cost.set(cost.try_into().unwrap_or(i64::MAX)); + let _ = self.relevant_records.set( + quoting_metrics + .close_records_stored + .try_into() + .unwrap_or(i64::MAX), + ); let _ = self .relevant_records .set(quoting_metrics.close_records_stored as i64); diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 55183866b8..7ce96c2e41 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -30,11 +30,11 @@ use prometheus_client::metrics::gauge::Gauge; use rand::RngCore; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{ storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::collections::VecDeque; use std::{ borrow::Cow, @@ -651,7 +651,7 @@ impl NodeRecordStore { } /// Calculate the cost to store data for our current store state - pub(crate) fn store_cost(&self, key: &Key) -> (NanoTokens, QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &Key) -> (AttoTokens, QuotingMetrics) { let records_stored = self.records.len(); let record_keys_as_hashset: HashSet<&Key> = self.records.keys().collect(); @@ -685,7 +685,7 @@ impl NodeRecordStore { // vdash metric (if modified please notify at 
https://github.com/happybeing/vdash/issues): info!("Cost is now {cost:?} for quoting_metrics {quoting_metrics:?}"); - (NanoTokens::from(cost), quoting_metrics) + (AttoTokens::from_u64(cost), quoting_metrics) } /// Notify the node received a payment. @@ -955,7 +955,6 @@ mod tests { use super::*; use bls::SecretKey; - use sn_protocol::storage::{try_deserialize_record, Scratchpad}; use xor_name::XorName; use bytes::Bytes; @@ -963,8 +962,9 @@ mod tests { use libp2p::kad::K_VALUE; use libp2p::{core::multihash::Multihash, kad::RecordKey}; use quickcheck::*; - use sn_protocol::storage::{try_serialize_record, Chunk, ChunkAddress}; - use sn_transfers::{MainPubkey, PaymentQuote}; + use sn_evm::utils::dummy_address; + use sn_evm::{PaymentQuote, RewardsAddress}; + use sn_protocol::storage::{try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, Scratchpad}; use std::collections::BTreeMap; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use tokio::runtime::Runtime; @@ -1562,7 +1562,7 @@ mod tests { struct PeerStats { address: NetworkAddress, - pk: MainPubkey, + rewards_addr: RewardsAddress, records_stored: AtomicUsize, nanos_earned: AtomicU64, payments_received: AtomicUsize, @@ -1590,7 +1590,7 @@ mod tests { records_stored: AtomicUsize::new(0), nanos_earned: AtomicU64::new(0), payments_received: AtomicUsize::new(0), - pk: MainPubkey::new(SecretKey::random().public_key()), + rewards_addr: dummy_address(), }) .collect(); @@ -1657,7 +1657,7 @@ mod tests { if peer_index == payee_index { peer.nanos_earned - .fetch_add(cost.as_nano(), Ordering::Relaxed); + .fetch_add(cost.as_atto().try_into().unwrap_or(u64::MAX), Ordering::Relaxed); peer.payments_received.fetch_add(1, Ordering::Relaxed); } } @@ -1758,7 +1758,7 @@ mod tests { fn pick_cheapest_payee( peers: &[PeerStats], close_group: &[usize], - ) -> eyre::Result<(usize, NanoTokens)> { + ) -> eyre::Result<(usize, AttoTokens)> { let mut costs_vec = Vec::with_capacity(close_group.len()); let mut address_to_index = BTreeMap::new(); @@ -1767,7 +1767,7 @@ mod tests { address_to_index.insert(peer.address.clone(), i); let close_records_stored = peer.records_stored.load(Ordering::Relaxed); - let cost = NanoTokens::from(calculate_cost_for_records(close_records_stored)); + let cost = AttoTokens::from(calculate_cost_for_records(close_records_stored)); let quote = PaymentQuote { content: XorName::default(), // unimportant for cost calc @@ -1779,11 +1779,12 @@ mod tests { received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, - pub_key: peer.pk.to_bytes().to_vec(), - signature: vec![], // unimportant for cost calc + pub_key: bls::SecretKey::random().public_key().to_bytes().to_vec(), + signature: vec![], + rewards_address: peer.rewards_addr, // unimportant for cost calc }; - costs_vec.push((peer.address.clone(), peer.pk, quote)); + costs_vec.push((peer.address.clone(), peer.rewards_addr, quote)); } // sort by address first diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index c61b8d7043..8e3bc67364 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -12,8 +12,8 @@ use libp2p::kad::{ store::{RecordStore, Result}, ProviderRecord, Record, RecordKey, }; +use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; -use sn_transfers::{NanoTokens, QuotingMetrics}; use std::{borrow::Cow, collections::HashMap}; pub enum UnifiedRecordStore { @@ -111,11 +111,11 @@ impl UnifiedRecordStore { } 
} - pub(crate) fn store_cost(&self, key: &RecordKey) -> (NanoTokens, QuotingMetrics) { + pub(crate) fn store_cost(&self, key: &RecordKey) -> (AttoTokens, QuotingMetrics) { match self { Self::Client(_) => { warn!("Calling store cost calculation at Client. This should not happen"); - (NanoTokens::zero(), Default::default()) + (AttoTokens::zero(), Default::default()) } Self::Node(store) => store.store_cost(key), } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 99c6d3f273..bed23167bb 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -14,15 +14,14 @@ name = "safenode" path = "src/bin/safenode/main.rs" [features] -default = ["metrics", "upnp", "reward-forward", "open-metrics"] -encrypt-records = ["sn_networking/encrypt-records"] +default = ["metrics", "upnp", "open-metrics"] local-discovery = ["sn_networking/local-discovery"] +otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] nightly = [] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] -otlp = ["sn_logging/otlp"] -reward-forward = ["sn_transfers/reward-forward"] +encrypt-records = ["sn_networking/encrypt-records"] upnp = ["sn_networking/upnp"] [dependencies] @@ -34,6 +33,7 @@ clap = { version = "4.2.1", features = ["derive"] } crdts = { version = "7.3", default-features = false, features = ["merkle"] } chrono = "~0.4.19" custom_debug = "~0.6.1" +const-hex = "1.12.0" dirs-next = "~2.0.0" eyre = "0.6.8" file-rotate = "0.7.3" @@ -59,6 +59,7 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_registers = { path = "../sn_registers", version = "0.3.19" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_service_management = { path = "../sn_service_management", version = "0.3.12" } +sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -80,7 +81,6 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -assert_matches = "1.5.0" reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } @@ -92,6 +92,7 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ sn_transfers = { path = "../sn_transfers", version = "0.19.1", features = [ "test-utils", ] } +sn_evm = { path = "../sn_evm", version = "0.1.0" } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index c503504528..9d2211597b 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -10,10 +10,14 @@ extern crate tracing; mod rpc_service; +mod subcommands; use clap::{command, Parser}; use color_eyre::{eyre::eyre, Result}; +use crate::subcommands::EvmNetworkCommand; +use const_hex::traits::FromHex; use libp2p::{identity::Keypair, PeerId}; +use sn_evm::{EvmNetwork, RewardsAddress}; #[cfg(feature = "metrics")] use sn_logging::metrics::init_metrics; use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; @@ -68,6 +72,7 @@ pub fn parse_log_output(val: &str) -> Result { // They are used for inserting line breaks when the help menu is rendered in the UI. 
#[derive(Parser, Debug)] #[command(disable_version_flag = true)] +#[clap(name = "safenode cli", version = env!("CARGO_PKG_VERSION"))] struct Opt { /// Specify whether the node is operating from a home network and situated behind a NAT without port forwarding /// capabilities. Setting this to true, activates hole-punching to facilitate direct connections from other nodes. @@ -120,6 +125,19 @@ struct Opt { #[clap(long = "max_archived_log_files", verbatim_doc_comment)] max_compressed_log_files: Option, + /// Specify the rewards address. + /// The rewards address is the address that will receive the rewards for the node. + /// It should be a valid EVM address. + #[clap(long)] + rewards_address: String, + + /// Specify the EVM network to use. + /// The network can either be a pre-configured one or a custom network. + /// When setting a custom network, you must specify the RPC URL to a fully synced node and + /// the addresses of the network token and chunk payments contracts. + #[command(subcommand)] + evm_network: Option, + /// Specify the node's data directory. /// /// If not provided, the default location is platform specific: @@ -213,6 +231,8 @@ fn main() -> Result<()> { ); return Ok(()); } + // evm config + let rewards_address = RewardsAddress::from_hex(&opt.rewards_address)?; if opt.crate_version { println!("Crate version: {}", env!("CARGO_PKG_VERSION")); @@ -229,6 +249,12 @@ fn main() -> Result<()> { println!("Package version: {}", sn_build_info::package_version()); return Ok(()); } + let evm_network: EvmNetwork = opt + .evm_network + .as_ref() + .cloned() + .map(|v| v.into()) + .unwrap_or_default(); let node_socket_addr = SocketAddr::new(opt.ip, opt.port); let (root_dir, keypair) = get_root_dir_and_keypair(&opt.root_dir)?; @@ -246,6 +272,10 @@ fn main() -> Result<()> { info!("\n{}\n{}", msg, "=".repeat(msg.len())); sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR); + debug!( + "safenode built with git version: {}", + sn_build_info::git_info() + ); info!("Node started with initial_peers {bootstrap_peers:?}"); @@ -258,12 +288,12 @@ fn main() -> Result<()> { let restart_options = rt.block_on(async move { let mut node_builder = NodeBuilder::new( keypair, + rewards_address, + evm_network, node_socket_addr, bootstrap_peers, opt.local, root_dir, - opt.owner.clone(), - #[cfg(feature = "upnp")] opt.upnp, ); node_builder.is_behind_home_network = opt.home_network; @@ -462,6 +492,7 @@ fn init_logging(opt: &Opt, peer_id: PeerId) -> Result<(String, ReloadHandle, Opt ("sn_protocol".to_string(), Level::DEBUG), ("sn_registers".to_string(), Level::DEBUG), ("sn_transfers".to_string(), Level::DEBUG), + ("sn_evm".to_string(), Level::DEBUG), ]; let output_dest = match &opt.log_output_dest { diff --git a/sn_node/src/bin/safenode/rpc_service.rs b/sn_node/src/bin/safenode/rpc_service.rs index 6943221741..c42503f112 100644 --- a/sn_node/src/bin/safenode/rpc_service.rs +++ b/sn_node/src/bin/safenode/rpc_service.rs @@ -66,11 +66,7 @@ impl SafeNode for SafeNodeRpcService { pid: process::id(), bin_version: env!("CARGO_PKG_VERSION").to_string(), uptime_secs: self.started_instant.elapsed().as_secs(), - wallet_balance: self - .running_node - .get_node_wallet_balance() - .expect("Failed to get node wallet balance") - .as_nano(), + wallet_balance: 0, // NB TODO: Implement this using metrics data? 
}); Ok(resp) diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs new file mode 100644 index 0000000000..3faada3562 --- /dev/null +++ b/sn_node/src/bin/safenode/subcommands.rs @@ -0,0 +1,41 @@ +use clap::Subcommand; +use sn_evm::{EvmNetwork, EvmNetworkCustom}; + +#[derive(Subcommand, Clone, Debug)] +pub(crate) enum EvmNetworkCommand { + /// Use the Arbitrum One network + EvmArbitrumOne, + + /// Use a custom network + EvmCustom { + /// The RPC URL for the custom network + #[arg(long)] + rpc_url: String, + + /// The payment token contract address + #[arg(long, short)] + payment_token_address: String, + + /// The chunk payments contract address + #[arg(long, short)] + chunk_payments_address: String, + }, +} + +#[allow(clippy::from_over_into)] +impl Into<EvmNetwork> for EvmNetworkCommand { + fn into(self) -> EvmNetwork { + match self { + Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmCustom { + rpc_url, + payment_token_address, + chunk_payments_address, + } => EvmNetwork::Custom(EvmNetworkCustom::new( + &rpc_url, + &payment_token_address, + &chunk_payments_address, + )), + } + } +} diff --git a/sn_node/src/error.rs b/sn_node/src/error.rs index 1c2bb23e16..a74ed00bc7 100644 --- a/sn_node/src/error.rs +++ b/sn_node/src/error.rs @@ -6,14 +6,16 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use sn_evm::AttoTokens; use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; -use sn_transfers::{NanoTokens, WalletError}; +use sn_transfers::WalletError; use thiserror::Error; pub(super) type Result<T> = std::result::Result<T, Error>; /// Internal error. #[derive(Debug, Error)] +#[allow(missing_docs)] pub enum Error { #[error("Network error {0}")] Network(#[from] sn_networking::NetworkError), @@ -28,7 +30,7 @@ pub enum Error { Wallet(#[from] WalletError), #[error("Transfers Error {0}")] - Transfers(#[from] sn_transfers::TransferError), + Transfers(#[from] sn_evm::EvmError), #[error("Failed to parse NodeEvent")] NodeEventParsingFailed, @@ -74,8 +76,8 @@ pub enum Error { /// The amount paid by payment proof is not the required for the received content #[error("The amount paid by payment proof is not the required for the received content, paid {paid}, expected {expected}")] PaymentProofInsufficientAmount { - paid: NanoTokens, - expected: NanoTokens, + paid: AttoTokens, + expected: AttoTokens, }, #[error("A payment we received contains cash notes already confirmed to be spent")] ReusedPayment, @@ -93,4 +95,9 @@ pub enum Error { /// Error occurred in an async thread #[error("Error occurred in async thread: {0}")] JoinErrorInAsyncThread(String), + + #[error("EVM Network error: {0}")] + EvmNetwork(String), + #[error("Invalid quote timestamp: {0}")] + InvalidQuoteTimestamp(String), } diff --git a/sn_node/src/event.rs b/sn_node/src/event.rs index c3e9857bad..6237e1d8bf 100644 --- a/sn_node/src/event.rs +++ b/sn_node/src/event.rs @@ -9,8 +9,11 @@ use crate::error::{Error, Result}; use serde::{Deserialize, Serialize}; -use sn_protocol::storage::{ChunkAddress, RegisterAddress}; -use sn_transfers::UniquePubkey; +use sn_evm::AttoTokens; +use sn_protocol::{ + storage::{ChunkAddress, RegisterAddress}, + NetworkAddress, +}; use tokio::sync::broadcast; const NODE_EVENT_CHANNEL_SIZE: usize = 500; @@ -62,8 +65,8 @@ pub enum NodeEvent { RegisterCreated(RegisterAddress), /// A Register edit operation has been applied in local storage RegisterEdited(RegisterAddress), -
/// A CashNote Spend has been stored in local storage - SpendStored(UniquePubkey), + /// A new reward was received + RewardReceived(AttoTokens, NetworkAddress), /// One of the sub event channel closed and unrecoverable. ChannelClosed, /// Terminates the node diff --git a/sn_node/src/lib.rs b/sn_node/src/lib.rs index 4f097a7724..7dbd88ce5e 100644 --- a/sn_node/src/lib.rs +++ b/sn_node/src/lib.rs @@ -48,7 +48,6 @@ use crate::error::{Error, Result}; use libp2p::PeerId; use sn_networking::{Network, SwarmLocalState}; use sn_protocol::{get_port_from_multiaddr, NetworkAddress}; -use sn_transfers::{HotWallet, NanoTokens}; use std::{ collections::{BTreeMap, HashSet}, path::PathBuf, @@ -80,12 +79,6 @@ impl RunningNode { self.network.root_dir_path().clone() } - /// Returns the wallet balance of the node - pub fn get_node_wallet_balance(&self) -> Result { - let wallet = HotWallet::load_from(self.network.root_dir_path())?; - Ok(wallet.balance()) - } - /// Returns a `SwarmLocalState` with some information obtained from swarm's local state. pub async fn get_swarm_local_state(&self) -> Result { let state = self.network.get_swarm_local_state().await?; @@ -110,6 +103,7 @@ impl RunningNode { /// Returns the list of all the RecordKeys held by the node pub async fn get_all_record_addresses(&self) -> Result> { + #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress let addresses: HashSet<_> = self .network .get_all_local_record_addresses() diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 4ba458448e..b2731e8dd5 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -36,7 +36,7 @@ pub(crate) struct NodeMetricsRecorder { // wallet pub(crate) current_reward_wallet_balance: Gauge, - pub(crate) total_forwarded_rewards: Gauge, + pub(crate) _total_forwarded_rewards: Gauge, // to track the uptime of the node. pub(crate) started_instant: Instant, @@ -130,7 +130,7 @@ impl NodeMetricsRecorder { peer_added_to_routing_table, peer_removed_from_routing_table, current_reward_wallet_balance, - total_forwarded_rewards, + _total_forwarded_rewards: total_forwarded_rewards, started_instant: Instant::now(), uptime, } diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 0caeab2fa7..3ca3e015b6 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -7,10 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use super::{ - error::{Error, Result}, - event::NodeEventsChannel, - quote::quotes_verification, - Marker, NodeEvent, + error::Result, event::NodeEventsChannel, quote::quotes_verification, Marker, NodeEvent, }; #[cfg(feature = "open-metrics")] use crate::metrics::NodeMetricsRecorder; @@ -18,10 +15,11 @@ use crate::RunningNode; use bytes::Bytes; use libp2p::{identity::Keypair, Multiaddr, PeerId}; #[cfg(feature = "open-metrics")] -use prometheus_client::metrics::{gauge::Gauge, info::Info}; +use prometheus_client::metrics::info::Info; #[cfg(feature = "open-metrics")] use prometheus_client::registry::Registry; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; +use sn_evm::{AttoTokens, RewardsAddress}; use sn_networking::{ close_group_majority, Instant, Network, NetworkBuilder, NetworkError, NetworkEvent, NodeIssue, SwarmDriver, @@ -31,7 +29,6 @@ use sn_protocol::{ messages::{ChunkProof, CmdResponse, Query, QueryResponse, Request, Response}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; -use sn_transfers::{HotWallet, MainPubkey, MainSecretKey, NanoTokens, PAYMENT_FORWARD_PK}; use std::{ net::SocketAddr, path::PathBuf, @@ -46,12 +43,7 @@ use tokio::{ task::{spawn, JoinHandle}, }; -#[cfg(feature = "reward-forward")] -use libp2p::kad::{Quorum, Record}; -#[cfg(feature = "reward-forward")] -use sn_networking::PutRecordCfg; -#[cfg(feature = "reward-forward")] -use sn_protocol::storage::{try_serialize_record, RecordKind, SpendAddress}; +use sn_evm::EvmNetwork; /// Interval to trigger replication of all records to all peers. /// This is the max time it should take. Minimum interval at any node will be half this @@ -61,10 +53,6 @@ pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45; /// This is the max time it should take. Minimum interval at any node will be half this const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 600; -/// Interval to trigger reward forwarding. -/// This is the max time it should take. Minimum interval at any node will be half this -const PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S: u64 = 450; - /// Max number of attempts that chunk proof verification will be carried out against certain target, /// before classifying peer as a bad peer. const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3; @@ -72,10 +60,6 @@ const MAX_CHUNK_PROOF_VERIFY_ATTEMPTS: usize = 3; /// Interval between chunk proof verification to be retired against the same target. const CHUNK_PROOF_VERIFY_RETRY_INTERVAL: Duration = Duration::from_secs(15); -#[cfg(feature = "reward-forward")] -/// Track the forward balance by storing the balance in a file. This is useful to restore the balance between restarts. -const FORWARDED_BALANCE_FILE_NAME: &str = "forwarded_balance"; - /// Interval to update the nodes uptime metric const UPTIME_METRICS_UPDATE_INTERVAL: Duration = Duration::from_secs(10); @@ -84,7 +68,9 @@ const UNRELEVANT_RECORDS_CLEANUP_INTERVAL: Duration = Duration::from_secs(3600); /// Helper to build and run a Node pub struct NodeBuilder { - keypair: Keypair, + identity_keypair: Keypair, + evm_address: RewardsAddress, + evm_network: EvmNetwork, addr: SocketAddr, initial_peers: Vec, local: bool, @@ -94,24 +80,27 @@ pub struct NodeBuilder { metrics_server_port: Option, /// Enable hole punching for nodes connecting from home networks. 
pub is_behind_home_network: bool, - owner: Option, #[cfg(feature = "upnp")] upnp: bool, } impl NodeBuilder { /// Instantiate the builder + #[expect(clippy::too_many_arguments)] pub fn new( - keypair: Keypair, + identity_keypair: Keypair, + evm_address: RewardsAddress, + evm_network: EvmNetwork, addr: SocketAddr, initial_peers: Vec, local: bool, root_dir: PathBuf, - owner: Option, #[cfg(feature = "upnp")] upnp: bool, ) -> Self { Self { - keypair, + identity_keypair, + evm_address, + evm_network, addr, initial_peers, local, @@ -119,7 +108,6 @@ impl NodeBuilder { #[cfg(feature = "open-metrics")] metrics_server_port: None, is_behind_home_network: false, - owner, #[cfg(feature = "upnp")] upnp, } @@ -144,21 +132,8 @@ impl NodeBuilder { /// /// Returns an error if there is a problem initializing the `SwarmDriver`. pub fn build_and_run(self) -> Result { - // Using the signature as the seed of generating the reward_key - let sig_vec = match self.keypair.sign(b"generate reward seed") { - Ok(sig) => sig, - Err(_err) => return Err(Error::FailedToGenerateRewardKey), - }; - let mut rng = sn_transfers::rng::from_vec(&sig_vec); - - let reward_key = MainSecretKey::random_from_rng(&mut rng); - let reward_address = reward_key.main_pubkey(); - - let mut wallet = HotWallet::load_from_main_key(&self.root_dir, reward_key)?; - // store in case it's a fresh wallet created if none was found - wallet.deposit_and_store_to_disk(&vec![])?; - - let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir); + let mut network_builder = + NetworkBuilder::new(self.identity_keypair, self.local, self.root_dir); #[cfg(feature = "open-metrics")] let node_metrics = if self.metrics_server_port.is_some() { @@ -201,10 +176,10 @@ impl NodeBuilder { network: network.clone(), events_channel: node_events_channel.clone(), initial_peers: self.initial_peers, - reward_address, + reward_address: self.evm_address, #[cfg(feature = "open-metrics")] node_metrics, - owner: self.owner, + evm_network: self.evm_network, }; let node = Node { inner: Arc::new(node), @@ -238,10 +213,8 @@ struct NodeInner { network: Network, #[cfg(feature = "open-metrics")] node_metrics: Option, - /// Node owner's discord username, in readable format - /// If not set, there will be no payment forward to be undertaken - owner: Option, - reward_address: MainPubkey, + reward_address: RewardsAddress, + evm_network: EvmNetwork, } impl Node { @@ -266,37 +239,21 @@ impl Node { self.inner.node_metrics.as_ref() } - /// Returns the owner of the node - pub(crate) fn owner(&self) -> Option<&String> { - self.inner.owner.as_ref() - } - /// Returns the reward address of the node - pub(crate) fn reward_address(&self) -> &MainPubkey { + pub(crate) fn reward_address(&self) -> &RewardsAddress { &self.inner.reward_address } + pub(crate) fn evm_network(&self) -> &EvmNetwork { + &self.inner.evm_network + } + /// Runs the provided `SwarmDriver` and spawns a task to process for `NetworkEvents` fn run(self, swarm_driver: SwarmDriver, mut network_event_receiver: Receiver) { let mut rng = StdRng::from_entropy(); let peers_connected = Arc::new(AtomicUsize::new(0)); - // read the forwarded balance from the file and set the metric. 
- // This is done initially because reward forwarding takes a while to kick in - #[cfg(all(feature = "reward-forward", feature = "open-metrics"))] - let node_copy = self.clone(); - #[cfg(all(feature = "reward-forward", feature = "open-metrics"))] - let _handle = spawn(async move { - let root_dir = node_copy.network().root_dir_path().clone(); - let balance_file_path = root_dir.join(FORWARDED_BALANCE_FILE_NAME); - let balance = read_forwarded_balance_value(&balance_file_path); - - if let Some(node_metrics) = node_copy.node_metrics() { - let _ = node_metrics.total_forwarded_rewards.set(balance as i64); - } - }); - let _handle = spawn(swarm_driver.run()); let _handle = spawn(async move { // use a random inactivity timeout to ensure that the nodes do not sync when messages @@ -323,19 +280,6 @@ impl Node { let mut rolling_index = 0; - // use a random timeout to ensure not sync when transmit messages. - let balance_forward_interval: u64 = rng.gen_range( - PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S / 2..PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S, - ); - let balance_forward_time = Duration::from_secs(balance_forward_interval); - debug!( - "BalanceForward interval set to {balance_forward_time:?} to: {:?}", - PAYMENT_FORWARD_PK.to_hex(), - ); - - let mut balance_forward_interval = tokio::time::interval(balance_forward_time); - let _ = balance_forward_interval.tick().await; // first tick completes immediately - let mut uptime_metrics_update_interval = tokio::time::interval(UPTIME_METRICS_UPDATE_INTERVAL); let _ = uptime_metrics_update_interval.tick().await; // first tick completes immediately @@ -395,36 +339,6 @@ impl Node { rolling_index += 1; } } - // runs every balance_forward_interval time - _ = balance_forward_interval.tick() => { - if cfg!(feature = "reward-forward") { - if let Some(owner) = self.owner() { - let start = Instant::now(); - debug!("Periodic balance forward triggered"); - let network = self.network().clone(); - let forwarding_reason = owner.clone(); - - #[cfg(feature = "open-metrics")] - let total_forwarded_rewards = self.node_metrics().map(|metrics|metrics.total_forwarded_rewards.clone()); - #[cfg(feature = "open-metrics")] - let current_reward_wallet_balance = self.node_metrics().map(|metrics|metrics.current_reward_wallet_balance.clone()); - - let _handle = spawn(async move { - - #[cfg(feature = "open-metrics")] - if let Err(err) = Self::try_forward_balance(network, forwarding_reason, total_forwarded_rewards,current_reward_wallet_balance) { - error!("Error while trying to forward balance: {err:?}"); - } - #[cfg(not(feature = "open-metrics"))] - if let Err(err) = Self::try_forward_balance(network, forwarding_reason) { - error!("Error while trying to forward balance: {err:?}"); - } - info!("Periodic balance forward took {:?}", start.elapsed()); - }); - } - - } - } _ = uptime_metrics_update_interval.tick() => { #[cfg(feature = "open-metrics")] if let Some(node_metrics) = self.node_metrics() { @@ -694,7 +608,7 @@ impl Node { async fn handle_query( network: &Network, query: Query, - payment_address: MainPubkey, + payment_address: RewardsAddress, ) -> Response { let resp: QueryResponse = match query { Query::GetStoreCost(address) => { @@ -706,7 +620,7 @@ impl Node { match store_cost { Ok((cost, quoting_metrics)) => { - if cost == NanoTokens::zero() { + if cost == AttoTokens::zero() { QueryResponse::GetStoreCost { quote: Err(ProtocolError::RecordExists( PrettyPrintRecordKey::from(&record_key).into_owned(), @@ -721,6 +635,7 @@ impl Node { cost, &address, "ing_metrics, + &payment_address, ), 
payment_address, peer_address: NetworkAddress::from_peer(self_id), @@ -862,130 +777,6 @@ impl Node { } } } - - /// Forward received rewards to another address - fn try_forward_balance( - network: Network, - forward_reason: String, - #[cfg(feature = "open-metrics")] forwarded_balance_metric: Option, - #[cfg(feature = "open-metrics")] current_reward_wallet_balance: Option, - ) -> Result<()> { - let mut spend_requests = vec![]; - { - // load wallet - let mut wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = wallet.balance(); - - if !balance.is_zero() { - let payee = vec![(balance, *PAYMENT_FORWARD_PK)]; - spend_requests.extend(wallet.prepare_forward_signed_spend(payee, forward_reason)?); - } - } - let total_forwarded_amount = spend_requests - .iter() - .map(|s| s.amount().as_nano()) - .sum::(); - - let record_kind = RecordKind::Spend; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: None, - use_put_record_to: None, - verification: None, - }; - - info!( - "Reward forwarding sending {} spends in this iteration. Total forwarded amount: {total_forwarded_amount}", - spend_requests.len() - ); - - for spend_request in spend_requests { - let network_clone = network.clone(); - let put_cfg_clone = put_cfg.clone(); - - // Sent out spend in separate thread to avoid blocking the main one - let _handle = spawn(async move { - let unique_pubkey = *spend_request.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let record_key = network_address.to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&record_key); - - debug!("Reward forwarding in spend {pretty_key:?}: {spend_request:#?}"); - - let value = if let Ok(value) = try_serialize_record(&[spend_request], record_kind) { - value - } else { - error!("Reward forwarding: Failed to serialise spend {pretty_key:?}"); - return; - }; - - let record = Record { - key: record_key.clone(), - value: value.to_vec(), - publisher: None, - expires: None, - }; - - let result = network_clone.put_record(record, &put_cfg_clone).await; - - match result { - Ok(_) => info!("Reward forwarding completed sending spend {pretty_key:?}"), - Err(err) => { - info!("Reward forwarding: sending spend {pretty_key:?} failed with {err:?}") - } - } - }); - - std::thread::sleep(Duration::from_millis(500)); - } - - // write the balance to a file - let balance_file_path = network.root_dir_path().join(FORWARDED_BALANCE_FILE_NAME); - let old_balance = read_forwarded_balance_value(&balance_file_path); - let updated_balance = old_balance + total_forwarded_amount; - debug!("Updating forwarded balance to {updated_balance}"); - write_forwarded_balance_value(&balance_file_path, updated_balance)?; - - #[cfg(feature = "open-metrics")] - { - if let Some(forwarded_balance_metric) = forwarded_balance_metric { - let _ = forwarded_balance_metric.set(updated_balance as i64); - } - - let wallet = HotWallet::load_from(network.root_dir_path())?; - let balance = wallet.balance(); - if let Some(current_reward_wallet_balance) = current_reward_wallet_balance { - let _ = current_reward_wallet_balance.set(balance.as_nano() as i64); - } - } - - Ok(()) - } -} - -fn read_forwarded_balance_value(balance_file_path: &PathBuf) -> u64 { - debug!("Reading forwarded balance from file {balance_file_path:?}"); - match std::fs::read_to_string(balance_file_path) { - Ok(balance) => balance.parse::().unwrap_or_else(|_| { - debug!("The balance from file is 
not a valid number"); - 0 - }), - Err(_) => { - debug!("Error while reading to string, setting the balance to 0. This can happen at node init."); - 0 - } - } -} - -fn write_forwarded_balance_value(balance_file_path: &PathBuf, balance: u64) -> Result<()> { - if let Err(err) = std::fs::write(balance_file_path, balance.to_string()) { - error!( - "Failed to write the updated balance to the file {balance_file_path:?} with {err:?}" - ); - } - Ok(()) } async fn chunk_proof_verify_peer( @@ -1052,29 +843,3 @@ fn received_valid_chunk_proof( None } } - -#[cfg(test)] -mod tests { - - use crate::node::{read_forwarded_balance_value, write_forwarded_balance_value}; - use color_eyre::Result; - use tempfile::tempdir; - #[test] - fn read_and_write_reward_to_file() -> Result<()> { - let dir = tempdir()?; - let balance_file_path = dir.path().join("forwarded_balance"); - - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 0); - - write_forwarded_balance_value(&balance_file_path, balance + 10)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 10); - - write_forwarded_balance_value(&balance_file_path, balance + 100)?; - let balance = read_forwarded_balance_value(&balance_file_path); - assert_eq!(balance, 110); - - Ok(()) - } -} diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 8839c8d631..f78d0990fa 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -6,34 +6,32 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result}; +use crate::{node::Node, Error, Marker, Result}; use libp2p::kad::{Record, RecordKey}; +use sn_evm::ProofOfPayment; use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ storage::{ - try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, - Scratchpad, SpendAddress, + try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, Scratchpad, SpendAddress }, NetworkAddress, PrettyPrintRecordKey, }; use sn_registers::SignedRegister; -use sn_transfers::{ - calculate_royalties_fee, CashNote, CashNoteRedemption, HotWallet, NanoTokens, Payment, - SignedSpend, Transfer, TransferError, UniquePubkey, WalletError, NETWORK_ROYALTIES_PK, -}; +use sn_transfers::{SignedSpend, TransferError, UniquePubkey, QUOTE_EXPIRATION_SECS}; use std::collections::BTreeSet; +use std::time::{Duration, UNIX_EPOCH}; use tokio::task::JoinSet; use xor_name::XorName; impl Node { - /// Validate a record and it's payment, and store the record to the RecordStore + /// Validate a record and its payment, and store the record to the RecordStore pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> { let record_header = RecordHeader::from_record(&record)?; match record_header.kind { RecordKind::ChunkWithPayment => { let record_key = record.key.clone(); - let (payment, chunk) = try_deserialize_record::<(Payment, Chunk)>(&record)?; + let (payment, chunk) = try_deserialize_record::<(ProofOfPayment, Chunk)>(&record)?; let already_exists = self .validate_key_and_existence(&chunk.network_address(), &record_key) .await?; @@ -97,7 +95,7 @@ impl Node { RecordKind::ScratchpadWithPayment => { let record_key = record.key.clone(); let (payment, scratchpad) = - 
try_deserialize_record::<(Payment, Scratchpad)>(&record)?;
+ try_deserialize_record::<(ProofOfPayment, Scratchpad)>(&record)?;
 let _already_exists = self
 .validate_key_and_existence(&scratchpad.network_address(), &record_key)
 .await?;
@@ -209,7 +207,7 @@ impl Node {
 }
 RecordKind::RegisterWithPayment => {
 let (payment, register) =
- try_deserialize_record::<(Payment, SignedRegister)>(&record)?;
+ try_deserialize_record::<(ProofOfPayment, SignedRegister)>(&record)?;
 // check if the deserialized value's RegisterAddress matches the record's key
 let net_addr = NetworkAddress::from_register_address(*register.address());
@@ -573,160 +571,68 @@ impl Node {
 Ok(())
 }
- /// Gets CashNotes out of Transfers, this includes network verifications of the Transfers
- /// Rewraps the royalties transfers into encrypted Transfers ready to be sent directly to the beneficiary
- async fn cash_notes_from_transfers(
- &self,
- transfers: Vec<Transfer>,
- wallet: &HotWallet,
- pretty_key: PrettyPrintRecordKey<'static>,
- ) -> Result<(NanoTokens, Vec<CashNote>, Vec<CashNoteRedemption>)> {
- let royalties_pk = *NETWORK_ROYALTIES_PK;
- let mut cash_notes = vec![];
- let mut royalties_cash_notes_r = vec![];
- let mut received_fee = NanoTokens::zero();
-
- for transfer in transfers {
- match transfer {
- Transfer::Encrypted(_) => match self
- .network()
- .verify_and_unpack_transfer(&transfer, wallet)
- .await
- {
- // transfer not for us
- Err(NetworkError::Wallet(WalletError::FailedToDecypherTransfer)) => continue,
- // transfer invalid
- Err(e) => return Err(e.into()),
- // transfer ok, add to cash_notes and continue as more transfers might be ours
- Ok(cns) => cash_notes.extend(cns),
- },
- Transfer::NetworkRoyalties(cashnote_redemptions) => {
- match self
- .network()
- .verify_cash_notes_redemptions(royalties_pk, &cashnote_redemptions)
- .await
- {
- Ok(cash_notes) => {
- let received_royalties = total_cash_notes_amount(&cash_notes)?;
- debug!(
- "{} network royalties payment cash notes found for record {pretty_key} for a total value of {received_royalties:?}",
- cash_notes.len()
- );
- royalties_cash_notes_r.extend(cashnote_redemptions);
- received_fee = received_fee
- .checked_add(received_royalties)
- .ok_or_else(|| Error::NumericOverflow)?;
- }
- Err(e) => {
- warn!(
- "Invalid network royalties payment for record {pretty_key}: {e:?}"
- );
- }
- }
- }
- }
- }
-
- if cash_notes.is_empty() {
- Err(Error::NoPaymentToOurNode(pretty_key))
- } else {
- let received_fee_to_our_node = total_cash_notes_amount(&cash_notes)?;
- info!(
- "{} cash note/s (for a total of {received_fee_to_our_node:?}) are for us for {pretty_key}",
- cash_notes.len()
- );
- received_fee = received_fee
- .checked_add(received_fee_to_our_node)
- .ok_or_else(|| Error::NumericOverflow)?;
-
- Ok((received_fee, cash_notes, royalties_cash_notes_r))
- }
- }
-
 /// Perform validations on the provided `Record`.
async fn payment_for_us_exists_and_is_still_valid( &self, address: &NetworkAddress, - payment: Payment, + payment: ProofOfPayment, ) -> Result<()> { let key = address.to_record_key(); let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); debug!("Validating record payment for {pretty_key}"); - // load wallet - let mut wallet = HotWallet::load_from(self.network().root_dir_path())?; - let old_balance = wallet.balance().as_nano(); - - // unpack transfer - debug!("Unpacking incoming Transfers for record {pretty_key}"); - let (received_fee, mut cash_notes, royalties_cash_notes_r) = self - .cash_notes_from_transfers(payment.transfers, &wallet, pretty_key.clone()) - .await?; - - // check for cash notes that we have already spent - // this can happen in cases where the client retries a failed PUT after we have already used the cash note - cash_notes.retain(|cash_note| { - let already_present = wallet.cash_note_presents(&cash_note.unique_pubkey()); - if already_present { - return !already_present; - } - - let spend_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - !wallet.has_confirmed_spend(spend_addr) - }); - if cash_notes.is_empty() { - info!("All incoming cash notes were already received, no need to further process"); - return Err(Error::ReusedPayment); + // check if the quote is valid + let storecost = payment.quote.cost; + let self_peer_id = self.network().peer_id(); + if !payment.quote.check_is_signed_by_claimed_peer(self_peer_id) { + warn!("Payment quote signature is not valid for record {pretty_key}"); + return Err(Error::InvalidRequest(format!( + "Payment quote signature is not valid for record {pretty_key}" + ))); } - - debug!("Received payment of {received_fee:?} for {pretty_key}"); + debug!("Payment quote signature is valid for record {pretty_key}"); + + // verify quote timestamp + let quote_timestamp = payment.quote.timestamp; + let quote_expiration_time = quote_timestamp + Duration::from_secs(QUOTE_EXPIRATION_SECS); + let quote_expiration_time_in_secs = quote_expiration_time + .duration_since(UNIX_EPOCH) + .map_err(|e| { + Error::InvalidRequest(format!( + "Payment quote timestamp is invalid for record {pretty_key}: {e}" + )) + })? + .as_secs(); + + // check if payment is valid on chain + debug!("Verifying payment for record {pretty_key}"); + self.evm_network() + .verify_chunk_payment( + payment.tx_hash, + payment.quote.hash(), + *self.reward_address(), + storecost.as_atto(), + quote_expiration_time_in_secs, + ) + .await + .map_err(|e| Error::EvmNetwork(format!("Failed to verify chunk payment: {e}")))?; + debug!("Payment is valid for record {pretty_key}"); // Notify `record_store` that the node received a payment. self.network().notify_payment_received(); - // deposit the CashNotes in our wallet - wallet.deposit_and_store_to_disk(&cash_notes)?; - let new_balance = wallet.balance().as_nano(); - info!( - "The new wallet balance is {new_balance}, after earning {}", - new_balance - old_balance - ); - #[cfg(feature = "open-metrics")] if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics + let _prev = node_metrics .current_reward_wallet_balance - .set(new_balance as i64); - } - - if royalties_cash_notes_r.is_empty() { - warn!("No network royalties payment found for record {pretty_key}"); - return Err(Error::NoNetworkRoyaltiesPayment(pretty_key.into_owned())); + .inc_by(storecost.as_atto().try_into().unwrap_or(i64::MAX)); // TODO maybe metrics should be in u256 too? 
}
+ self.events_channel()
+ .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone()));

- // check if the quote is valid
- let storecost = payment.quote.cost;
- verify_quote_for_storecost(self.network(), payment.quote, address)?;
- debug!("Payment quote valid for record {pretty_key}");
-
- // Let's check payment is sufficient both for our store cost and for network royalties
- // Since the storage payment is made to a single node, we can calculate the royalties fee based on that single payment.
- let expected_royalties_fee = calculate_royalties_fee(storecost);
- let expected_fee = storecost
- .checked_add(expected_royalties_fee)
- .ok_or(Error::NumericOverflow)?;
-
- // finally, (after we accept any payments to us as they are ours now anyway)
- // lets check they actually paid enough
- if received_fee < expected_fee {
- debug!("Payment insufficient for record {pretty_key}. {received_fee:?} is less than {expected_fee:?}");
- return Err(Error::PaymentProofInsufficientAmount {
- paid: received_fee,
- expected: expected_fee,
- });
- }
+ // NB TODO: tell happybeing about the AttoToken change
 // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
- info!("Total payment of {received_fee:?} nanos accepted for record {pretty_key}");
+ info!("Total payment of {storecost:?} nanos accepted for record {pretty_key}");
 Ok(())
 }
@@ -1004,19 +910,3 @@ impl Node {
 }
 }
 }
-
-// Helper to calculate total amout of tokens received in a given set of CashNotes
-fn total_cash_notes_amount<'a, I>(cash_notes: I) -> Result<NanoTokens>
-where
- I: IntoIterator<Item = &'a CashNote>,
-{
- let mut received_fee = NanoTokens::zero();
- for cash_note in cash_notes {
- let amount = cash_note.value();
- received_fee = received_fee
- .checked_add(amount)
- .ok_or(Error::NumericOverflow)?;
- }
-
- Ok(received_fee)
-}
diff --git a/sn_node/src/quote.rs b/sn_node/src/quote.rs
index 2020a2995d..42079b1d0c 100644
--- a/sn_node/src/quote.rs
+++ b/sn_node/src/quote.rs
@@ -8,21 +8,28 @@
 use crate::{node::Node, Error, Result};
 use libp2p::PeerId;
+use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics, RewardsAddress};
 use sn_networking::{calculate_cost_for_records, Network, NodeIssue};
 use sn_protocol::{error::Error as ProtocolError, storage::ChunkAddress, NetworkAddress};
-use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics};
 use std::time::Duration;

 impl Node {
 pub(crate) fn create_quote_for_storecost(
 network: &Network,
- cost: NanoTokens,
+ cost: AttoTokens,
 address: &NetworkAddress,
 quoting_metrics: &QuotingMetrics,
+ payment_address: &RewardsAddress,
 ) -> Result<PaymentQuote, ProtocolError> {
 let content = address.as_xorname().unwrap_or_default();
 let timestamp = std::time::SystemTime::now();
- let bytes = PaymentQuote::bytes_for_signing(content, cost, timestamp, quoting_metrics);
+ let bytes = PaymentQuote::bytes_for_signing(
+ content,
+ cost,
+ timestamp,
+ quoting_metrics,
+ payment_address,
+ );

 let Ok(signature) = network.sign(&bytes) else {
 return Err(ProtocolError::QuoteGenerationFailed);
@@ -34,6 +41,7 @@ impl Node {
 timestamp,
 quoting_metrics: quoting_metrics.clone(),
 pub_key: network.get_pub_key(),
+ rewards_address: *payment_address,
 signature,
 };

@@ -60,12 +68,7 @@ pub(crate) fn verify_quote_for_storecost(
 }

 // check sig
- let bytes = PaymentQuote::bytes_for_signing(
- quote.content,
- quote.cost,
- quote.timestamp,
- &quote.quoting_metrics,
- );
+ let bytes = quote.bytes_for_sig();
 let signature = quote.signature;
 if !network.verify(&bytes, &signature) {
 return Err(Error::InvalidQuoteSignature);
@@ -96,7 +99,7 @@ pub(crate)
async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, .filter(|(peer_id, quote)| { let is_same_target = quote.content == self_quote.content; let is_not_self = *peer_id != network.peer_id(); - let is_not_zero_quote = quote.cost != NanoTokens::zero(); + let is_not_zero_quote = quote.cost != AttoTokens::zero(); let time_gap = Duration::from_secs(10); let is_around_same_time = if quote.timestamp > self_quote.timestamp { @@ -119,7 +122,7 @@ pub(crate) async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, quotes_for_nodes_duty.retain(|(peer_id, quote)| { let cost = calculate_cost_for_records(quote.quoting_metrics.close_records_stored); - let is_same_as_expected = quote.cost == NanoTokens::from(cost); + let is_same_as_expected = quote.cost == AttoTokens::from_u64(cost); if !is_same_as_expected { info!("Quote from {peer_id:?} using a different quoting_metrics to achieve the claimed cost. Quote {quote:?} can only result in cost {cost:?}"); diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index baba07c851..36626b920d 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -23,8 +23,7 @@ use sn_protocol::{ NetworkAddress, }; use sn_registers::Permissions; -use sn_transfers::HotWallet; -use sn_transfers::{CashNote, MainSecretKey, NanoTokens}; +use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; use std::{ collections::{BTreeMap, VecDeque}, fmt, diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 1352a24659..8d06a87187 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -1,683 +1,683 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod common; - -use assert_fs::TempDir; -use assert_matches::assert_matches; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::{bail, Result}; -use itertools::Itertools; -use sn_logging::LogBuilder; -use sn_networking::NetworkError; -use sn_transfers::{ - get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, - WalletError, GENESIS_CASHNOTE, -}; -use std::time::Duration; -use tracing::*; - -#[tokio::test] -async fn cash_note_transfer_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - // create 1 wallet add money from faucet - let first_wallet_dir = TempDir::new()?; - - let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - // create wallet 2 and 3 to receive money from 1 - let second_wallet_dir = TempDir::new()?; - let second_wallet = get_wallet(second_wallet_dir.path()); - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - let third_wallet_dir = TempDir::new()?; - let third_wallet = get_wallet(third_wallet_dir.path()); - assert_eq!(third_wallet.balance(), NanoTokens::zero()); - - // manually forge two transfers of the same source - let amount = NanoTokens::from(first_wallet_balance / 3); - let to1 = first_wallet.address(); - let to2 = second_wallet.address(); - let to3 = third_wallet.address(); - - let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; - let same_cash_notes = some_cash_notes.clone(); - - let mut rng = rng::thread_rng(); - - let reason = SpendReason::default(); - let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); - - let transfer_to_2 = SignedTransaction::new( - some_cash_notes, - vec![to2_unique_key], - to1, - reason.clone(), - first_wallet.key(), - )?; - let transfer_to_3 = SignedTransaction::new( - same_cash_notes, - vec![to3_unique_key], - to1, - reason, - first_wallet.key(), - )?; - - // send both transfers to the network - // upload won't error out, only error out during verification. - info!("Sending both transfers to the network..."); - let res = client.send_spends(transfer_to_2.spends.iter(), false).await; - assert!(res.is_ok()); - let res = client.send_spends(transfer_to_3.spends.iter(), false).await; - assert!(res.is_ok()); - - // we wait 5s to ensure that the double spend attempt is detected and accumulated - info!("Verifying the transfers from first wallet... 
Sleeping for 10 seconds."); - tokio::time::sleep(Duration::from_secs(10)).await; - - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - - // check the CashNotes, it should fail - let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; - let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; - info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); - assert!(should_err1.is_err() && should_err2.is_err()); - assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - Ok(()) -} - -#[tokio::test] -async fn genesis_double_spend_fail() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - - // create a client and an unused wallet to make sure some money already exists in the system - let first_wallet_dir = TempDir::new()?; - let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_addr = first_wallet.address(); - - // create a new genesis wallet with the intention to spend genesis again - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; - second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; - let genesis_amount = GENESIS_CASHNOTE.value(); - let second_wallet_addr = second_wallet.address(); - - // create a transfer from the second wallet to the first wallet - // this will spend Genesis (again) and transfer its value to the first wallet - let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; - let mut rng = rng::thread_rng(); - let recipient = ( - genesis_amount, - first_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let change_addr = second_wallet_addr; - let reason = SpendReason::default(); - let transfer = SignedTransaction::new( - genesis_cashnote, - vec![recipient], - change_addr, - reason, - second_wallet.key(), - )?; - - // send the transfer to the network which will mark genesis as a double spent - // making its direct descendants unspendable - let res = client.send_spends(transfer.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert!(res.is_ok()); - - // put the bad cashnote in the first wallet - first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; - - // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) - let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; - let recipient = ( - genesis_amount, - second_wallet_addr, - DerivationIndex::random(&mut rng), - false, - ); - let bad_genesis_descendant = genesis_cashnote_and_others - .iter() - .find(|cn| cn.value() == genesis_amount) - .unwrap() - .clone(); - let change_addr = first_wallet_addr; - let reason = SpendReason::default(); - let transfer2 = SignedTransaction::new( - vec![bad_genesis_descendant], - vec![recipient], - change_addr, - reason, - first_wallet.key(), - )?; - - // send the transfer to the network which should reject it - let res = 
client.send_spends(transfer2.spends.iter(), false).await; - std::mem::drop(exclusive_access); - assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); - - Ok(()) -} - -#[tokio::test] -async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_1 = TempDir::new()?; - - let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; - let balance_1 = wallet_1.balance().as_nano(); - let amount = NanoTokens::from(balance_1 / 2); - let to1 = wallet_1.address(); - - // Send from 1 -> 2 - let wallet_dir_2 = TempDir::new()?; - let mut wallet_2 = get_wallet(wallet_dir_2.path()); - assert_eq!(wallet_2.balance(), NanoTokens::zero()); - - let to2 = wallet_2.address(); - let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; - let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); - let transfer_to_2 = SignedTransaction::new( - cash_notes_1.clone(), - vec![to_2_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; - - info!("Sending 1->2 to the network..."); - client - .send_spends(transfer_to_2.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 1 -> 2 wallet..."); - let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_2[0]).await?; - wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 - - // Send from 2 -> 22 - let wallet_dir_22 = TempDir::new()?; - let mut wallet_22 = get_wallet(wallet_dir_22.path()); - assert_eq!(wallet_22.balance(), NanoTokens::zero()); - - let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; - assert!(!cash_notes_2.is_empty()); - let to_22_unique_key = ( - wallet_2.balance(), - wallet_22.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_22 = SignedTransaction::new( - cash_notes_2, - vec![to_22_unique_key], - to2, - reason.clone(), - wallet_2.key(), - )?; - - client - .send_spends(transfer_to_22.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 2 -> 22 wallet..."); - let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_22[0]).await?; - wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 - - // Try to double spend from 1 -> 3 - let wallet_dir_3 = TempDir::new()?; - let wallet_3 = get_wallet(wallet_dir_3.path()); - assert_eq!(wallet_3.balance(), NanoTokens::zero()); - - let to_3_unique_key = ( - amount, - wallet_3.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_3 = SignedTransaction::new( - cash_notes_1, - vec![to_3_unique_key], - to1, - reason.clone(), - wallet_1.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_3.spends.iter(), false) - .await?; - info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); - let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned - info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); - assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned - - // The old spend has been poisoned, but spends from 22 -> 222 should still work - let wallet_dir_222 = TempDir::new()?; - let wallet_222 = get_wallet(wallet_dir_222.path()); - assert_eq!(wallet_222.balance(), NanoTokens::zero()); - - let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; - assert!(!cash_notes_22.is_empty()); - let to_222_unique_key = ( - wallet_22.balance(), - wallet_222.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_222 = SignedTransaction::new( - cash_notes_22, - vec![to_222_unique_key], - wallet_22.address(), - reason, - wallet_22.key(), - )?; - client - .send_spends(transfer_to_222.spends.iter(), false) - .await?; - - info!("Verifying the transfers from 22 -> 222 wallet..."); - let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_222[0]).await?; - - // finally assert that we have a double spend attempt error here - // we wait 1s to ensure that the double spend attempt is detected and accumulated - tokio::time::sleep(Duration::from_secs(5)).await; - - match client.verify_cashnote(&cash_notes_for_2[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - - match client.verify_cashnote(&cash_notes_for_3[0]).await { - Ok(_) => bail!("Cashnote verification should have failed"), - Err(e) => { - assert!( - e.to_string() - .contains("Network Error Double spend(s) attempt was detected"), - "error should reflect double spend attempt", - ); - } - } - Ok(()) -} - -#[tokio::test] -/// When A -> B -> C where C is the UTXO cashnote, then double spending A and then double spending B should lead to C -/// being invalid. 
-async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); - let mut rng = rng::thread_rng(); - let reason = SpendReason::default(); - // create 1 wallet add money from faucet - let wallet_dir_a = TempDir::new()?; - - let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; - let balance_a = wallet_a.balance().as_nano(); - let amount = NanoTokens::from(balance_a / 2); - - // Send from A -> B - let wallet_dir_b = TempDir::new()?; - let mut wallet_b = get_wallet(wallet_dir_b.path()); - assert_eq!(wallet_b.balance(), NanoTokens::zero()); - - let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; - let to_b_unique_key = ( - amount, - wallet_b.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_b = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_b_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; - - info!("Sending A->B to the network..."); - client - .send_spends(transfer_to_b.spends.iter(), false) - .await?; - - info!("Verifying the transfers from A -> B wallet..."); - let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_b[0]).await?; - wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B - - // Send from B -> C - let wallet_dir_c = TempDir::new()?; - let mut wallet_c = get_wallet(wallet_dir_c.path()); - assert_eq!(wallet_c.balance(), NanoTokens::zero()); - - let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; - assert!(!cash_notes_b.is_empty()); - let to_c_unique_key = ( - wallet_b.balance(), - wallet_c.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_c = SignedTransaction::new( - cash_notes_b.clone(), - vec![to_c_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; - - info!("spend B to C: {:?}", transfer_to_c.spends); - client - .send_spends(transfer_to_c.spends.iter(), false) - .await?; - - info!("Verifying the transfers from B -> C wallet..."); - let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); - client.verify_cashnote(&cash_notes_for_c[0]).await?; - wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c - - // Try to double spend from A -> X - let wallet_dir_x = TempDir::new()?; - let wallet_x = get_wallet(wallet_dir_x.path()); - assert_eq!(wallet_x.balance(), NanoTokens::zero()); - - let to_x_unique_key = ( - amount, - wallet_x.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_x = SignedTransaction::new( - cash_notes_a, - vec![to_x_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_x.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> X wallet... 
It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(10)).await; - - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); // poisoned - - // Try to double spend from B -> Y - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_b, - vec![to_y_unique_key], - wallet_b.address(), - reason.clone(), - wallet_b.key(), - )?; // reuse the old cash notes - - info!("spend B to Y: {:?}", transfer_to_y.spends); - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); - let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; - info!("B spends: {b_spends:?}"); - - info!("Verifying the transfers from B -> Y wallet... It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(30)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from B -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of A -> B"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - info!("Got result while verifying the original spend from A -> B: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - info!("Verifying the original cashnote of B -> C"); - let result = client.verify_cashnote(&cash_notes_for_c[0]).await; - info!("Got result while verifying the original spend from B -> C: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - let result = client.verify_cashnote(&cash_notes_for_b[0]).await; - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }, "result should be verify error, it was {result:?}"); - 
- Ok(())
-}
-
-#[tokio::test]
-/// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over
-/// should not lead to the original A disappearing and B becoming orphan
-async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true);
- let mut rng = rng::thread_rng();
- let reason = SpendReason::default();
- // create 1 wallet add money from faucet
- let wallet_dir_a = TempDir::new()?;
-
- let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?;
- let balance_a = wallet_a.balance().as_nano();
- let amount = NanoTokens::from(balance_a / 2);
-
- // Send from A -> B
- let wallet_dir_b = TempDir::new()?;
- let mut wallet_b = get_wallet(wallet_dir_b.path());
- assert_eq!(wallet_b.balance(), NanoTokens::zero());
-
- let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?;
- let to_b_unique_key = (
- amount,
- wallet_b.address(),
- DerivationIndex::random(&mut rng),
- false,
- );
- let transfer_to_b = SignedTransaction::new(
- cash_notes_a.clone(),
- vec![to_b_unique_key],
- wallet_a.address(),
- reason.clone(),
- wallet_a.key(),
- )?;
-
- info!("Sending A->B to the network...");
- client
- .send_spends(transfer_to_b.spends.iter(), false)
- .await?;
-
- // save original A spend
- let vec_of_spends = transfer_to_b.spends.into_iter().collect::<Vec<_>>();
- let original_a_spend = if let [spend] = vec_of_spends.as_slice() {
- spend
- } else {
- panic!("Expected to have one spend here!");
- };
-
- info!("Verifying the transfers from A -> B wallet...");
- let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone();
- client.verify_cashnote(&cash_notes_for_b[0]).await?;
- wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B
-
- // Send from B -> C
- let wallet_dir_c = TempDir::new()?;
- let mut wallet_c = get_wallet(wallet_dir_c.path());
- assert_eq!(wallet_c.balance(), NanoTokens::zero());
-
- let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?;
- assert!(!cash_notes_b.is_empty());
- let to_c_unique_key = (
- wallet_b.balance(),
- wallet_c.address(),
- DerivationIndex::random(&mut rng),
- false,
- );
- let transfer_to_c = SignedTransaction::new(
- cash_notes_b.clone(),
- vec![to_c_unique_key],
- wallet_b.address(),
- reason.clone(),
- wallet_b.key(),
- )?;
-
- client
- .send_spends(transfer_to_c.spends.iter(), false)
- .await?;
-
- info!("Verifying the transfers from B -> C wallet...");
- let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone();
- client.verify_cashnote(&cash_notes_for_c[0]).await?;
- wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c
-
- // Try to double spend from A -> X
- let wallet_dir_x = TempDir::new()?;
- let wallet_x = get_wallet(wallet_dir_x.path());
- assert_eq!(wallet_x.balance(), NanoTokens::zero());
-
- let to_x_unique_key = (
- amount,
- wallet_x.address(),
- DerivationIndex::random(&mut rng),
- false,
- );
- let transfer_to_x = SignedTransaction::new(
- cash_notes_a.clone(),
- vec![to_x_unique_key],
- wallet_a.address(),
- reason.clone(),
- wallet_a.key(),
- )?; // reuse the old cash notes
- client
- .send_spends(transfer_to_x.spends.iter(), false)
- .await?;
- info!("Verifying the transfers from A -> X wallet...
It should error out."); - let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_secs(15)).await; - - let result = client.verify_cashnote(&cash_notes_for_x[0]).await; - info!("Got result while verifying double spend from A -> X: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - - // Try to double spend A -> n different random keys - for _ in 0..20 { - info!("Spamming double spends on A"); - let wallet_dir_y = TempDir::new()?; - let wallet_y = get_wallet(wallet_dir_y.path()); - assert_eq!(wallet_y.balance(), NanoTokens::zero()); - - let to_y_unique_key = ( - amount, - wallet_y.address(), - DerivationIndex::random(&mut rng), - false, - ); - let transfer_to_y = SignedTransaction::new( - cash_notes_a.clone(), - vec![to_y_unique_key], - wallet_a.address(), - reason.clone(), - wallet_a.key(), - )?; // reuse the old cash notes - client - .send_spends(transfer_to_y.spends.iter(), false) - .await?; - info!("Verifying the transfers from A -> Y wallet... It should error out."); - let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); - - // sleep for a bit to allow the network to process and accumulate the double spend - tokio::time::sleep(Duration::from_millis(500)).await; - - let result = client.verify_cashnote(&cash_notes_for_y[0]).await; - info!("Got result while verifying double spend from A -> Y: {result:?}"); - assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { - assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); - }); - - // the original A should still be present as one of the double spends - let res = client - .get_spend_from_network(original_a_spend.address()) - .await; - assert_matches!( - res, - Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( - _ - ))) - ); - if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { - assert!(spends.iter().contains(original_a_spend)) - } - } - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use assert_fs::TempDir; +// use assert_matches::assert_matches; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::{bail, Result}; +// use itertools::Itertools; +// use sn_transfers::{ +// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, +// SpendReason, WalletError, GENESIS_CASHNOTE, +// }; +// use sn_logging::LogBuilder; +// use sn_networking::NetworkError; +// use std::time::Duration; +// use tracing::*; + +// #[tokio::test] +// async fn cash_note_transfer_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// // create 1 wallet add money from faucet +// let first_wallet_dir = TempDir::new()?; + +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance = first_wallet.balance().as_nano(); + +// // create wallet 2 and 3 to receive money from 1 +// let second_wallet_dir = TempDir::new()?; +// let second_wallet = get_wallet(second_wallet_dir.path()); +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); +// let third_wallet_dir = TempDir::new()?; +// let third_wallet = get_wallet(third_wallet_dir.path()); +// assert_eq!(third_wallet.balance(), NanoTokens::zero()); + +// // manually forge two transfers of the same source +// let amount = first_wallet_balance / 3; +// let to1 = first_wallet.address(); +// let to2 = second_wallet.address(); +// let to3 = third_wallet.address(); + +// let (some_cash_notes, _exclusive_access) = first_wallet.available_cash_notes()?; +// let same_cash_notes = some_cash_notes.clone(); + +// let mut rng = rng::thread_rng(); + +// let reason = SpendReason::default(); +// let to2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let to3_unique_key = (amount, to3, DerivationIndex::random(&mut rng), false); + +// let transfer_to_2 = SignedTransaction::new( +// some_cash_notes, +// vec![to2_unique_key], +// to1, +// reason.clone(), +// first_wallet.key(), +// )?; +// let transfer_to_3 = SignedTransaction::new( +// same_cash_notes, +// vec![to3_unique_key], +// to1, +// reason, +// first_wallet.key(), +// )?; + +// // send both transfers to the network +// // upload won't error out, only error out during verification. +// info!("Sending both transfers to the network..."); +// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; +// assert!(res.is_ok()); +// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; +// assert!(res.is_ok()); + +// // we wait 5s to ensure that the double spend attempt is detected and accumulated +// info!("Verifying the transfers from first wallet... 
Sleeping for 10 seconds."); +// tokio::time::sleep(Duration::from_secs(10)).await; + +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); + +// // check the CashNotes, it should fail +// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// assert!(should_err1.is_err() && should_err2.is_err()); +// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); +// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// Ok(()) +// } + +// #[tokio::test] +// async fn genesis_double_spend_fail() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); + +// // create a client and an unused wallet to make sure some money already exists in the system +// let first_wallet_dir = TempDir::new()?; +// let (client, mut first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_addr = first_wallet.address(); + +// // create a new genesis wallet with the intention to spend genesis again +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?; +// second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?; +// let genesis_amount = GENESIS_CASHNOTE.value(); +// let second_wallet_addr = second_wallet.address(); + +// // create a transfer from the second wallet to the first wallet +// // this will spend Genesis (again) and transfer its value to the first wallet +// let (genesis_cashnote, exclusive_access) = second_wallet.available_cash_notes()?; +// let mut rng = rng::thread_rng(); +// let recipient = ( +// genesis_amount, +// first_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let change_addr = second_wallet_addr; +// let reason = SpendReason::default(); +// let transfer = SignedTransaction::new( +// genesis_cashnote, +// vec![recipient], +// change_addr, +// reason, +// second_wallet.key(), +// )?; + +// // send the transfer to the network which will mark genesis as a double spent +// // making its direct descendants unspendable +// let res = client.send_spends(transfer.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert!(res.is_ok()); + +// // put the bad cashnote in the first wallet +// first_wallet.deposit_and_store_to_disk(&transfer.output_cashnotes)?; + +// // now try to spend this illegitimate cashnote (direct descendant of double spent genesis) +// let (genesis_cashnote_and_others, exclusive_access) = first_wallet.available_cash_notes()?; +// let recipient = ( +// genesis_amount, +// second_wallet_addr, +// DerivationIndex::random(&mut rng), +// false, +// ); +// let bad_genesis_descendant = genesis_cashnote_and_others +// .iter() +// .find(|cn| cn.value() == genesis_amount) +// .unwrap() +// .clone(); +// let change_addr = first_wallet_addr; +// let reason = SpendReason::default(); +// let transfer2 = SignedTransaction::new( +// vec![bad_genesis_descendant], +// 
vec![recipient], +// change_addr, +// reason, +// first_wallet.key(), +// )?; + +// // send the transfer to the network which should reject it +// let res = client.send_spends(transfer2.spends.iter(), false).await; +// std::mem::drop(exclusive_access); +// assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); + +// Ok(()) +// } + +// #[tokio::test] +// async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_1 = TempDir::new()?; + +// let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; +// let balance_1 = wallet_1.balance(); +// let amount = balance_1 / 2; +// let to1 = wallet_1.address(); + +// // Send from 1 -> 2 +// let wallet_dir_2 = TempDir::new()?; +// let mut wallet_2 = get_wallet(wallet_dir_2.path()); +// assert_eq!(wallet_2.balance(), NanoTokens::zero()); + +// let to2 = wallet_2.address(); +// let (cash_notes_1, _exclusive_access) = wallet_1.available_cash_notes()?; +// let to_2_unique_key = (amount, to2, DerivationIndex::random(&mut rng), false); +// let transfer_to_2 = SignedTransaction::new( +// cash_notes_1.clone(), +// vec![to_2_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; + +// info!("Sending 1->2 to the network..."); +// client +// .send_spends(transfer_to_2.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 1 -> 2 wallet..."); +// let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_2[0]).await?; +// wallet_2.deposit_and_store_to_disk(&cash_notes_for_2)?; // store inside 2 + +// // Send from 2 -> 22 +// let wallet_dir_22 = TempDir::new()?; +// let mut wallet_22 = get_wallet(wallet_dir_22.path()); +// assert_eq!(wallet_22.balance(), NanoTokens::zero()); + +// let (cash_notes_2, _exclusive_access) = wallet_2.available_cash_notes()?; +// assert!(!cash_notes_2.is_empty()); +// let to_22_unique_key = ( +// wallet_2.balance(), +// wallet_22.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_22 = SignedTransaction::new( +// cash_notes_2, +// vec![to_22_unique_key], +// to2, +// reason.clone(), +// wallet_2.key(), +// )?; + +// client +// .send_spends(transfer_to_22.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 2 -> 22 wallet..."); +// let cash_notes_for_22: Vec<_> = transfer_to_22.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_22[0]).await?; +// wallet_22.deposit_and_store_to_disk(&cash_notes_for_22)?; // store inside 22 + +// // Try to double spend from 1 -> 3 +// let wallet_dir_3 = TempDir::new()?; +// let wallet_3 = get_wallet(wallet_dir_3.path()); +// assert_eq!(wallet_3.balance(), NanoTokens::zero()); + +// let to_3_unique_key = ( +// amount, +// wallet_3.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_3 = SignedTransaction::new( +// cash_notes_1, +// vec![to_3_unique_key], +// to1, +// reason.clone(), +// wallet_1.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_3.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from 1 -> 3 wallet... 
It should error out."); +// let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); +// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned +// info!("Verifying the original transfers from 1 -> 2 wallet... It should error out."); +// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// // The old spend has been poisoned, but spends from 22 -> 222 should still work +// let wallet_dir_222 = TempDir::new()?; +// let wallet_222 = get_wallet(wallet_dir_222.path()); +// assert_eq!(wallet_222.balance(), NanoTokens::zero()); + +// let (cash_notes_22, _exclusive_access) = wallet_22.available_cash_notes()?; +// assert!(!cash_notes_22.is_empty()); +// let to_222_unique_key = ( +// wallet_22.balance(), +// wallet_222.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_222 = SignedTransaction::new( +// cash_notes_22, +// vec![to_222_unique_key], +// wallet_22.address(), +// reason, +// wallet_22.key(), +// )?; +// client +// .send_spends(transfer_to_222.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from 22 -> 222 wallet..."); +// let cash_notes_for_222: Vec<_> = transfer_to_222.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_222[0]).await?; + +// // finally assert that we have a double spend attempt error here +// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// tokio::time::sleep(Duration::from_secs(5)).await; + +// match client.verify_cashnote(&cash_notes_for_2[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } + +// match client.verify_cashnote(&cash_notes_for_3[0]).await { +// Ok(_) => bail!("Cashnote verification should have failed"), +// Err(e) => { +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", +// ); +// } +// } +// Ok(()) +// } + +// #[tokio::test] +// /// When A -> B -> C where C is the UTXO cashnote, then double spending A and then double spending B should lead to C +// /// being invalid. 
+// async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_a = TempDir::new()?; + +// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = balance_a / 2; + +// // Send from A -> B +// let wallet_dir_b = TempDir::new()?; +// let mut wallet_b = get_wallet(wallet_dir_b.path()); +// assert_eq!(wallet_b.balance(), NanoTokens::zero()); + +// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; +// let to_b_unique_key = ( +// amount, +// wallet_b.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_b = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_b_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; + +// info!("Sending A->B to the network..."); +// client +// .send_spends(transfer_to_b.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from A -> B wallet..."); +// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_b[0]).await?; +// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + +// // Send from B -> C +// let wallet_dir_c = TempDir::new()?; +// let mut wallet_c = get_wallet(wallet_dir_c.path()); +// assert_eq!(wallet_c.balance(), NanoTokens::zero()); + +// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; +// assert!(!cash_notes_b.is_empty()); +// let to_c_unique_key = ( +// wallet_b.balance(), +// wallet_c.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_c = SignedTransaction::new( +// cash_notes_b.clone(), +// vec![to_c_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; + +// info!("spend B to C: {:?}", transfer_to_c.spends); +// client +// .send_spends(transfer_to_c.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from B -> C wallet..."); +// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_c[0]).await?; +// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + +// // Try to double spend from A -> X +// let wallet_dir_x = TempDir::new()?; +// let wallet_x = get_wallet(wallet_dir_x.path()); +// assert_eq!(wallet_x.balance(), NanoTokens::zero()); + +// let to_x_unique_key = ( +// amount, +// wallet_x.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_x = SignedTransaction::new( +// cash_notes_a, +// vec![to_x_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... 
It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(10)).await; + +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // poisoned + +// // Try to double spend from B -> Y +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_b, +// vec![to_y_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; // reuse the old cash notes + +// info!("spend B to Y: {:?}", transfer_to_y.spends); +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// let spend_b_to_y = transfer_to_y.spends.first().expect("should have one"); +// let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; +// info!("B spends: {b_spends:?}"); + +// info!("Verifying the transfers from B -> Y wallet... It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(30)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from B -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of A -> B"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// info!("Got result while verifying the original spend from A -> B: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// info!("Verifying the original cashnote of B -> C"); +// let result = client.verify_cashnote(&cash_notes_for_c[0]).await; +// info!("Got result while verifying the original spend from B -> C: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); +// let result = client.verify_cashnote(&cash_notes_for_b[0]).await; +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) 
attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); + +// Ok(()) +// } + +// #[tokio::test] +// /// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over +// /// should not lead to the original A disappearing and B becoming orphan +// async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let mut rng = rng::thread_rng(); +// let reason = SpendReason::default(); +// // create 1 wallet add money from faucet +// let wallet_dir_a = TempDir::new()?; + +// let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; +// let balance_a = wallet_a.balance(); +// let amount = balance_a / 2; + +// // Send from A -> B +// let wallet_dir_b = TempDir::new()?; +// let mut wallet_b = get_wallet(wallet_dir_b.path()); +// assert_eq!(wallet_b.balance(), NanoTokens::zero()); + +// let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; +// let to_b_unique_key = ( +// amount, +// wallet_b.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_b = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_b_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; + +// info!("Sending A->B to the network..."); +// client +// .send_spends(transfer_to_b.spends.iter(), false) +// .await?; + +// // save original A spend +// let vec_of_spends = transfer_to_b.spends.into_iter().collect::>(); +// let original_a_spend = if let [spend] = vec_of_spends.as_slice() { +// spend +// } else { +// panic!("Expected to have one spend here!"); +// }; + +// info!("Verifying the transfers from A -> B wallet..."); +// let cash_notes_for_b: Vec<_> = transfer_to_b.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_b[0]).await?; +// wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + +// // Send from B -> C +// let wallet_dir_c = TempDir::new()?; +// let mut wallet_c = get_wallet(wallet_dir_c.path()); +// assert_eq!(wallet_c.balance(), NanoTokens::zero()); + +// let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; +// assert!(!cash_notes_b.is_empty()); +// let to_c_unique_key = ( +// wallet_b.balance(), +// wallet_c.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_c = SignedTransaction::new( +// cash_notes_b.clone(), +// vec![to_c_unique_key], +// wallet_b.address(), +// reason.clone(), +// wallet_b.key(), +// )?; + +// client +// .send_spends(transfer_to_c.spends.iter(), false) +// .await?; + +// info!("Verifying the transfers from B -> C wallet..."); +// let cash_notes_for_c: Vec<_> = transfer_to_c.output_cashnotes.clone(); +// client.verify_cashnote(&cash_notes_for_c[0]).await?; +// wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + +// // Try to double spend from A -> X +// let wallet_dir_x = TempDir::new()?; +// let wallet_x = get_wallet(wallet_dir_x.path()); +// assert_eq!(wallet_x.balance(), NanoTokens::zero()); + +// let to_x_unique_key = ( +// amount, +// wallet_x.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_x = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_x_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// 
.send_spends(transfer_to_x.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> X wallet... It should error out."); +// let cash_notes_for_x: Vec<_> = transfer_to_x.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(15)).await; + +// let result = client.verify_cashnote(&cash_notes_for_x[0]).await; +// info!("Got result while verifying double spend from A -> X: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } + +// // Try to double spend A -> n different random keys +// for _ in 0..20 { +// info!("Spamming double spends on A"); +// let wallet_dir_y = TempDir::new()?; +// let wallet_y = get_wallet(wallet_dir_y.path()); +// assert_eq!(wallet_y.balance(), NanoTokens::zero()); + +// let to_y_unique_key = ( +// amount, +// wallet_y.address(), +// DerivationIndex::random(&mut rng), +// false, +// ); +// let transfer_to_y = SignedTransaction::new( +// cash_notes_a.clone(), +// vec![to_y_unique_key], +// wallet_a.address(), +// reason.clone(), +// wallet_a.key(), +// )?; // reuse the old cash notes +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; +// info!("Verifying the transfers from A -> Y wallet... It should error out."); +// let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); + +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_millis(500)).await; + +// let result = client.verify_cashnote(&cash_notes_for_y[0]).await; +// info!("Got result while verifying double spend from A -> Y: {result:?}"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); + +// // the original A should still be present as one of the double spends +// let res = client +// .get_spend_from_network(original_a_spend.address()) +// .await; +// assert_matches!( +// res, +// Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( +// _ +// ))) +// ); +// if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { +// assert!(spends.iter().contains(original_a_spend)) +// } +// } + +// Ok(()) +// } diff --git a/sn_node/tests/sequential_transfers.rs b/sn_node/tests/sequential_transfers.rs index 66d69337c8..d6906e37d1 100644 --- a/sn_node/tests/sequential_transfers.rs +++ b/sn_node/tests/sequential_transfers.rs @@ -1,54 +1,54 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod common; - -use assert_fs::TempDir; -use common::client::{get_client_and_funded_wallet, get_wallet}; -use eyre::Result; -use sn_client::send; -use sn_logging::LogBuilder; -use sn_transfers::NanoTokens; -use tracing::info; - -#[tokio::test] -async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); - - let first_wallet_dir = TempDir::new()?; - - let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; - let first_wallet_balance = first_wallet.balance().as_nano(); - - let second_wallet_balance = NanoTokens::from(first_wallet_balance / 2); - info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); - let second_wallet_dir = TempDir::new()?; - let mut second_wallet = get_wallet(second_wallet_dir.path()); - - assert_eq!(second_wallet.balance(), NanoTokens::zero()); - - let tokens = send( - first_wallet, - second_wallet_balance, - second_wallet.address(), - &client, - true, - ) - .await?; - info!("Verifying the transfer from first wallet..."); - - client.verify_cashnote(&tokens).await?; - second_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(second_wallet.balance(), second_wallet_balance); - info!("CashNotes deposited to second wallet: {second_wallet_balance}."); - - let first_wallet = get_wallet(&first_wallet_dir); - assert!(second_wallet_balance.as_nano() == first_wallet.balance().as_nano()); - - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
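The commented-out test that follows halves the funded wallet's balance with plain division (`first_wallet_balance / 2`) instead of the old `.as_nano()` round-trip. A minimal sketch of the operator implementation that makes this possible; `Tokens` here is a hypothetical stand-in, not the real `NanoTokens`/`AttoTokens` from `sn_transfers`/`sn_evm`:

use std::ops::Div;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Tokens(u64); // hypothetical stand-in for NanoTokens / AttoTokens

impl Div<u64> for Tokens {
    type Output = Tokens;
    fn div(self, rhs: u64) -> Tokens {
        // integer division on the underlying unit count
        Tokens(self.0 / rhs)
    }
}

fn main() {
    let balance = Tokens(1_000_000_000);
    let half = balance / 2; // reads like the updated test code
    assert_eq!(half, Tokens(500_000_000));
}

The rewritten double-spend tests earlier in this patch lean on the same operator when they write `let amount = balance_a / 2;` directly on the wallet balance.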
+ +// mod common; + +// use assert_fs::TempDir; +// use common::client::{get_client_and_funded_wallet, get_wallet}; +// use eyre::Result; +// use sn_client::send; +// use sn_logging::LogBuilder; +// use sn_transfers::NanoTokens; +// use tracing::info; + +// #[tokio::test] +// async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("sequential_transfer", true); + +// let first_wallet_dir = TempDir::new()?; + +// let (client, first_wallet) = get_client_and_funded_wallet(first_wallet_dir.path()).await?; +// let first_wallet_balance:NanoTokens = first_wallet.balance(); + +// let second_wallet_balance = first_wallet_balance / 2; +// info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); +// let second_wallet_dir = TempDir::new()?; +// let mut second_wallet = get_wallet(second_wallet_dir.path()); + +// assert_eq!(second_wallet.balance(), NanoTokens::zero()); + +// let tokens = send( +// first_wallet, +// second_wallet_balance, +// second_wallet.address(), +// &client, +// true, +// ) +// .await?; +// info!("Verifying the transfer from first wallet..."); + +// client.verify_cashnote(&tokens).await?; +// second_wallet.deposit_and_store_to_disk(&vec![tokens])?; +// assert_eq!(second_wallet.balance(), second_wallet_balance); +// info!("CashNotes deposited to second wallet: {second_wallet_balance}."); + +// let first_wallet = get_wallet(&first_wallet_dir); +// assert!(second_wallet_balance.as_atto() == first_wallet.balance().as_atto()); + +// Ok(()) +// } diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 57e63f05b6..6e11295cbd 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -1,399 +1,404 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
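The storage-payment tests in the next file build batches of random chunk addresses before paying for them. A small self-contained sketch of that pattern, assuming the `rand` crate, with a hypothetical `ChunkAddr` standing in for `sn_protocol`'s `NetworkAddress`/`ChunkAddress` and the `collect` turbofish written out in full:

use rand::Rng;

#[derive(Debug)]
struct ChunkAddr([u8; 32]); // hypothetical stand-in for ChunkAddress

fn main() {
    let mut rng = rand::thread_rng();
    // generate a random number (between 50 and 100) of random 32-byte addresses
    let addrs = (0..rng.gen_range(50..100))
        .map(|_| ChunkAddr(rng.gen()))
        .collect::<Vec<_>>();
    println!("paying for {} random addresses", addrs.len());
}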
- -mod common; - -use crate::common::{client::get_client_and_funded_wallet, random_content}; -use assert_fs::TempDir; -use eyre::{eyre, Result}; -use libp2p::PeerId; -use rand::Rng; -use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_networking::{GetRecordError, NetworkError}; -use sn_protocol::{ - error::Error as ProtocolError, - storage::{ChunkAddress, RegisterAddress}, - NetworkAddress, -}; -use sn_registers::Permissions; -use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; -use std::collections::BTreeMap; -use tokio::time::{sleep, Duration}; -use tracing::info; -use xor_name::XorName; - -#[tokio::test] -async fn storage_payment_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - let balance_before = paying_wallet.balance(); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - // generate a random number (between 50 and 100) of random addresses - let mut rng = rand::thread_rng(); - let random_content_addrs = (0..rng.gen_range(50..100)) - .map(|_| { - sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) - }) - .collect::>(); - info!( - "Paying for {} random addresses...", - random_content_addrs.len() - ); - - let _cost = wallet_client - .pay_for_storage(random_content_addrs.clone().into_iter()) - .await?; - - info!("Verifying balance has been paid from the wallet..."); - - let paying_wallet = wallet_client.into_wallet(); - assert!( - paying_wallet.balance() < balance_before, - "balance should have decreased after payment" - ); - - Ok(()) -} - -#[tokio::test] -async fn storage_payment_fails_with_insufficient_money() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir: TempDir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - let (files_api, content_bytes, _random_content_addrs, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let subset_len = chunks.len() / 3; - let _storage_cost = wallet_client - .pay_for_storage( - chunks - .clone() - .into_iter() - .take(subset_len) - .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), - ) - .await?; - - // now let's request to upload all addresses, even that we've already paid for a subset of them - let verify_store = false; - let res = files_api - .upload_test_bytes(content_bytes.clone(), verify_store) - .await; - assert!( - res.is_err(), - "Should have failed to store as we didnt pay for everything" - ); - Ok(()) -} - -// TODO: reenable -#[ignore = "Currently we do not cache the proofs in the wallet"] -#[tokio::test] -async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir: TempDir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let wallet_original_balance = paying_wallet.balance().as_nano(); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - // generate a random 
number (between 50 and 100) of random addresses - let mut rng = rand::thread_rng(); - let random_content_addrs = (0..rng.gen_range(50..100)) - .map(|_| { - sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) - }) - .collect::>(); - - // let's first pay only for a subset of the addresses - let subset_len = random_content_addrs.len() / 3; - info!("Paying for {subset_len} random addresses...",); - let storage_payment_result = wallet_client - .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len)) - .await?; - - let total_cost = storage_payment_result - .storage_cost - .checked_add(storage_payment_result.royalty_fees) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?; - - // check we've paid only for the subset of addresses, 1 nano per addr - let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); - info!("Verifying new balance on paying wallet is {new_balance} ..."); - let paying_wallet = wallet_client.into_wallet(); - assert_eq!(paying_wallet.balance(), new_balance); - - // let's verify payment proofs for the subset have been cached in the wallet - assert!(random_content_addrs - .iter() - .take(subset_len) - .all(|name| paying_wallet - .api() - .get_recent_payment(&name.as_xorname().unwrap()) - .is_ok())); - - // now let's request to pay for all addresses, even that we've already paid for a subset of them - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let storage_payment_result = wallet_client - .pay_for_storage(random_content_addrs.clone().into_iter()) - .await?; - let total_cost = storage_payment_result - .storage_cost - .checked_add(storage_payment_result.royalty_fees) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?; - - // check we've paid only for addresses we haven't previously paid for, 1 nano per addr - let new_balance = NanoTokens::from( - wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), - ); - println!("Verifying new balance on paying wallet is now {new_balance} ..."); - let paying_wallet = wallet_client.into_wallet(); - assert_eq!(paying_wallet.balance(), new_balance); - - // let's verify payment proofs now for all addresses have been cached in the wallet - // assert!(random_content_addrs - // .iter() - // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name))); - - Ok(()) -} - -#[tokio::test] -async fn storage_payment_chunk_upload_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let (files_api, _content_bytes, file_addr, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - info!("Paying for {} random addresses...", chunks.len()); - - let _cost = wallet_client - .pay_for_storage( - chunks - .iter() - .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), - ) - .await?; - - let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf()); - uploader.set_show_holders(true); - uploader.insert_chunk_paths(chunks); - let _upload_stats = uploader.start_upload().await?; - - let mut files_download = FilesDownload::new(files_api); - let _ = 
files_download.download_file(file_addr, None).await?; - - Ok(()) -} - -#[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] -#[tokio::test] -async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - let chunks_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let (files_api, content_bytes, content_addr, chunks) = - random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - - let mut no_data_payments = BTreeMap::default(); - for (chunk_name, _) in chunks.iter() { - no_data_payments.insert( - *chunk_name, - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), - PeerId::random().to_bytes(), - ), - ); - } - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - sleep(Duration::from_secs(5)).await; - - files_api - .upload_test_bytes(content_bytes.clone(), false) - .await?; +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use crate::common::{client::get_client_and_funded_wallet, random_content}; +// use assert_fs::TempDir; +// use eyre::{eyre, Result}; +// use libp2p::PeerId; +// use rand::Rng; +// use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; +// use sn_evm::{Amount, AttoTokens, PaymentQuote}; +// use sn_logging::LogBuilder; +// use sn_networking::{GetRecordError, NetworkError}; +// use sn_protocol::{ +// error::Error as ProtocolError, +// storage::{ChunkAddress, RegisterAddress}, +// NetworkAddress, +// }; +// use sn_registers::Permissions; +// use std::collections::BTreeMap; +// use tokio::time::{sleep, Duration}; +// use tracing::info; +// use xor_name::XorName; + +// #[tokio::test] +// async fn storage_payment_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// let balance_before = paying_wallet.balance(); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// // generate a random number (between 50 and 100) of random addresses +// let mut rng = rand::thread_rng(); +// let random_content_addrs = (0..rng.gen_range(50..100)) +// .map(|_| { +// sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) +// }) +// .collect::>(); +// info!( +// "Paying for {} random addresses...", +// random_content_addrs.len() +// ); + +// let _cost = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter()) +// .await?; + +// info!("Verifying balance has been paid from the wallet..."); + +// let paying_wallet = wallet_client.into_wallet(); +// assert!( +// paying_wallet.balance() < balance_before, +// "balance should have decreased after payment" +// ); + +// Ok(()) +// } + +// #[tokio::test] +// async fn storage_payment_fails_with_insufficient_money() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir: TempDir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// let (files_api, content_bytes, _random_content_addrs, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let subset_len = chunks.len() / 3; +// let _storage_cost = wallet_client +// .pay_for_storage( +// chunks +// .clone() +// .into_iter() +// .take(subset_len) +// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), +// ) +// .await?; + +// // now let's request to upload all addresses, even that we've already paid for a subset of them +// let verify_store = false; +// let res = files_api +// .upload_test_bytes(content_bytes.clone(), verify_store) +// .await; +// assert!( +// res.is_err(), +// "Should have failed to store as we didnt pay for everything" +// ); +// Ok(()) +// } + +// // TODO: reenable +// #[ignore = "Currently we do not cache the proofs in the wallet"] +// #[tokio::test] +// async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir: TempDir = TempDir::new()?; + +// let (client, paying_wallet) = 
get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let wallet_original_balance = paying_wallet.balance().as_atto(); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// // generate a random number (between 50 and 100) of random addresses +// let mut rng = rand::thread_rng(); +// let random_content_addrs = (0..rng.gen_range(50..100)) +// .map(|_| { +// sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) +// }) +// .collect::>(); + +// // let's first pay only for a subset of the addresses +// let subset_len = random_content_addrs.len() / 3; +// info!("Paying for {subset_len} random addresses...",); +// let storage_payment_result = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len)) +// .await?; + +// let total_cost = storage_payment_result +// .storage_cost +// .checked_add(storage_payment_result.royalty_fees) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?; + +// // check we've paid only for the subset of addresses, 1 nano per addr +// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); +// info!("Verifying new balance on paying wallet is {new_balance} ..."); +// let paying_wallet = wallet_client.into_wallet(); +// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm + +// // let's verify payment proofs for the subset have been cached in the wallet +// assert!(random_content_addrs +// .iter() +// .take(subset_len) +// .all(|name| paying_wallet +// .api() +// .get_recent_payment(&name.as_xorname().unwrap()) +// .is_ok())); + +// // now let's request to pay for all addresses, even that we've already paid for a subset of them +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let storage_payment_result = wallet_client +// .pay_for_storage(random_content_addrs.clone().into_iter()) +// .await?; +// let total_cost = storage_payment_result +// .storage_cost +// .checked_add(storage_payment_result.royalty_fees) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?; + +// // check we've paid only for addresses we haven't previously paid for, 1 nano per addr +// let new_balance = AttoTokens::from_atto( +// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), +// ); +// println!("Verifying new balance on paying wallet is now {new_balance} ..."); +// let paying_wallet = wallet_client.into_wallet(); +// // TODO adapt to evm +// // assert_eq!(paying_wallet.balance(), new_balance); + +// // let's verify payment proofs now for all addresses have been cached in the wallet +// // assert!(random_content_addrs +// // .iter() +// // .all(|name| paying_wallet.get_payment_unique_pubkeys(name) == transfer_outputs_map.get(name))); + +// Ok(()) +// } + +// #[tokio::test] +// async fn storage_payment_chunk_upload_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let (files_api, _content_bytes, file_addr, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// info!("Paying for {} random addresses...", chunks.len()); + +// let _cost = wallet_client +// 
.pay_for_storage( +// chunks +// .iter() +// .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), +// ) +// .await?; + +// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.to_path_buf()); +// uploader.set_show_holders(true); +// uploader.insert_chunk_paths(chunks); +// let _upload_stats = uploader.start_upload().await?; + +// let mut files_download = FilesDownload::new(files_api); +// let _ = files_download.download_file(file_addr, None).await?; + +// Ok(()) +// } + +// #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] +// #[tokio::test] +// async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); + +// let paying_wallet_dir = TempDir::new()?; +// let chunks_dir = TempDir::new()?; + +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let (files_api, content_bytes, content_addr, chunks) = +// random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; + +// let mut no_data_payments = BTreeMap::default(); +// for (chunk_name, _) in chunks.iter() { +// no_data_payments.insert( +// *chunk_name, +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// PeerId::random().to_bytes(), +// ), +// ); +// } + +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; + +// sleep(Duration::from_secs(5)).await; + +// files_api +// .upload_test_bytes(content_bytes.clone(), false) +// .await?; - info!("Reading {content_addr:?} expected to fail"); - let mut files_download = FilesDownload::new(files_api); - assert!( - matches!( - files_download.download_file(content_addr, None).await, - Err(ClientError::Network(NetworkError::GetRecordError( - GetRecordError::RecordNotFound - ))) - ), - "read bytes should fail as we didn't store them" - ); +// info!("Reading {content_addr:?} expected to fail"); +// let mut files_download = FilesDownload::new(files_api); +// assert!( +// matches!( +// files_download.download_file(content_addr, None).await, +// Err(ClientError::Network(NetworkError::GetRecordError( +// GetRecordError::RecordNotFound +// ))) +// ), +// "read bytes should fail as we didn't store them" +// ); - Ok(()) -} +// Ok(()) +// } -#[tokio::test] -async fn storage_payment_register_creation_succeeds() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// #[tokio::test] +// async fn storage_payment_register_creation_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - let paying_wallet_dir = TempDir::new()?; +// let paying_wallet_dir = TempDir::new()?; - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_addr = NetworkAddress::from_register_address(address); - info!("Paying for random 
Register address {net_addr:?} ..."); +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_addr = NetworkAddress::from_register_address(address); +// info!("Paying for random Register address {net_addr:?} ..."); - let _cost = wallet_client - .pay_for_storage(std::iter::once(net_addr)) - .await?; +// let _cost = wallet_client +// .pay_for_storage(std::iter::once(net_addr)) +// .await?; - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) - .await?; +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// .await?; - println!("Newly created register has {} ops", register.read().len()); +// println!("Newly created register has {} ops", register.read().len()); + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(register.read(), retrieved_reg.read()); + +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; + +// println!( +// "Register has {} ops after first write", +// register.read().len() +// ); + +// register.sync(&mut wallet_client, true, None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); + +// assert_eq!(retrieved_reg.read().len(), 1); + +// for index in 1..10 { +// println!("current index is {index}"); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); + +// register.write(&random_entry)?; +// register.sync(&mut wallet_client, true, None).await?; + +// let retrieved_reg = client.get_register(address).await?; + +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); + +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops.len() +// ); +// println!("current local cached ops length is {}", register.ops.len()); + +// assert_eq!(retrieved_reg.read().len(), register.read().len()); - let retrieved_reg = client.get_register(address).await?; +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); - assert_eq!(register.read(), retrieved_reg.read()); +// println!("Current fetched register is {:?}", retrieved_reg.register); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.register.log_update_history() +// ); - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - - println!( - "Register has {} ops after first write", - register.read().len() - ); +// std::thread::sleep(std::time::Duration::from_millis(1000)); +// } - register.sync(&mut wallet_client, true, None).await?; +// Ok(()) +// } - let retrieved_reg = client.get_register(address).await?; +// #[tokio::test] +// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... 
need to check if this test is valid"] +// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// let paying_wallet_dir = TempDir::new()?; - assert_eq!(retrieved_reg.read().len(), 1); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - for index in 1..10 { - println!("current index is {index}"); - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - - register.write(&random_entry)?; - register.sync(&mut wallet_client, true, None).await?; +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_address = +// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - let retrieved_reg = client.get_register(address).await?; +// let mut no_data_payments = BTreeMap::default(); +// no_data_payments.insert( +// net_address +// .as_xorname() +// .expect("RegisterAddress should convert to XorName"), +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// vec![], +// ), +// ); - println!( - "current retrieved register entry length is {}", - retrieved_reg.read().len() - ); - println!("current expected entry length is {}", register.read().len()); - - println!( - "current retrieved register ops length is {}", - retrieved_reg.ops.len() - ); - println!("current local cached ops length is {}", register.ops.len()); +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; - assert_eq!(retrieved_reg.read().len(), register.read().len()); +// // this should fail to store as the amount paid is not enough +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// .await?; - assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// client.get_register(address).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); - println!("Current fetched register is {:?}", retrieved_reg.register); - println!( - "Fetched register has update history of {}", - retrieved_reg.register.log_update_history() - ); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// register.write(&random_entry)?; - std::thread::sleep(std::time::Duration::from_millis(1000)); - } +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// register.sync(&mut wallet_client, false, None).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); - Ok(()) -} - -#[tokio::test] -#[ignore = "Test currently invalid as we always try to pay and upload registers if none found... 
need to check if this test is valid"] -async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); - - let paying_wallet_dir = TempDir::new()?; - - let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let mut rng = rand::thread_rng(); - let xor_name = XorName::random(&mut rng); - let address = RegisterAddress::new(xor_name, client.signer_pk()); - let net_address = - NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); - - let mut no_data_payments = BTreeMap::default(); - no_data_payments.insert( - net_address - .as_xorname() - .expect("RegisterAddress should convert to XorName"), - ( - MainPubkey::new(bls::SecretKey::random().public_key()), - PaymentQuote::test_dummy(xor_name, NanoTokens::from(0)), - vec![], - ), - ); - - let _ = wallet_client - .mut_wallet() - .local_send_storage_payment(&no_data_payments)?; - - // this should fail to store as the amount paid is not enough - let (mut register, _cost, _royalties_fees) = client - .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) - .await?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - client.get_register(address).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - let random_entry = rng.gen::<[u8; 32]>().to_vec(); - register.write(&random_entry)?; - - sleep(Duration::from_secs(5)).await; - assert!(matches!( - register.sync(&mut wallet_client, false, None).await, - Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address - )); - - Ok(()) -} +// Ok(()) +// } diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 6dfd50bd04..78d5c5b9c4 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -52,13 +52,13 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.9" } sn_service_management = { path = "../sn_service_management", version = "0.3.12" } sn-releases = "0.2.6" sn_transfers = { path = "../sn_transfers", version = "0.19.1" } +sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } tracing = { version = "~0.1.26" } tonic = { version = "0.6.2" } uuid = { version = "1.5.0", features = ["v4"] } -which = "6.0.1" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dependencies] nix = { version = "0.27.1", features = ["fs", "user"] } diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index ed10be31cf..ab0ba5fd03 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -23,12 +23,12 @@ use libp2p::Multiaddr; use mockall::{mock, predicate::*, Sequence}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; +use sn_evm::AttoTokens; use sn_service_management::{auditor::AuditorServiceData, control::ServiceControl}; use sn_service_management::{error::Result as ServiceControlResult, NatDetectionStatus}; use sn_service_management::{ DaemonServiceData, FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -234,7 +234,7 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n pid: 
None, peer_id: None, owner: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), status: ServiceStatus::Added, safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -896,7 +896,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1494,7 +1494,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1587,7 +1587,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2156,7 +2156,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2250,7 +2250,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2550,7 +2550,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -2644,7 +2644,7 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index b827e3f6a4..e1cf5faf6c 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -6,9 +6,13 @@ 
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+mod subcommands;
+
+use crate::subcommands::evm_network::EvmNetworkCommand;
 use clap::{Parser, Subcommand};
 use color_eyre::{eyre::eyre, Result};
 use libp2p::Multiaddr;
+use sn_evm::RewardsAddress;
 use sn_logging::{LogBuilder, LogFormat};
 use sn_node_manager::{
     add_services::config::PortRange,
@@ -870,6 +874,12 @@ pub enum LocalSubCmd {
     /// services, which in this case would be 5. The range must also go from lower to higher.
     #[clap(long, value_parser = PortRange::parse)]
     rpc_port: Option<PortRange>,
+    /// Specify the wallet address that will receive the node's earnings.
+    #[clap(long)]
+    rewards_address: RewardsAddress,
+    /// Optionally specify what EVM network to use for payments.
+    #[command(subcommand)]
+    evm_network: Option<EvmNetworkCommand>,
     /// Set to skip the network validation process
     #[clap(long)]
     skip_validation: bool,
@@ -987,6 +997,12 @@ pub enum LocalSubCmd {
     /// services, which in this case would be 5. The range must also go from lower to higher.
     #[clap(long, value_parser = PortRange::parse)]
     rpc_port: Option<PortRange>,
+    /// Specify the wallet address that will receive the node's earnings.
+    #[clap(long)]
+    rewards_address: RewardsAddress,
+    /// Optionally specify what EVM network to use for payments.
+    #[command(subcommand)]
+    evm_network: Option<EvmNetworkCommand>,
     /// Set to skip the network validation process
     #[clap(long)]
     skip_validation: bool,
@@ -1203,6 +1219,8 @@ async fn main() -> Result<()> {
                 owner_prefix,
                 peers,
                 rpc_port,
+                rewards_address,
+                evm_network,
                 skip_validation: _,
             } => {
                 cmd::local::join(
@@ -1221,6 +1239,8 @@ async fn main() -> Result<()> {
                     owner_prefix,
                     peers,
                     rpc_port,
+                    rewards_address,
+                    evm_network.map(|v| v.into()),
                     true,
                     verbosity,
                 )
@@ -1243,6 +1263,8 @@ async fn main() -> Result<()> {
                 owner,
                 owner_prefix,
                 rpc_port,
+                rewards_address,
+                evm_network,
                 skip_validation: _,
             } => {
                 cmd::local::run(
@@ -1261,6 +1283,8 @@ async fn main() -> Result<()> {
                     owner,
                     owner_prefix,
                     rpc_port,
+                    rewards_address,
+                    evm_network.map(|v| v.into()),
                     true,
                     verbosity,
                 )
diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs
new file mode 100644
index 0000000000..89c39a16f6
--- /dev/null
+++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs
@@ -0,0 +1,41 @@
+use clap::Subcommand;
+use sn_evm::{EvmNetwork, EvmNetworkCustom};
+
+#[derive(Subcommand, Clone, Debug)]
+pub enum EvmNetworkCommand {
+    /// Use the Arbitrum One network
+    EvmArbitrumOne,
+
+    /// Use a custom network
+    EvmCustom {
+        /// The RPC URL for the custom network
+        #[arg(long)]
+        rpc_url: String,
+
+        /// The payment token contract address
+        #[arg(long, short)]
+        payment_token_address: String,
+
+        /// The chunk payments contract address
+        #[arg(long, short)]
+        chunk_payments_address: String,
+    },
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<EvmNetwork> for EvmNetworkCommand {
+    fn into(self) -> EvmNetwork {
+        match self {
+            Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne,
+            Self::EvmCustom {
+                rpc_url,
+                payment_token_address,
+                chunk_payments_address,
+            } => EvmNetwork::Custom(EvmNetworkCustom::new(
+                &rpc_url,
+                &payment_token_address,
+                &chunk_payments_address,
+            )),
+        }
+    }
+}
diff --git a/sn_node_manager/src/bin/cli/subcommands/mod.rs b/sn_node_manager/src/bin/cli/subcommands/mod.rs
new file mode 100644
index 0000000000..80b95f1ea5
--- /dev/null
+++ b/sn_node_manager/src/bin/cli/subcommands/mod.rs
@@ -0,0 +1 @@
+pub mod evm_network;
diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs
index 5be4ef15b6..699495d291 100644
--- a/sn_node_manager/src/cmd/local.rs
+++ b/sn_node_manager/src/cmd/local.rs
@@ -15,6 +15,7 @@ use crate::{
     print_banner, status_report, VerbosityLevel,
 };
 use color_eyre::{eyre::eyre, Help, Report, Result};
+use sn_evm::{EvmNetwork, RewardsAddress};
 use sn_logging::LogFormat;
 use sn_peers_acquisition::PeersArgs;
 use sn_releases::{ReleaseType, SafeReleaseRepoActions};
@@ -39,6 +40,8 @@ pub async fn join(
     owner_prefix: Option<String>,
     peers_args: PeersArgs,
     rpc_port: Option<PortRange>,
+    rewards_address: RewardsAddress,
+    evm_network: Option<EvmNetwork>,
     skip_validation: bool,
     verbosity: VerbosityLevel,
 ) -> Result<(), Report> {
@@ -107,6 +110,8 @@ pub async fn join(
         safenode_bin_path,
         skip_validation,
         log_format,
+        rewards_address,
+        evm_network,
     };
     run_network(options, &mut local_node_registry, &ServiceController {}).await?;
     Ok(())
@@ -145,6 +150,8 @@ pub async fn run(
     owner: Option<String>,
     owner_prefix: Option<String>,
     rpc_port: Option<PortRange>,
+    rewards_address: RewardsAddress,
+    evm_network: Option<EvmNetwork>,
     skip_validation: bool,
     verbosity: VerbosityLevel,
 ) -> Result<(), Report> {
@@ -219,6 +226,8 @@ pub async fn run(
         safenode_bin_path,
         skip_validation,
         log_format,
+        rewards_address,
+        evm_network,
     };
     run_network(options, &mut local_node_registry, &ServiceController {}).await?;
diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs
index 36a452819a..5ee8d4c5d7 100644
--- a/sn_node_manager/src/lib.rs
+++ b/sn_node_manager/src/lib.rs
@@ -41,6 +41,7 @@ impl From<u8> for VerbosityLevel {
 use crate::error::{Error, Result};
 use colored::Colorize;
 use semver::Version;
+use sn_evm::AttoTokens;
 use sn_service_management::rpc::RpcActions;
 use sn_service_management::{
     control::ServiceControl, error::Error as ServiceError, rpc::RpcClient, NodeRegistry,
@@ -555,7 +556,7 @@ pub async fn refresh_node_registry(
     // exists.
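     // Unit note: HotWallet still reports NanoTokens, and the change below
     // wraps the raw nano count with AttoTokens::from_u64. If from_u64
     // interprets its argument as atto units, this understates the balance
     // by a factor of 10^9 (1 nano-token = 10^9 atto-tokens); keeping the
     // unscaled nano count is assumed to be intentional during the migration.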
match HotWallet::try_load_from(&node.data_dir_path) { Ok(wallet) => { - node.reward_balance = Some(wallet.balance()); + node.reward_balance = Some(AttoTokens::from_u64(wallet.balance().as_nano())); trace!( "Wallet balance for node {}: {}", node.service_name, @@ -672,6 +673,7 @@ mod tests { use mockall::{mock, predicate::*}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; + use sn_evm::AttoTokens; use sn_logging::LogFormat; use sn_service_management::{ error::{Error as ServiceControlError, Result as ServiceControlResult}, @@ -679,7 +681,6 @@ mod tests { rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, UpgradeOptions, UpgradeResult, }; - use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -780,7 +781,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -884,7 +885,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -951,7 +952,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1061,7 +1062,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1139,7 +1140,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1229,7 +1230,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1318,7 +1319,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1379,7 +1380,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: 
Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1426,7 +1427,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1475,7 +1476,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1523,7 +1524,7 @@ mod tests { owner: None, peer_id: None, pid: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1588,7 +1589,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -1714,7 +1715,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1802,7 +1803,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -1935,7 +1936,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2080,7 +2081,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2220,7 +2221,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), 
safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2361,7 +2362,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2532,7 +2533,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2686,7 +2687,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2843,7 +2844,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -2997,7 +2998,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3154,7 +3155,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3308,7 +3309,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3465,7 +3466,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3622,7 +3623,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3779,7 +3780,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + 
reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -3935,7 +3936,7 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4005,7 +4006,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, @@ -4064,7 +4065,7 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4137,7 +4138,7 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), service_name: "safenode1".to_string(), @@ -4200,7 +4201,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), service_name: "safenode1".to_string(), @@ -4263,7 +4264,7 @@ mod tests { owner: None, pid: None, peer_id: None, - reward_balance: Some(NanoTokens::zero()), + reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), status: ServiceStatus::Stopped, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 58d650cf67..863cc748d9 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; @@ -18,13 +18,13 @@ use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; #[cfg(test)] use mockall::automock; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, - FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, + NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, @@ -37,6 +37,7 @@ use sysinfo::{Pid, System}; pub trait Launcher { fn get_safenode_path(&self) -> PathBuf; fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result; + #[allow(clippy::too_many_arguments)] fn 
launch_node( &self, bootstrap_peers: Vec, @@ -45,6 +46,8 @@ pub trait Launcher { node_port: Option, owner: Option, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option, ) -> Result<()>; fn wait(&self, delay: u64); } @@ -90,6 +93,8 @@ impl Launcher for LocalSafeLauncher { node_port: Option, owner: Option, rpc_socket_addr: SocketAddr, + rewards_address: RewardsAddress, + evm_network: Option, ) -> Result<()> { let mut args = Vec::new(); @@ -126,6 +131,22 @@ impl Launcher for LocalSafeLauncher { args.push("--rpc".to_string()); args.push(rpc_socket_addr.to_string()); + args.push("--rewards-address".to_string()); + args.push(rewards_address.to_string()); + + if let Some(network) = evm_network { + args.push(format!("evm-{}", network.identifier())); + + if let EvmNetwork::Custom(custom) = network { + args.push("--rpc-url".to_string()); + args.push(custom.rpc_url_http.to_string()); + args.push("--payment-token-address".to_string()); + args.push(custom.payment_token_address.to_string()); + args.push("--chunk-payments-address".to_string()); + args.push(custom.chunk_payments_address.to_string()); + } + } + Command::new(self.safenode_bin_path.clone()) .args(args) .stdout(Stdio::inherit()) @@ -197,13 +218,21 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res if !keep_directories { // At this point we don't allow path overrides, so deleting the data directory will clear // the log directory also. - std::fs::remove_dir_all(&node.data_dir_path)?; - debug!("Removed node data directory: {:?}", node.data_dir_path); - println!( - " {} Removed {}", - "✓".green(), - node.data_dir_path.to_string_lossy() - ); + if let Err(e) = std::fs::remove_dir_all(&node.data_dir_path) { + error!("Failed to remove node data directory: {:?}", e); + println!( + " {} Failed to remove {}: {e}", + "✗".red(), + node.data_dir_path.to_string_lossy() + ); + } else { + debug!("Removed node data directory: {:?}", node.data_dir_path); + println!( + " {} Removed {}", + "✓".green(), + node.data_dir_path.to_string_lossy() + ); + } } } @@ -225,6 +254,8 @@ pub struct LocalNetworkOptions { pub safenode_bin_path: PathBuf, pub skip_validation: bool, pub log_format: Option, + pub rewards_address: RewardsAddress, + pub evm_network: Option, } pub async fn run_network( @@ -301,6 +332,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -348,6 +381,8 @@ pub async fn run_network( number, owner, rpc_socket_addr, + rewards_address: options.rewards_address, + evm_network: options.evm_network.clone(), version: get_bin_version(&launcher.get_safenode_path())?, }, &launcher, @@ -374,22 +409,23 @@ pub async fn run_network( validate_network(node_registry, bootstrap_peers.clone()).await?; } - if !options.join { - println!("Launching the faucet server..."); - let version = get_bin_version(&options.faucet_bin_path)?; - let pid = launcher.launch_faucet(&bootstrap_peers[0])?; - let faucet = FaucetServiceData { - faucet_path: options.faucet_bin_path, - local: true, - log_dir_path: get_faucet_data_dir(), - pid: Some(pid), - service_name: "faucet".to_string(), - status: ServiceStatus::Running, - user: get_username()?, - version, - }; - node_registry.faucet = Some(faucet); - } + // TODO: re-enable faucet when it can do EVM payments or when we switch back to native payments + // if !options.join { + // println!("Launching the faucet 
server..."); + // let pid = launcher.launch_faucet(&bootstrap_peers[0])?; + // let version = get_bin_version(&options.faucet_bin_path)?; + // let faucet = FaucetServiceData { + // faucet_path: options.faucet_bin_path, + // local: true, + // log_dir_path: get_faucet_data_dir(), + // pid: Some(pid), + // service_name: "faucet".to_string(), + // status: ServiceStatus::Running, + // user: get_username()?, + // version, + // }; + // node_registry.faucet = Some(faucet); + // } Ok(()) } @@ -404,6 +440,8 @@ pub struct RunNodeOptions { pub number: u16, pub owner: Option, pub rpc_socket_addr: SocketAddr, + pub rewards_address: RewardsAddress, + pub evm_network: Option, pub version: String, } @@ -421,6 +459,8 @@ pub async fn run_node( run_options.node_port, run_options.owner.clone(), run_options.rpc_socket_addr, + run_options.rewards_address, + run_options.evm_network, )?; launcher.wait(run_options.interval); @@ -532,6 +572,7 @@ mod tests { use libp2p_identity::PeerId; use mockall::mock; use mockall::predicate::*; + use sn_evm::utils::dummy_address; use sn_service_management::{ error::Result as RpcResult, rpc::{NetworkInfo, NodeInfo, RecordAddress, RpcActions}, @@ -557,6 +598,7 @@ mod tests { async fn run_node_should_launch_the_genesis_node() -> Result<()> { let mut mock_launcher = MockLauncher::new(); let mut mock_rpc_client = MockRpcClient::new(); + let rewards_address = dummy_address(); let peer_id = PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?; let rpc_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 13000); @@ -569,9 +611,11 @@ mod tests { eq(None), eq(None), eq(rpc_socket_addr), + eq(rewards_address), + eq(None), ) .times(1) - .returning(|_, _, _, _, _, _| Ok(())); + .returning(|_, _, _, _, _, _, _, _| Ok(())); mock_launcher .expect_wait() .with(eq(100)) @@ -617,6 +661,8 @@ mod tests { number: 1, owner: None, rpc_socket_addr, + rewards_address, + evm_network: None, version: "0.100.12".to_string(), }, &mock_launcher, diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 178bf6671c..2cd4de4c17 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -31,6 +31,7 @@ sha2 = "0.10.7" sn_build_info = { path = "../sn_build_info", version = "0.1.13" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_registers = { path = "../sn_registers", version = "0.3.19" } +sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/src/messages.rs b/sn_protocol/src/messages.rs index 1cdab98f2e..cbef76ab90 100644 --- a/sn_protocol/src/messages.rs +++ b/sn_protocol/src/messages.rs @@ -16,7 +16,7 @@ mod response; pub use self::{ chunk_proof::{ChunkProof, Nonce}, - cmd::{Cmd, Hash}, + cmd::Cmd, node_id::NodeId, query::Query, register::RegisterCmd, diff --git a/sn_protocol/src/messages/cmd.rs b/sn_protocol/src/messages/cmd.rs index 094d93cae4..a9618ba3f8 100644 --- a/sn_protocol/src/messages/cmd.rs +++ b/sn_protocol/src/messages/cmd.rs @@ -9,8 +9,7 @@ use crate::{storage::RecordType, NetworkAddress}; use serde::{Deserialize, Serialize}; -// TODO: remove this dependency and define these types herein. -pub use sn_transfers::{Hash, PaymentQuote}; +pub use sn_evm::PaymentQuote; /// Data and CashNote cmds - recording spends or creating, updating, and removing data. 
/// diff --git a/sn_protocol/src/messages/response.rs b/sn_protocol/src/messages/response.rs index 28fb8035f3..17c986f581 100644 --- a/sn_protocol/src/messages/response.rs +++ b/sn_protocol/src/messages/response.rs @@ -12,7 +12,7 @@ use super::ChunkProof; use bytes::Bytes; use core::fmt; use serde::{Deserialize, Serialize}; -use sn_transfers::{MainPubkey, PaymentQuote}; +use sn_evm::{PaymentQuote, RewardsAddress}; use std::fmt::Debug; /// The response to a query, containing the query result. @@ -26,8 +26,8 @@ pub enum QueryResponse { GetStoreCost { /// The store cost quote for storing the next record. quote: Result, - /// The cash_note MainPubkey to pay this node's store cost to. - payment_address: MainPubkey, + /// The rewards address to pay this node's store cost to. + payment_address: RewardsAddress, /// Node's Peer Address peer_address: NetworkAddress, }, diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index ee88185752..04921730ef 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use lazy_static::lazy_static; -use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK, PAYMENT_FORWARD_PK}; +use sn_transfers::{FOUNDATION_PK, GENESIS_PK, NETWORK_ROYALTIES_PK}; lazy_static! { /// The node version used during Identify Behaviour. @@ -65,7 +65,5 @@ fn get_key_version_str() -> String { let _ = g_k_str.split_off(6); let mut n_k_str = NETWORK_ROYALTIES_PK.to_hex(); let _ = n_k_str.split_off(6); - let mut p_k_str = PAYMENT_FORWARD_PK.to_hex(); - let _ = p_k_str.split_off(6); - format!("{f_k_str}_{g_k_str}_{n_k_str}_{p_k_str}") + format!("{f_k_str}_{g_k_str}_{n_k_str}") } diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 46c6d80d26..d5a9119a46 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -23,7 +23,7 @@ sn_logging = { path = "../sn_logging", version = "0.2.34" } sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.1" } +sn_evm = { path = "../sn_evm", version = "0.1.0" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index ffd6af0742..2cc7060d33 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -11,9 +11,9 @@ use async_trait::async_trait; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use service_manager::{ServiceInstallCtx, ServiceLabel}; +use sn_evm::AttoTokens; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; -use sn_transfers::NanoTokens; use std::{ ffi::OsString, net::{Ipv4Addr, SocketAddr}, @@ -282,7 +282,7 @@ pub struct NodeServiceData { )] pub peer_id: Option, pub pid: Option, - pub reward_balance: Option, + pub reward_balance: Option, pub rpc_socket_addr: SocketAddr, pub safenode_path: PathBuf, pub service_name: String, From 6be5c30331608e6642f26831b68ceeb0c249a9e8 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Thu, 26 Sep 2024 15:52:34 +0200 Subject: [PATCH 058/255] feat(launchpad): 35gb nodes --- node-launchpad/src/components/popup/manage_nodes.rs | 2 +- node-launchpad/src/components/status.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git 
a/node-launchpad/src/components/popup/manage_nodes.rs b/node-launchpad/src/components/popup/manage_nodes.rs index 3117d22077..2ad7674730 100644 --- a/node-launchpad/src/components/popup/manage_nodes.rs +++ b/node-launchpad/src/components/popup/manage_nodes.rs @@ -23,7 +23,7 @@ use crate::{ use super::super::{utils::centered_rect_fixed, Component}; -pub const GB_PER_NODE: usize = 5; +pub const GB_PER_NODE: usize = 35; pub const MB: usize = 1000 * 1000; pub const GB: usize = MB * 1000; pub const MAX_NODE_COUNT: usize = 50; diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 82d12c968c..3283a16de9 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -742,9 +742,12 @@ impl Component for Status { ]); let line2 = Line::from(vec![Span::styled( - "Each node will use 5GB of storage and a small amount of memory, \ + format!( + "Each node will use {}GB of storage and a small amount of memory, \ CPU, and Network bandwidth. Most computers can run many nodes at once, \ but we recommend you add them gradually", + GB_PER_NODE + ), Style::default().fg(LIGHT_PERIWINKLE), )]); From 00bcf9a7d05cc046e6940b81ce83421df9567d3d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 27 Sep 2024 13:25:25 +0200 Subject: [PATCH 059/255] feat!: merge EVM changes into `autonomi` --- Cargo.lock | 4 + autonomi/Cargo.toml | 8 +- autonomi/src/client/data.rs | 244 +++--------------- autonomi/src/client/files.rs | 76 +----- autonomi/src/client/mod.rs | 43 +-- autonomi/src/client/registers.rs | 105 +++----- autonomi/src/client/vault.rs | 32 ++- autonomi/src/evm/client/data.rs | 221 ++++++++++++++++ autonomi/src/evm/client/files.rs | 60 +++++ autonomi/src/evm/client/mod.rs | 33 +++ autonomi/src/evm/client/registers.rs | 153 +++++++++++ autonomi/src/evm/client/vault.rs | 4 + autonomi/src/evm/mod.rs | 5 + autonomi/src/lib.rs | 18 +- autonomi/src/native/client/data.rs | 226 ++++++++++++++++ autonomi/src/native/client/files.rs | 66 +++++ autonomi/src/native/client/mod.rs | 35 +++ autonomi/src/native/client/registers.rs | 80 ++++++ autonomi/src/{ => native}/client/transfers.rs | 18 +- autonomi/src/native/client/vault.rs | 4 + autonomi/src/native/mod.rs | 6 + autonomi/src/{ => native}/secrets.rs | 0 autonomi/src/{ => native}/wallet/error.rs | 0 autonomi/src/{ => native}/wallet/mod.rs | 2 +- autonomi/tests/common/mod.rs | 49 +++- autonomi/tests/evm/file.rs | 78 ++++++ autonomi/tests/evm/mod.rs | 11 + autonomi/tests/evm/put.rs | 26 ++ autonomi/tests/evm/register.rs | 48 ++++ autonomi/tests/evm/wallet.rs | 36 +++ autonomi/tests/integration.rs | 5 + autonomi/tests/{ => native}/file.rs | 8 +- autonomi/tests/native/mod.rs | 10 + autonomi/tests/{ => native}/put.rs | 6 +- autonomi/tests/{ => native}/register.rs | 6 +- 35 files changed, 1336 insertions(+), 390 deletions(-) create mode 100644 autonomi/src/evm/client/data.rs create mode 100644 autonomi/src/evm/client/files.rs create mode 100644 autonomi/src/evm/client/mod.rs create mode 100644 autonomi/src/evm/client/registers.rs create mode 100644 autonomi/src/evm/client/vault.rs create mode 100644 autonomi/src/evm/mod.rs create mode 100644 autonomi/src/native/client/data.rs create mode 100644 autonomi/src/native/client/files.rs create mode 100644 autonomi/src/native/client/mod.rs create mode 100644 autonomi/src/native/client/registers.rs rename autonomi/src/{ => native}/client/transfers.rs (96%) create mode 100644 autonomi/src/native/client/vault.rs create mode 100644 autonomi/src/native/mod.rs 
rename autonomi/src/{ => native}/secrets.rs (100%) rename autonomi/src/{ => native}/wallet/error.rs (100%) rename autonomi/src/{ => native}/wallet/mod.rs (99%) create mode 100644 autonomi/tests/evm/file.rs create mode 100644 autonomi/tests/evm/mod.rs create mode 100644 autonomi/tests/evm/put.rs create mode 100644 autonomi/tests/evm/register.rs create mode 100644 autonomi/tests/evm/wallet.rs create mode 100644 autonomi/tests/integration.rs rename autonomi/tests/{ => native}/file.rs (94%) create mode 100644 autonomi/tests/native/mod.rs rename autonomi/tests/{ => native}/put.rs (92%) rename autonomi/tests/{ => native}/register.rs (95%) diff --git a/Cargo.lock b/Cargo.lock index b952004587..92a270206b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1145,6 +1145,8 @@ dependencies = [ "bip39", "blsttc", "bytes", + "const-hex", + "evmlib", "eyre", "libp2p 0.54.1", "rand 0.8.5", @@ -1152,6 +1154,8 @@ dependencies = [ "self_encryption", "serde", "sn_client", + "sn_evm", + "sn_networking", "sn_peers_acquisition", "sn_protocol", "sn_registers", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index c8da7598df..0bae03e140 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/maidsafe/safe_network" [features] default = ["data"] -full = ["data", "files", "fs", "registers", "transfers", "vault"] +full = ["data", "files", "fs", "registers", "transfers", "vault", "native-payments"] data = ["transfers"] vault = ["data"] files = ["transfers", "data"] @@ -19,21 +19,27 @@ fs = [] local = ["sn_client/local-discovery"] registers = ["transfers"] transfers = [] +native-payments = [] +evm-payments = [] [dependencies] bip39 = "2.0.0" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } +const-hex = "1.12.0" +evmlib = { path = "../evmlib", version = "0.1" } libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } sn_client = { path = "../sn_client", version = "0.110.1" } +sn_networking = { path = "../sn_networking", version = "0.18.0" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" } sn_protocol = { version = "0.17.9", path = "../sn_protocol" } sn_registers = { path = "../sn_registers", version = "0.3.19" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } +sn_evm = { path = "../sn_evm" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync", "fs"] } tracing = { version = "~0.1.26" } diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 8f54c35387..9ee4559c20 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -1,30 +1,16 @@ -use std::collections::{BTreeMap, HashSet}; - -use crate::self_encryption::{encrypt, DataMapLevel}; -use crate::Client; +use crate::client::{Client, ClientWrapper}; +use crate::self_encryption::DataMapLevel; use bytes::Bytes; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; +use evmlib::wallet; +use libp2p::kad::Quorum; use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_client::{ - networking::{GetRecordCfg, NetworkError, PutRecordCfg}, - transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote}, - StoragePaymentResult, -}; -use sn_protocol::{ - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, - }, - NetworkAddress, -}; -use sn_transfers::Payment; -use tokio::task::{JoinError, JoinSet}; +use 
sn_networking::{GetRecordCfg, NetworkError}; +use sn_protocol::storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}; +use sn_protocol::NetworkAddress; +use std::collections::HashSet; +use tokio::task::JoinError; use xor_name::XorName; -use super::transfers::SendSpendsError; - /// Errors that can occur during the put operation. #[derive(Debug, thiserror::Error)] pub enum PutError { @@ -36,8 +22,12 @@ pub enum PutError { VaultXorName, #[error("A network error occurred.")] Network(#[from] NetworkError), + #[cfg(feature = "native-payments")] #[error("A wallet error occurred.")] Wallet(#[from] sn_transfers::WalletError), + #[cfg(feature = "evm-payments")] + #[error("A wallet error occurred.")] + EvmWallet(#[from] sn_evm::EvmError), #[error("Error occurred during payment.")] PayError(#[from] PayError), } @@ -45,14 +35,21 @@ pub enum PutError { /// Errors that can occur during the pay operation. #[derive(Debug, thiserror::Error)] pub enum PayError { + #[error("Could not get store quote for: {0:?} after several retries")] + CouldNotGetStoreQuote(XorName), #[error("Could not get store costs: {0:?}")] - CouldNotGetStoreCosts(sn_client::networking::NetworkError), + CouldNotGetStoreCosts(NetworkError), #[error("Could not simultaneously fetch store costs: {0:?}")] JoinError(JoinError), + #[cfg(feature = "native-payments")] #[error("Hot wallet error")] WalletError(#[from] sn_transfers::WalletError), + #[cfg(feature = "evm-payments")] + #[error("Wallet error: {0:?}")] + EvmWalletError(#[from] wallet::Error), + #[cfg(feature = "native-payments")] #[error("Failed to send spends")] - SendSpendsError(#[from] SendSpendsError), + SendSpendsError(#[from] crate::native::client::transfers::SendSpendsError), } /// Errors that can occur during the get operation. @@ -63,9 +60,9 @@ pub enum GetError { #[error("Failed to decrypt data.")] Decryption(crate::self_encryption::Error), #[error("General networking error: {0:?}")] - Network(#[from] sn_client::networking::NetworkError), + Network(#[from] NetworkError), #[error("General protocol error: {0:?}")] - Protocol(#[from] sn_client::protocol::Error), + Protocol(#[from] sn_protocol::Error), } impl Client { @@ -83,6 +80,7 @@ impl Client { /// Get a raw chunk from the network. pub async fn fetch_chunk(&self, addr: XorName) -> Result { tracing::info!("Getting chunk: {addr:?}"); + let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key(); let get_cfg = GetRecordCfg { @@ -92,8 +90,10 @@ impl Client { expected_holders: HashSet::new(), is_register: false, }; + let record = self.network.get_record_from_network(key, &get_cfg).await?; let header = RecordHeader::from_record(&record)?; + if let RecordKind::Chunk = header.kind { let chunk: Chunk = try_deserialize_record(&record)?; Ok(chunk) @@ -102,41 +102,10 @@ impl Client { } } - /// Upload a piece of data to the network. This data will be self-encrypted, - /// and the data map XOR address will be returned. - pub async fn put(&mut self, data: Bytes, wallet: &mut HotWallet) -> Result { - let now = std::time::Instant::now(); - let (map, chunks) = encrypt(data)?; - tracing::debug!("Encryption took: {:.2?}", now.elapsed()); - - let map_xor_name = *map.address().xorname(); - - let mut xor_names = vec![]; - xor_names.push(map_xor_name); - for chunk in &chunks { - xor_names.push(*chunk.name()); - } - - let StoragePaymentResult { skipped_chunks, .. 
} = - self.pay(xor_names.into_iter(), wallet).await?; - - // TODO: Upload in parallel - if !skipped_chunks.contains(map.name()) { - self.upload_chunk(map, wallet).await?; - } - for chunk in chunks { - if skipped_chunks.contains(chunk.name()) { - continue; - } - self.upload_chunk(chunk, wallet).await?; - } - - Ok(map_xor_name) - } - - // Fetch and decrypt all chunks in the data map. + /// Fetch and decrypt all chunks in the data map. async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { let mut encrypted_chunks = vec![]; + for info in data_map.infos() { let chunk = self.fetch_chunk(info.dst_hash).await?; let chunk = EncryptedChunk { @@ -152,7 +121,7 @@ impl Client { Ok(data) } - // Unpack a wrapped data map and fetch all bytes using self-encryption. + /// Unpack a wrapped data map and fetch all bytes using self-encryption. async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result { let mut data_map_level: DataMapLevel = rmp_serde::from_slice(data_map_bytes).map_err(GetError::InvalidDataMap)?; @@ -175,153 +144,24 @@ impl Client { }; } } +} - pub(crate) async fn pay( - &mut self, - content_addrs: impl Iterator, - wallet: &mut HotWallet, - ) -> Result { - let mut tasks = JoinSet::new(); - for content_addr in content_addrs { - let network = self.network.clone(); - tasks.spawn(async move { - // TODO: retry, but where? - let cost = network - .get_store_costs_from_network( - NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), - vec![], - ) - .await - .map_err(PayError::CouldNotGetStoreCosts); - - tracing::debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - (content_addr, cost) - }); - } - tracing::debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(content_addr); - tracing::debug!("Skipped existing chunk {content_addr:?}"); - } else { - tracing::debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(content_addr, (cost.1, cost.2, cost.0.to_bytes())); - } - } - Ok((content_addr, Err(err))) => { - tracing::warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(PayError::JoinError(e)); - } - } - } - - let (storage_cost, royalty_fees) = if cost_map.is_empty() { - (NanoTokens::zero(), NanoTokens::zero()) - } else { - self.pay_for_records(&cost_map, wallet).await? 
- }; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) +pub trait Data: ClientWrapper { + async fn get(&self, data_map_addr: XorName) -> Result { + self.client().get(data_map_addr).await } - async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - wallet: &mut HotWallet, - ) -> Result<(NanoTokens, NanoTokens), PayError> { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transactions(wallet).await; - - let total_cost = wallet.local_send_storage_payment(cost_map)?; - - // send to network - tracing::trace!("Sending storage payment transfer to the network"); - let spend_attempt_result = self - .send_spends(wallet.unconfirmed_spend_requests().iter()) - .await; - - tracing::trace!("send_spends of {} chunks completed", cost_map.len(),); - - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. - if let Err(error) = spend_attempt_result { - tracing::warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later."); - - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let SendSpendsError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - tracing::warn!( - "Removing double spends CashNote from wallet: {cash_note_key:?}" - ); - wallet.mark_notes_as_spent([cash_note_key]); - wallet.clear_specific_spend_request(*cash_note_key); - } - } - - wallet.store_unconfirmed_spend_requests()?; - - return Err(PayError::SendSpendsError(error)); - } else { - tracing::info!("Spend has completed: {:?}", spend_attempt_result); - wallet.clear_confirmed_spend_requests(); - } - tracing::trace!("clear up spends of {} chunks completed", cost_map.len(),); - - Ok(total_cost) + async fn fetch_chunk(&self, addr: XorName) -> Result { + self.client().fetch_chunk(addr).await } - /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. - async fn upload_chunk(&self, chunk: Chunk, wallet: &mut HotWallet) -> Result<(), PutError> { - let xor_name = *chunk.name(); - let (payment, payee) = self.get_recent_payment_for_addr(&xor_name, wallet)?; - - self.store_chunk(chunk, payee, payment).await?; - - wallet.api().remove_payment_transaction(&xor_name); - - Ok(()) + async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { + self.client().fetch_from_data_map(data_map).await } - /// Actually store a chunk to a peer. - async fn store_chunk( - &self, - chunk: Chunk, - payee: PeerId, - payment: Payment, - ) -> Result<(), PutError> { - tracing::debug!("Storing chunk: {chunk:?} to {payee:?}"); - - let key = chunk.network_address().to_record_key(); - - let record_kind = RecordKind::ChunkWithPayment; - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - Ok(self.network.put_record(record, &put_cfg).await?) 
+ async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result { + self.client() + .fetch_from_data_map_chunk(data_map_bytes) + .await } } diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs index 457eba8fa5..7c01776e47 100644 --- a/autonomi/src/client/files.rs +++ b/autonomi/src/client/files.rs @@ -1,15 +1,11 @@ -use std::{collections::HashMap, path::PathBuf}; - +use crate::client::data::{GetError, PutError}; +use crate::client::{Client, ClientWrapper}; use bytes::Bytes; use serde::{Deserialize, Serialize}; -use sn_transfers::HotWallet; -use walkdir::WalkDir; +use std::collections::HashMap; +use std::path::PathBuf; use xor_name::XorName; -use crate::Client; - -use super::data::{GetError, PutError}; - /// Directory-like structure that containing file paths and their metadata. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Root { @@ -22,9 +18,9 @@ pub struct Root { /// This is similar to ['inodes'](https://en.wikipedia.org/wiki/Inode) in Unix-like filesystems. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct FilePointer { - data_map: XorName, - created_at: u64, - modified_at: u64, + pub(crate) data_map: XorName, + pub(crate) created_at: u64, + pub(crate) modified_at: u64, } #[derive(Debug, thiserror::Error)] @@ -44,45 +40,9 @@ pub enum UploadError { } impl Client { - /// Upload a directory to the network. The directory is recursively walked. - #[cfg(feature = "fs")] - pub async fn upload_from_dir( - &mut self, - path: PathBuf, - wallet: &mut HotWallet, - ) -> Result<(Root, XorName), UploadError> { - let mut map = HashMap::new(); - for entry in WalkDir::new(path) { - let entry = entry?; - if !entry.file_type().is_file() { - continue; - } - let path = entry.path().to_path_buf(); - tracing::info!("Uploading file: {path:?}"); - let file = upload_from_file(self, path.clone(), wallet).await?; - map.insert(path, file); - } - - let root = Root { map }; - let root_serialized = Bytes::from(rmp_serde::to_vec(&root)?); - - #[cfg(feature = "vault")] - self.write_bytes_to_vault_if_defined(root_serialized.clone(), wallet) - .await?; - - let xor_name = self.put(root_serialized, wallet).await?; - - Ok((root, xor_name)) - } - /// Fetch a directory from the network. 
pub async fn fetch_root(&mut self, address: XorName) -> Result { let data = self.get(address).await?; - - Self::deserialise_root(data) - } - - pub fn deserialise_root(data: Bytes) -> Result { let root: Root = rmp_serde::from_slice(&data[..]).expect("TODO"); Ok(root) @@ -95,20 +55,12 @@ impl Client { } } -async fn upload_from_file( - client: &mut Client, - path: PathBuf, - wallet: &mut HotWallet, -) -> Result { - let data = tokio::fs::read(path).await?; - let data = Bytes::from(data); - - let addr = client.put(data, wallet).await?; +pub trait Files: ClientWrapper { + async fn fetch_root(&mut self, address: XorName) -> Result { + self.client_mut().fetch_root(address).await + } - // TODO: Set created_at and modified_at - Ok(FilePointer { - data_map: addr, - created_at: 0, - modified_at: 0, - }) + async fn fetch_file(&mut self, file: &FilePointer) -> Result { + self.client_mut().fetch_file(file).await + } } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index b50d7e7ce8..e7cf97f3a4 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -1,3 +1,12 @@ +#[cfg(feature = "data")] +pub mod data; +#[cfg(feature = "files")] +pub mod files; +#[cfg(feature = "registers")] +pub mod registers; +#[cfg(feature = "vault")] +pub mod vault; + use std::{collections::HashSet, time::Duration}; #[cfg(feature = "vault")] @@ -7,21 +16,6 @@ use sn_client::networking::{multiaddr_is_global, Network, NetworkBuilder, Networ use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; use tokio::{sync::mpsc::Receiver, time::interval}; -#[cfg(feature = "data")] -#[cfg_attr(docsrs, doc(cfg(feature = "data")))] -mod data; -#[cfg(feature = "files")] -#[cfg_attr(docsrs, doc(cfg(feature = "files")))] -mod files; -#[cfg(feature = "registers")] -#[cfg_attr(docsrs, doc(cfg(feature = "registers")))] -mod registers; -#[cfg(feature = "transfers")] -#[cfg_attr(docsrs, doc(cfg(feature = "transfers")))] -mod transfers; -#[cfg(feature = "vault")] -mod vault; - /// Time before considering the connection timed out. 
pub const CONNECT_TIMEOUT_SECS: u64 = 20; @@ -175,3 +169,22 @@ async fn handle_event_receiver( // TODO: Handle closing of network events sender } + +pub trait ClientWrapper { + fn from_client(client: Client) -> Self; + + fn client(&self) -> &Client; + + fn client_mut(&mut self) -> &mut Client; + + fn into_client(self) -> Client; + + fn network(&self) -> &Network { + &self.client().network + } + + async fn connect(peers: &[Multiaddr]) -> Result { + let client = Client::connect(peers).await?; + Ok(Self::from_client(client)) + } +} diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 6aa77cf74b..126b8c10ac 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -1,26 +1,20 @@ use std::collections::BTreeSet; -use crate::Client; - +use super::data::PayError; +use crate::client::{Client, ClientWrapper}; use bls::SecretKey; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; -use sn_client::networking::GetRecordCfg; -use sn_client::networking::NetworkError; -use sn_client::networking::PutRecordCfg; -use sn_client::registers::EntryHash; -use sn_client::registers::Permissions; -use sn_client::registers::Register as ClientRegister; -use sn_client::registers::SignedRegister; -use sn_client::transfers::HotWallet; +use sn_networking::GetRecordCfg; +use sn_networking::NetworkError; +use sn_networking::PutRecordCfg; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; use sn_protocol::storage::RegisterAddress; use sn_protocol::NetworkAddress; -use xor_name::XorName; - -use super::data::PayError; +use sn_registers::EntryHash; +use sn_registers::SignedRegister; #[derive(Debug, thiserror::Error)] pub enum RegisterError { @@ -32,8 +26,12 @@ pub enum RegisterError { FailedVerification, #[error("Payment failure occurred during register creation.")] Pay(#[from] PayError), + #[cfg(feature = "native-payments")] #[error("Failed to retrieve wallet payment")] Wallet(#[from] sn_transfers::WalletError), + #[cfg(feature = "evm-payments")] + #[error("Failed to retrieve wallet payment")] + EvmWallet(#[from] evmlib::wallet::Error), #[error("Failed to write to low-level register")] Write(#[source] sn_registers::Error), #[error("Failed to sign register")] @@ -42,7 +40,7 @@ pub enum RegisterError { #[derive(Clone, Debug)] pub struct Register { - inner: SignedRegister, + pub(crate) inner: SignedRegister, } impl Register { @@ -66,68 +64,6 @@ impl Register { } impl Client { - /// Creates a new Register with an initial value and uploads it to the network. - pub async fn create_register( - &mut self, - value: Bytes, - name: XorName, - owner: SecretKey, - wallet: &mut HotWallet, - ) -> Result { - let pk = owner.public_key(); - - // Owner can write to the register. 
- let permissions = Permissions::new_with([pk]); - let mut register = ClientRegister::new(pk, name, permissions); - let address = NetworkAddress::from_register_address(*register.address()); - - let entries = register - .read() - .into_iter() - .map(|(entry_hash, _value)| entry_hash) - .collect(); - register - .write(value.into(), &entries, &owner) - .map_err(RegisterError::Write)?; - - let _payment_result = self - .pay(std::iter::once(register.address().xorname()), wallet) - .await?; - - let (payment, payee) = - self.get_recent_payment_for_addr(®ister.address().xorname(), wallet)?; - - let signed_register = register - .clone() - .into_signed(&owner) - .map_err(RegisterError::CouldNotSign)?; - - let record = Record { - key: address.to_record_key(), - value: try_serialize_record( - &(payment, &signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - - self.network.put_record(record, &put_cfg).await?; - - Ok(Register { - inner: signed_register, - }) - } - /// Fetches a Register from the network. pub async fn fetch_register( &self, @@ -208,3 +144,20 @@ impl Client { Ok(()) } } + +pub trait Registers: ClientWrapper { + async fn fetch_register(&self, address: RegisterAddress) -> Result { + self.client().fetch_register(address).await + } + + async fn update_register( + &self, + register: Register, + new_value: Bytes, + owner: SecretKey, + ) -> Result<(), RegisterError> { + self.client() + .update_register(register, new_value, owner) + .await + } +} diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 5d27aa4cf9..2e08f23ce8 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; -use crate::Client; +use super::data::PutError; +use crate::client::{Client, ClientWrapper}; use bls::SecretKey; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; @@ -15,8 +16,6 @@ use sn_protocol::{ }; use tracing::info; -use super::data::PutError; - #[derive(Debug, thiserror::Error)] pub enum VaultError { #[error("Could not generate Vault secret key from entropy: {0:?}")] @@ -32,7 +31,7 @@ pub enum VaultError { } impl Client { - /// Add an vault secret key to the client + /// Add a vault secret key to the client /// /// The secret key is derived from the supplied entropy bytes. 
 pub fn with_vault_entropy(mut self, bytes: Bytes) -> Result<Self, VaultError> {
@@ -179,3 +178,28 @@
         Ok(Some(next_count))
     }
 }
+
+pub trait Vault: ClientWrapper {
+    fn with_vault_entropy(mut self, bytes: Bytes) -> Result<Self, VaultError> {
+        let client = self.into_client().with_vault_entropy(bytes)?;
+        Ok(Self::from_client(client))
+    }
+
+    async fn fetch_and_decrypt_vault(&self) -> Result<Option<Bytes>, VaultError> {
+        self.client().fetch_and_decrypt_vault().await
+    }
+
+    async fn get_vault_from_network(&self) -> Result<Scratchpad, VaultError> {
+        self.client().get_vault_from_network().await
+    }
+
+    async fn write_bytes_to_vault_if_defined(
+        &mut self,
+        data: Bytes,
+        wallet: &mut HotWallet,
+    ) -> Result<Option<u64>, PutError> {
+        self.client_mut()
+            .write_bytes_to_vault_if_defined(data, wallet)
+            .await
+    }
+}
diff --git a/autonomi/src/evm/client/data.rs b/autonomi/src/evm/client/data.rs
new file mode 100644
index 0000000000..a17340f9ce
--- /dev/null
+++ b/autonomi/src/evm/client/data.rs
@@ -0,0 +1,221 @@
+use crate::client::data::{Data, GetError, PayError, PutError};
+use crate::client::ClientWrapper;
+use crate::evm::client::EvmClient;
+use crate::evm::Client;
+use crate::self_encryption::{encrypt, DataMapLevel};
+use bytes::Bytes;
+use evmlib::common::{QuoteHash, QuotePayment, TxHash};
+use evmlib::wallet;
+use evmlib::wallet::Wallet;
+use libp2p::futures;
+use libp2p::kad::{Quorum, Record};
+use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
+use sn_evm::ProofOfPayment;
+use sn_networking::{GetRecordCfg, PutRecordCfg};
+use sn_networking::{Network, NetworkError, PayeeQuote};
+use sn_protocol::{
+    storage::{
+        try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind,
+    },
+    NetworkAddress,
+};
+use std::collections::{BTreeMap, HashMap, HashSet};
+use xor_name::XorName;
+
+impl Data for EvmClient {}
+
+impl EvmClient {
+    /// Upload a piece of data to the network. This data will be self-encrypted,
+    /// and the data map XOR address will be returned.
+    pub async fn put(&mut self, data: Bytes, wallet: &Wallet) -> Result<XorName, PutError> {
+        let now = std::time::Instant::now();
+        let (data_map_chunk, chunks) = encrypt(data)?;
+
+        tracing::debug!("Encryption took: {:.2?}", now.elapsed());
+
+        let map_xor_name = *data_map_chunk.address().xorname();
+        let mut xor_names = vec![map_xor_name];
+
+        for chunk in &chunks {
+            xor_names.push(*chunk.name());
+        }
+
+        // Pay for all chunks + data map chunk
+        let (payment_proofs, _free_chunks) = self.pay(xor_names.into_iter(), wallet).await?;
+
+        // Upload data map
+        if let Some(proof) = payment_proofs.get(&map_xor_name) {
+            self.upload_chunk(data_map_chunk.clone(), proof.clone())
+                .await?;
+        }
+
+        // Upload the rest of the chunks
+        for chunk in chunks {
+            if let Some(proof) = payment_proofs.get(chunk.name()) {
+                self.upload_chunk(chunk, proof.clone()).await?;
+            }
+        }
+
+        Ok(map_xor_name)
+    }
+
+    pub(crate) async fn pay(
+        &mut self,
+        content_addrs: impl Iterator<Item = XorName>,
+        wallet: &Wallet,
+    ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
+        let cost_map = self.get_store_quotes(content_addrs).await?;
+        let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
+
+        // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
+        // TODO: retry when it fails?
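+        // `pay_for_quotes` below submits the batched quote payments on-chain and, on success,
+        // returns a map of quote hash -> transaction hash; `construct_proofs` then joins those
+        // transaction hashes back onto the store quotes to build a `ProofOfPayment` per paid chunk.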
+        // Execute chunk payments
+        let payments = wallet
+            .pay_for_quotes(quote_payments)
+            .await
+            .map_err(|err| PayError::from(err.0))?;
+
+        let proofs = construct_proofs(&cost_map, &payments);
+
+        tracing::trace!(
+            "Chunk payments of {} chunks completed. {} chunks were free / already paid for",
+            proofs.len(),
+            skipped_chunks.len()
+        );
+
+        Ok((proofs, skipped_chunks))
+    }
+
+    async fn get_store_quotes(
+        &mut self,
+        content_addrs: impl Iterator<Item = XorName>,
+    ) -> Result<HashMap<XorName, PayeeQuote>, PayError> {
+        let futures: Vec<_> = content_addrs
+            .into_iter()
+            .map(|content_addr| fetch_store_quote_with_retries(&self.network(), content_addr))
+            .collect();
+
+        let quotes = futures::future::try_join_all(futures).await?;
+
+        Ok(quotes.into_iter().collect::<HashMap<_, _>>())
+    }
+
+    /// Directly writes Chunks to the network in the form of immutable self encrypted chunks.
+    async fn upload_chunk(
+        &self,
+        chunk: Chunk,
+        proof_of_payment: ProofOfPayment,
+    ) -> Result<(), PutError> {
+        self.store_chunk(chunk, proof_of_payment).await?;
+        Ok(())
+    }
+
+    /// Actually store a chunk to a peer.
+    async fn store_chunk(&self, chunk: Chunk, payment: ProofOfPayment) -> Result<(), PutError> {
+        let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID");
+
+        tracing::debug!("Storing chunk: {chunk:?} to {:?}", storing_node);
+
+        let key = chunk.network_address().to_record_key();
+
+        let record_kind = RecordKind::ChunkWithPayment;
+        let record = Record {
+            key: key.clone(),
+            value: try_serialize_record(&(payment, chunk.clone()), record_kind)
+                .map_err(|_| PutError::Serialization)?
+                .to_vec(),
+            publisher: None,
+            expires: None,
+        };
+
+        let put_cfg = PutRecordCfg {
+            put_quorum: Quorum::One,
+            retry_strategy: None,
+            use_put_record_to: Some(vec![storing_node]),
+            verification: None,
+        };
+        Ok(self.network().put_record(record, &put_cfg).await?)
+    }
+}
+
+/// Fetch a store quote for a content address with a retry strategy.
+async fn fetch_store_quote_with_retries(
+    network: &Network,
+    content_addr: XorName,
+) -> Result<(XorName, PayeeQuote), PayError> {
+    let mut retries = 0;
+
+    loop {
+        match fetch_store_quote(network, content_addr).await {
+            Ok(quote) => {
+                break Ok((content_addr, quote));
+            }
+            Err(err) if retries < 2 => {
+                retries += 1;
+                tracing::error!("Error while fetching store quote: {err:?}, retry #{retries}");
+            }
+            Err(err) => {
+                tracing::error!(
+                    "Error while fetching store quote: {err:?}, stopping after {retries} retries"
+                );
+                break Err(PayError::CouldNotGetStoreQuote(content_addr));
+            }
+        }
+    }
+}
+
+/// Fetch a store quote for a content address.
+async fn fetch_store_quote(
+    network: &Network,
+    content_addr: XorName,
+) -> Result<PayeeQuote, NetworkError> {
+    network
+        .get_store_costs_from_network(
+            NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)),
+            vec![],
+        )
+        .await
+}
+
+/// Split a cost map into payments that still need to be executed and chunks that are already paid for (zero-cost quotes).
+fn extract_quote_payments(
+    cost_map: &HashMap<XorName, PayeeQuote>,
+) -> (Vec<QuotePayment>, Vec<XorName>) {
+    let mut to_be_paid = vec![];
+    let mut already_paid = vec![];
+
+    for (chunk_address, quote) in cost_map.iter() {
+        if quote.2.cost.is_zero() {
+            already_paid.push(*chunk_address);
+        } else {
+            to_be_paid.push((
+                quote.2.hash(),
+                quote.2.rewards_address,
+                quote.2.cost.as_atto(),
+            ));
+        }
+    }
+
+    (to_be_paid, already_paid)
+}
+
+/// Construct payment proofs from cost map and payments map.
+fn construct_proofs(
+    cost_map: &HashMap<XorName, PayeeQuote>,
+    payments: &BTreeMap<QuoteHash, TxHash>,
+) -> HashMap<XorName, ProofOfPayment> {
+    cost_map
+        .iter()
+        .filter_map(|(xor_name, (_, _, quote))| {
+            payments.get(&quote.hash()).map(|tx_hash| {
+                (
+                    *xor_name,
+                    ProofOfPayment {
+                        quote: quote.clone(),
+                        tx_hash: *tx_hash,
+                    },
+                )
+            })
+        })
+        .collect()
+}
diff --git a/autonomi/src/evm/client/files.rs b/autonomi/src/evm/client/files.rs
new file mode 100644
index 0000000000..3d3a1424c3
--- /dev/null
+++ b/autonomi/src/evm/client/files.rs
@@ -0,0 +1,60 @@
+use std::{collections::HashMap, path::PathBuf};
+
+use super::data::{GetError, PutError};
+use crate::client::files::{FilePointer, Files, Root, UploadError};
+use crate::evm::client::EvmClient;
+use bytes::{BufMut, Bytes};
+use evmlib::wallet::Wallet;
+use serde::{Deserialize, Serialize};
+use walkdir::WalkDir;
+use xor_name::XorName;
+
+impl Files for EvmClient {}
+
+impl EvmClient {
+    /// Upload a directory to the network. The directory is recursively walked.
+    #[cfg(feature = "fs")]
+    pub async fn upload_from_dir(
+        &mut self,
+        path: PathBuf,
+        wallet: &Wallet,
+    ) -> Result<(Root, XorName), UploadError> {
+        let mut map = HashMap::new();
+
+        for entry in WalkDir::new(path) {
+            let entry = entry?;
+            if !entry.file_type().is_file() {
+                continue;
+            }
+            let path = entry.path().to_path_buf();
+            tracing::info!("Uploading file: {path:?}");
+            let file = upload_from_file(self, path.clone(), wallet).await?;
+            map.insert(path, file);
+        }
+
+        let root = Root { map };
+        let root_serialized = rmp_serde::to_vec(&root).expect("TODO");
+
+        let xor_name = self.put(Bytes::from(root_serialized), wallet).await?;
+
+        Ok((root, xor_name))
+    }
+}
+
+async fn upload_from_file(
+    client: &mut EvmClient,
+    path: PathBuf,
+    wallet: &Wallet,
+) -> Result<FilePointer, UploadError> {
+    let data = tokio::fs::read(path).await?;
+    let data = Bytes::from(data);
+
+    let addr = client.put(data, wallet).await?;
+
+    // TODO: Set created_at and modified_at
+    Ok(FilePointer {
+        data_map: addr,
+        created_at: 0,
+        modified_at: 0,
+    })
+}
diff --git a/autonomi/src/evm/client/mod.rs b/autonomi/src/evm/client/mod.rs
new file mode 100644
index 0000000000..1c94079eda
--- /dev/null
+++ b/autonomi/src/evm/client/mod.rs
@@ -0,0 +1,33 @@
+use crate::client::{Client, ClientWrapper, ConnectError};
+use crate::Multiaddr;
+
+#[cfg(feature = "data")]
+pub mod data;
+#[cfg(feature = "files")]
+pub mod files;
+#[cfg(feature = "registers")]
+pub mod registers;
+mod vault;
+
+#[derive(Clone)]
+pub struct EvmClient {
+    client: Client,
+}
+
+impl ClientWrapper for EvmClient {
+    fn from_client(client: Client) -> Self {
+        EvmClient { client }
+    }
+
+    fn client(&self) -> &Client {
+        &self.client
+    }
+
+    fn client_mut(&mut self) -> &mut Client {
+        &mut self.client
+    }
+
+    fn into_client(self) -> Client {
+        self.client
+    }
+}
diff --git a/autonomi/src/evm/client/registers.rs b/autonomi/src/evm/client/registers.rs
new file mode 100644
index 0000000000..60687c478b
--- /dev/null
+++ b/autonomi/src/evm/client/registers.rs
@@ -0,0 +1,153 @@
+use std::collections::BTreeSet;
+
+use crate::client::registers::{Register, RegisterError, Registers};
+use crate::client::ClientWrapper;
+use crate::evm::client::EvmClient;
+use bls::SecretKey;
+use bytes::Bytes;
+use evmlib::wallet::Wallet;
+use libp2p::kad::{Quorum, Record};
+use sn_networking::GetRecordCfg;
+use sn_networking::PutRecordCfg;
+use sn_protocol::storage::try_deserialize_record;
+use sn_protocol::storage::try_serialize_record;
+use sn_protocol::storage::RecordKind;
+use sn_protocol::storage::RegisterAddress;
+use 
sn_protocol::NetworkAddress; +use sn_registers::EntryHash; +use sn_registers::Permissions; +use sn_registers::Register as ClientRegister; +use sn_registers::SignedRegister; +use xor_name::XorName; + +impl Registers for EvmClient {} + +impl EvmClient { + /// Creates a new Register with an initial value and uploads it to the network. + pub async fn create_register( + &mut self, + value: Bytes, + name: XorName, + owner: SecretKey, + wallet: &Wallet, + ) -> Result { + let pk = owner.public_key(); + + // Owner can write to the register. + let permissions = Permissions::new_with([pk]); + let mut register = ClientRegister::new(pk, name, permissions); + let address = NetworkAddress::from_register_address(*register.address()); + + let entries = register + .read() + .into_iter() + .map(|(entry_hash, _value)| entry_hash) + .collect(); + + // TODO: Handle error. + let _ = register.write(value.into(), &entries, &owner); + let reg_xor = register.address().xorname(); + let (payment_proofs, _) = self.pay(std::iter::once(reg_xor), wallet).await?; + // Should always be there, else it would have failed on the payment step. + let proof = payment_proofs.get(®_xor).expect("Missing proof"); + let payee = proof.to_peer_id_payee().expect("Missing payee Peer ID"); + let signed_register = register.clone().into_signed(&owner).expect("TODO"); + + let record = Record { + key: address.to_record_key(), + value: try_serialize_record( + &(proof, &signed_register), + RecordKind::RegisterWithPayment, + ) + .map_err(|_| RegisterError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::All, + retry_strategy: None, + use_put_record_to: Some(vec![payee]), + verification: None, + }; + + self.network().put_record(record, &put_cfg).await?; + + Ok(Register { + inner: signed_register, + }) + } + + /// Fetches a Register from the network. + pub async fn fetch_register( + &self, + address: RegisterAddress, + ) -> Result { + let network_address = NetworkAddress::from_register_address(address); + let key = network_address.to_record_key(); + + let get_cfg = GetRecordCfg { + get_quorum: Quorum::One, + retry_strategy: None, + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; + + let record = self + .network() + .get_record_from_network(key, &get_cfg) + .await?; + + let register: SignedRegister = + try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + + Ok(Register { inner: register }) + } + + /// Updates a Register on the network with a new value. This will overwrite existing value(s). + pub async fn update_register( + &self, + register: Register, + new_value: Bytes, + owner: SecretKey, + ) -> Result<(), RegisterError> { + // Fetch the current register + let mut signed_register = register.inner; + let mut register = signed_register.clone().register().expect("TODO"); + + // Get all current branches + let children: BTreeSet = register.read().into_iter().map(|(e, _)| e).collect(); + + // Write the new value to all branches + let (_, op) = register + .write(new_value.to_vec(), &children, &owner) + .expect("TODO"); + + // Apply the operation to the register + signed_register.add_op(op.clone()).expect("TODO"); + + // Prepare the record for network storage + let record = Record { + key: NetworkAddress::from_register_address(*register.address()).to_record_key(), + value: try_serialize_record(&signed_register, RecordKind::Register) + .map_err(|_| RegisterError::Serialization)? 
+                .to_vec(),
+            publisher: None,
+            expires: None,
+        };
+
+        let put_cfg = PutRecordCfg {
+            put_quorum: Quorum::All,
+            retry_strategy: None,
+            use_put_record_to: None,
+            verification: None,
+        };
+
+        // Store the updated register on the network
+        self.network().put_record(record, &put_cfg).await?;
+
+        Ok(())
+    }
+}
diff --git a/autonomi/src/evm/client/vault.rs b/autonomi/src/evm/client/vault.rs
new file mode 100644
index 0000000000..7d91ed27cd
--- /dev/null
+++ b/autonomi/src/evm/client/vault.rs
@@ -0,0 +1,4 @@
+use crate::client::vault::Vault;
+use crate::evm::client::EvmClient;
+
+impl Vault for EvmClient {}
diff --git a/autonomi/src/evm/mod.rs b/autonomi/src/evm/mod.rs
new file mode 100644
index 0000000000..bb68c57eed
--- /dev/null
+++ b/autonomi/src/evm/mod.rs
@@ -0,0 +1,5 @@
+pub use client::Client;
+
+pub mod client;
+
+pub type EvmWallet = evmlib::wallet::Wallet;
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index 3314d8a1b3..ac98e10ebe 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -43,14 +43,18 @@ pub use bytes::Bytes;
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
 pub use libp2p::Multiaddr;
 
-pub use client::{Client, ConnectError, CONNECT_TIMEOUT_SECS};
-
-mod client;
-mod secrets;
-#[cfg(feature = "data")]
+pub(crate) mod client;
+#[cfg(feature = "evm-payments")]
+pub mod evm;
+#[cfg(feature = "native-payments")]
+pub mod native;
 mod self_encryption;
-#[cfg(feature = "transfers")]
-mod wallet;
 
 #[cfg(feature = "transfers")]
 const VERIFY_STORE: bool = true;
+
+#[cfg(all(feature = "native-payments", not(feature = "evm-payments")))]
+pub type Client = native::Client;
+
+#[cfg(all(feature = "evm-payments", not(feature = "native-payments")))]
+pub type Client = evm::Client;
diff --git a/autonomi/src/native/client/data.rs b/autonomi/src/native/client/data.rs
new file mode 100644
index 0000000000..02126fba4e
--- /dev/null
+++ b/autonomi/src/native/client/data.rs
@@ -0,0 +1,226 @@
+use std::collections::{BTreeMap, HashSet};
+
+use super::transfers::SendSpendsError;
+use crate::client::data::{Data, PayError, PutError};
+use crate::client::ClientWrapper;
+use crate::native::client::NativeClient;
+use crate::native::Client;
+use crate::self_encryption::{encrypt, DataMapLevel};
+use bytes::Bytes;
+use libp2p::{
+    kad::{Quorum, Record},
+    PeerId,
+};
+use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
+use sn_client::{
+    networking::{GetRecordCfg, NetworkError, PutRecordCfg},
+    transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote},
+    StoragePaymentResult,
+};
+use sn_protocol::{
+    storage::{
+        try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind,
+    },
+    NetworkAddress,
+};
+use sn_transfers::Payment;
+use tokio::task::{JoinError, JoinSet};
+use xor_name::XorName;
+
+impl Data for NativeClient {}
+
+impl NativeClient {
+    /// Upload a piece of data to the network. This data will be self-encrypted,
+    /// and the data map XOR address will be returned.
+    pub async fn put(&mut self, data: Bytes, wallet: &mut HotWallet) -> Result<XorName, PutError> {
+        let now = std::time::Instant::now();
+        let (map, chunks) = encrypt(data)?;
+        tracing::debug!("Encryption took: {:.2?}", now.elapsed());
+
+        let map_xor_name = *map.address().xorname();
+
+        let mut xor_names = vec![];
+        xor_names.push(map_xor_name);
+
+        for chunk in &chunks {
+            xor_names.push(*chunk.name());
+        }
+
+        let StoragePaymentResult { skipped_chunks, .. } =
+            self.pay(xor_names.into_iter(), wallet).await?;
+
+        // TODO: Upload in parallel
+        if !skipped_chunks.contains(map.name()) {
+            self.upload_chunk(map, wallet).await?;
+        }
+
+        for chunk in chunks {
+            if skipped_chunks.contains(chunk.name()) {
+                continue;
+            }
+            self.upload_chunk(chunk, wallet).await?;
+        }
+
+        Ok(map_xor_name)
+    }
+
+    pub(crate) async fn pay(
+        &mut self,
+        content_addrs: impl Iterator<Item = XorName>,
+        wallet: &mut HotWallet,
+    ) -> Result<StoragePaymentResult, PayError> {
+        let mut tasks = JoinSet::new();
+
+        for content_addr in content_addrs {
+            let network = self.network().clone();
+
+            tasks.spawn(async move {
+                // TODO: retry, but where?
+                let cost = network
+                    .get_store_costs_from_network(
+                        NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)),
+                        vec![],
+                    )
+                    .await
+                    .map_err(PayError::CouldNotGetStoreCosts);
+
+                tracing::debug!("Storecosts retrieved for {content_addr:?} {cost:?}");
+
+                (content_addr, cost)
+            });
+        }
+
+        tracing::debug!("Pending store cost tasks: {:?}", tasks.len());
+
+        // collect store costs
+        let mut cost_map = BTreeMap::default();
+        let mut skipped_chunks = vec![];
+
+        while let Some(res) = tasks.join_next().await {
+            match res {
+                Ok((content_addr, Ok(cost))) => {
+                    if cost.2.cost == NanoTokens::zero() {
+                        skipped_chunks.push(content_addr);
+                        tracing::debug!("Skipped existing chunk {content_addr:?}");
+                    } else {
+                        tracing::debug!("Storecost inserted into payment map for {content_addr:?}");
+                        let _ = cost_map.insert(content_addr, (cost.1, cost.2, cost.0.to_bytes()));
+                    }
+                }
+                Ok((content_addr, Err(err))) => {
+                    tracing::warn!("Cannot get store cost for {content_addr:?} with error {err:?}");
+                    return Err(err);
+                }
+                Err(e) => {
+                    return Err(PayError::JoinError(e));
+                }
+            }
+        }
+
+        let (storage_cost, royalty_fees) = if cost_map.is_empty() {
+            (NanoTokens::zero(), NanoTokens::zero())
+        } else {
+            self.pay_for_records(&cost_map, wallet).await?
+        };
+
+        let res = StoragePaymentResult {
+            storage_cost,
+            royalty_fees,
+            skipped_chunks,
+        };
+
+        Ok(res)
+    }
+
+    async fn pay_for_records(
+        &mut self,
+        cost_map: &BTreeMap<XorName, (MainPubkey, PaymentQuote, Vec<u8>)>,
+        wallet: &mut HotWallet,
+    ) -> Result<(NanoTokens, NanoTokens), PayError> {
+        // Before the wallet makes any further progress, there should be no pending
+        // `unconfirmed_spend_requests`.
+        self.resend_pending_transactions(wallet).await;
+
+        let total_cost = wallet.local_send_storage_payment(cost_map)?;
+
+        // send to network
+        tracing::trace!("Sending storage payment transfer to the network");
+
+        let spend_attempt_result = self
+            .send_spends(wallet.unconfirmed_spend_requests().iter())
+            .await;
+
+        tracing::trace!("send_spends of {} chunks completed", cost_map.len(),);
+
+        // This is a bit risky: if any single payment fails to be put, ALL of the
+        // spends for the chunks' store_costs and royalty_fee will be re-paid.
+        if let Err(error) = spend_attempt_result {
+            tracing::warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later.");
It will be retried later."); + + // if we have a DoubleSpend error, lets remove the CashNote from the wallet + if let SendSpendsError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { + for cash_note_key in spent_cash_notes { + tracing::warn!( + "Removing double spends CashNote from wallet: {cash_note_key:?}" + ); + wallet.mark_notes_as_spent([cash_note_key]); + wallet.clear_specific_spend_request(*cash_note_key); + } + } + + wallet.store_unconfirmed_spend_requests()?; + + return Err(PayError::SendSpendsError(error)); + } else { + tracing::info!("Spend has completed: {:?}", spend_attempt_result); + wallet.clear_confirmed_spend_requests(); + } + + tracing::trace!("clear up spends of {} chunks completed", cost_map.len(),); + + Ok(total_cost) + } + + /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. + async fn upload_chunk(&self, chunk: Chunk, wallet: &mut HotWallet) -> Result<(), PutError> { + let xor_name = *chunk.name(); + let (payment, payee) = self.get_recent_payment_for_addr(&xor_name, wallet)?; + + self.store_chunk(chunk, payee, payment).await?; + + wallet.api().remove_payment_transaction(&xor_name); + + Ok(()) + } + + /// Actually store a chunk to a peer. + async fn store_chunk( + &self, + chunk: Chunk, + payee: PeerId, + payment: Payment, + ) -> Result<(), PutError> { + tracing::debug!("Storing chunk: {chunk:?} to {payee:?}"); + + let key = chunk.network_address().to_record_key(); + + let record_kind = RecordKind::ChunkWithPayment; + + let record = Record { + key: key.clone(), + value: try_serialize_record(&(payment, chunk.clone()), record_kind) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::One, + retry_strategy: None, + use_put_record_to: Some(vec![payee]), + verification: None, + }; + + Ok(self.network().put_record(record, &put_cfg).await?) + } +} diff --git a/autonomi/src/native/client/files.rs b/autonomi/src/native/client/files.rs new file mode 100644 index 0000000000..f9fec44518 --- /dev/null +++ b/autonomi/src/native/client/files.rs @@ -0,0 +1,66 @@ +use std::{collections::HashMap, path::PathBuf}; + +use crate::client::files::{FilePointer, Files, Root, UploadError}; +use crate::native::client::NativeClient; +use crate::native::Client; +use bytes::Bytes; +use serde::{Deserialize, Serialize}; +use sn_transfers::HotWallet; +use walkdir::WalkDir; +use xor_name::XorName; + +impl Files for NativeClient {} + +impl NativeClient { + /// Upload a directory to the network. The directory is recursively walked. 
+ #[cfg(feature = "fs")] + pub async fn upload_from_dir( + &mut self, + path: PathBuf, + wallet: &mut HotWallet, + ) -> Result<(Root, XorName), UploadError> { + let mut map = HashMap::new(); + + for entry in WalkDir::new(path) { + let entry = entry?; + + if !entry.file_type().is_file() { + continue; + } + + let path = entry.path().to_path_buf(); + tracing::info!("Uploading file: {path:?}"); + let file = upload_from_file(self, path.clone(), wallet).await?; + map.insert(path, file); + } + + let root = Root { map }; + let root_serialized = Bytes::from(rmp_serde::to_vec(&root)?); + + #[cfg(feature = "vault")] + self.write_bytes_to_vault_if_defined(root_serialized.clone(), wallet) + .await?; + + let xor_name = self.put(root_serialized, wallet).await?; + + Ok((root, xor_name)) + } +} + +async fn upload_from_file( + client: &mut NativeClient, + path: PathBuf, + wallet: &mut HotWallet, +) -> Result { + let data = tokio::fs::read(path).await?; + let data = Bytes::from(data); + + let addr = client.put(data, wallet).await?; + + // TODO: Set created_at and modified_at + Ok(FilePointer { + data_map: addr, + created_at: 0, + modified_at: 0, + }) +} diff --git a/autonomi/src/native/client/mod.rs b/autonomi/src/native/client/mod.rs new file mode 100644 index 0000000000..58d86f0a9d --- /dev/null +++ b/autonomi/src/native/client/mod.rs @@ -0,0 +1,35 @@ +use crate::client::{Client, ClientWrapper, ConnectError}; +use crate::Multiaddr; + +#[cfg(feature = "data")] +pub mod data; +#[cfg(feature = "files")] +pub mod files; +#[cfg(feature = "registers")] +pub mod registers; +#[cfg(feature = "transfers")] +pub mod transfers; +mod vault; + +#[derive(Clone)] +pub struct NativeClient { + client: Client, +} + +impl ClientWrapper for NativeClient { + fn from_client(client: Client) -> Self { + NativeClient { client } + } + + fn client(&self) -> &Client { + &self.client + } + + fn client_mut(&mut self) -> &mut Client { + &mut self.client + } + + fn into_client(self) -> Client { + self.client + } +} diff --git a/autonomi/src/native/client/registers.rs b/autonomi/src/native/client/registers.rs new file mode 100644 index 0000000000..5fa6f21109 --- /dev/null +++ b/autonomi/src/native/client/registers.rs @@ -0,0 +1,80 @@ +use crate::client::registers::{Register, RegisterError, Registers}; +use crate::client::ClientWrapper; +use crate::native::client::NativeClient; +use bls::SecretKey; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use sn_client::networking::PutRecordCfg; +use sn_client::registers::Permissions; +use sn_client::registers::Register as ClientRegister; +use sn_client::transfers::HotWallet; +use sn_protocol::storage::try_serialize_record; +use sn_protocol::storage::RecordKind; +use sn_protocol::NetworkAddress; +use xor_name::XorName; + +impl Registers for NativeClient {} + +impl NativeClient { + /// Creates a new Register with an initial value and uploads it to the network. + pub async fn create_register( + &mut self, + value: Bytes, + name: XorName, + owner: SecretKey, + wallet: &mut HotWallet, + ) -> Result { + let pk = owner.public_key(); + + // Owner can write to the register. 
+        let permissions = Permissions::new_with([pk]);
+        let mut register = ClientRegister::new(pk, name, permissions);
+        let address = NetworkAddress::from_register_address(*register.address());
+
+        let entries = register
+            .read()
+            .into_iter()
+            .map(|(entry_hash, _value)| entry_hash)
+            .collect();
+        register
+            .write(value.into(), &entries, &owner)
+            .map_err(RegisterError::Write)?;
+
+        let _payment_result = self
+            .pay(std::iter::once(register.address().xorname()), wallet)
+            .await?;
+
+        let (payment, payee) =
+            self.get_recent_payment_for_addr(&register.address().xorname(), wallet)?;
+
+        let signed_register = register
+            .clone()
+            .into_signed(&owner)
+            .map_err(RegisterError::CouldNotSign)?;
+
+        let record = Record {
+            key: address.to_record_key(),
+            value: try_serialize_record(
+                &(payment, &signed_register),
+                RecordKind::RegisterWithPayment,
+            )
+            .map_err(|_| RegisterError::Serialization)?
+            .to_vec(),
+            publisher: None,
+            expires: None,
+        };
+
+        let put_cfg = PutRecordCfg {
+            put_quorum: Quorum::All,
+            retry_strategy: None,
+            use_put_record_to: Some(vec![payee]),
+            verification: None,
+        };
+
+        self.network().put_record(record, &put_cfg).await?;
+
+        Ok(Register {
+            inner: signed_register,
+        })
+    }
+}
diff --git a/autonomi/src/client/transfers.rs b/autonomi/src/native/client/transfers.rs
similarity index 96%
rename from autonomi/src/client/transfers.rs
rename to autonomi/src/native/client/transfers.rs
index 7e34b93209..3bfc9c166c 100644
--- a/autonomi/src/client/transfers.rs
+++ b/autonomi/src/native/client/transfers.rs
@@ -1,5 +1,3 @@
-use crate::wallet::MemWallet;
-use crate::Client;
 use sn_client::transfers::{MainPubkey, NanoTokens};
 use sn_transfers::{SpendReason, Transfer};
 
@@ -21,7 +19,7 @@ pub enum TransferError {
     #[error("Failed to send tokens due to {0}")]
     CouldNotSendMoney(String),
     #[error("Wallet error: {0:?}")]
-    WalletError(#[from] crate::wallet::error::WalletError),
+    WalletError(#[from] wallet::error::WalletError),
     #[error("Network error: {0:?}")]
     NetworkError(#[from] sn_client::networking::NetworkError),
 }
@@ -52,6 +50,10 @@ use sn_protocol::{
 use sn_transfers::Payment;
 use xor_name::XorName;
 
+use crate::client::ClientWrapper;
+use crate::native::client::NativeClient;
+use crate::native::wallet::MemWallet;
+use crate::native::{wallet, Client};
 use crate::VERIFY_STORE;
 use sn_transfers::CashNote;
 use std::collections::HashSet;
@@ -63,7 +65,7 @@ pub enum SendError {
     #[error("CashNote has no parent spends.")]
     CashNoteHasNoParentSpends,
     #[error("Wallet error occurred during sending of transfer.")]
-    WalletError(#[from] crate::wallet::error::WalletError),
+    WalletError(#[from] wallet::error::WalletError),
     #[error("Encountered transfer error during sending.")]
     TransferError(#[from] sn_transfers::TransferError),
     #[error("Spends error: {0:?}")]
@@ -80,7 +82,7 @@ pub enum ReceiveError {
 
 // Hide these from the docs.
 #[doc(hidden)]
-impl Client {
+impl NativeClient {
     /// Send spend requests to the network.
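+    ///
+    /// Callers typically drain the wallet's pending spends, mirroring
+    /// `pay_for_records` above (sketch):
+    ///
+    /// ```no_run
+    /// # async fn example(client: &NativeClient, wallet: &mut sn_transfers::HotWallet) -> Result<(), Box<dyn std::error::Error>> {
+    /// client.send_spends(wallet.unconfirmed_spend_requests().iter()).await?;
+    /// wallet.clear_confirmed_spend_requests();
+    /// # Ok(()) }
+    /// ```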
pub async fn send_spends( &self, @@ -97,7 +99,7 @@ impl Client { let the_task = async move { let cash_note_key = spend_request.unique_pubkey(); - let result = store_spend(self.network.clone(), spend_request.clone()).await; + let result = store_spend(self.network().clone(), spend_request.clone()).await; (cash_note_key, result) }; @@ -181,7 +183,7 @@ impl Client { .map_err(TransferError::WalletError)?; let cash_notes = self - .network + .network() .verify_cash_notes_redemptions(wallet.address(), &cash_note_redemptions) .await?; @@ -205,7 +207,7 @@ impl Client { let pk = cash_note.unique_pubkey(); let addr = SpendAddress::from_unique_pubkey(&pk); - match self.network.get_spend(addr).await { + match self.network().get_spend(addr).await { // if we get a RecordNotFound, it means the CashNote is not spent, which is good Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)) => Ok(()), // if we get a spend, it means the CashNote is already spent diff --git a/autonomi/src/native/client/vault.rs b/autonomi/src/native/client/vault.rs new file mode 100644 index 0000000000..70696b8652 --- /dev/null +++ b/autonomi/src/native/client/vault.rs @@ -0,0 +1,4 @@ +use crate::client::vault::Vault; +use crate::native::client::NativeClient; + +impl Vault for NativeClient {} diff --git a/autonomi/src/native/mod.rs b/autonomi/src/native/mod.rs new file mode 100644 index 0000000000..3341f5c0a0 --- /dev/null +++ b/autonomi/src/native/mod.rs @@ -0,0 +1,6 @@ +pub use client::Client; + +pub mod client; +mod secrets; +#[cfg(feature = "transfers")] +mod wallet; diff --git a/autonomi/src/secrets.rs b/autonomi/src/native/secrets.rs similarity index 100% rename from autonomi/src/secrets.rs rename to autonomi/src/native/secrets.rs diff --git a/autonomi/src/wallet/error.rs b/autonomi/src/native/wallet/error.rs similarity index 100% rename from autonomi/src/wallet/error.rs rename to autonomi/src/native/wallet/error.rs diff --git a/autonomi/src/wallet/mod.rs b/autonomi/src/native/wallet/mod.rs similarity index 99% rename from autonomi/src/wallet/mod.rs rename to autonomi/src/native/wallet/mod.rs index 25ba99bc03..db114c4d3e 100644 --- a/autonomi/src/wallet/mod.rs +++ b/autonomi/src/native/wallet/mod.rs @@ -1,6 +1,6 @@ pub mod error; -use crate::wallet::error::WalletError; +use crate::native::wallet::error::WalletError; use sn_client::transfers::{HotWallet, MainSecretKey}; use sn_transfers::{ CashNote, CashNoteRedemption, DerivationIndex, MainPubkey, NanoTokens, SignedSpend, diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs index c01ae23187..767209f503 100644 --- a/autonomi/tests/common/mod.rs +++ b/autonomi/tests/common/mod.rs @@ -1,11 +1,18 @@ #![allow(dead_code)] use bytes::Bytes; +use const_hex::ToHexExt; +use evmlib::CustomNetwork; use libp2p::Multiaddr; use rand::Rng; use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; use sn_peers_acquisition::parse_peer_addr; use sn_transfers::{get_faucet_data_dir, HotWallet}; +use std::env; + +fn get_var_or_panic(var: &str) -> String { + env::var(var).expect(&format!("{var} environment variable needs to be set")) +} /// When launching a testnet locally, we can use the faucet wallet. pub fn load_hot_wallet_from_faucet() -> HotWallet { @@ -31,9 +38,49 @@ pub fn enable_logging() { /// /// An empty `Vec` will be returned if the env var is not set. 
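+///
+/// A usage sketch (the multiaddr is illustrative only):
+///
+/// ```no_run
+/// # fn example() -> Result<(), libp2p::multiaddr::Error> {
+/// std::env::set_var("SAFE_PEERS", "/ip4/127.0.0.1/udp/1234/quic-v1");
+/// let peers = peers_from_env()?;
+/// assert_eq!(peers.len(), 1);
+/// # Ok(()) }
+/// ```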
 pub fn peers_from_env() -> Result<Vec<Multiaddr>, libp2p::multiaddr::Error> {
-    let Ok(peers_str) = std::env::var("SAFE_PEERS") else {
+    let Ok(peers_str) = env::var("SAFE_PEERS") else {
         return Ok(vec![]);
     };
 
     peers_str.split(',').map(parse_peer_addr).collect()
 }
+
+pub fn evm_network_from_env() -> evmlib::Network {
+    let evm_network = env::var("EVM_NETWORK").ok();
+    let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one");
+
+    let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag {
+        (
+            evmlib::Network::ArbitrumOne.rpc_url().to_string(),
+            evmlib::Network::ArbitrumOne
+                .payment_token_address()
+                .encode_hex_with_prefix(),
+            evmlib::Network::ArbitrumOne
+                .chunk_payments_address()
+                .encode_hex_with_prefix(),
+        )
+    } else {
+        (
+            get_var_or_panic("RPC_URL"),
+            get_var_or_panic("PAYMENT_TOKEN_ADDRESS"),
+            get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"),
+        )
+    };
+
+    evmlib::Network::Custom(CustomNetwork::new(
+        &rpc_url,
+        &payment_token_address,
+        &chunk_payments_address,
+    ))
+}
+
+pub fn evm_wallet_from_env_or_default(network: evmlib::Network) -> evmlib::wallet::Wallet {
+    // Default deployer wallet of the testnet.
+    const DEFAULT_WALLET_PRIVATE_KEY: &str =
+        "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
+
+    let private_key = env::var("PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string());
+
+    evmlib::wallet::Wallet::new_from_private_key(network, &private_key)
+        .expect("Invalid private key")
+}
diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs
new file mode 100644
index 0000000000..4b4abfee35
--- /dev/null
+++ b/autonomi/tests/evm/file.rs
@@ -0,0 +1,78 @@
+use crate::common;
+use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default};
+use crate::evm::Client;
+use bytes::Bytes;
+use eyre::bail;
+use std::time::Duration;
+use tokio::time::sleep;
+
+#[tokio::test]
+async fn file() -> Result<(), Box<dyn std::error::Error>> {
+    common::enable_logging();
+
+    let network = evm_network_from_env();
+    let mut client = Client::connect(&[]).await.unwrap();
+    let mut wallet = evm_wallet_from_env_or_default(network);
+
+    // let data = common::gen_random_data(1024 * 1024 * 1000);
+    // let user_key = common::gen_random_data(32);
+
+    let (root, addr) = client
+        .upload_from_dir("tests/file/test_dir".into(), &mut wallet)
+        .await?;
+
+    sleep(Duration::from_secs(10)).await;
+
+    let root_fetched = client.fetch_root(addr).await?;
+
+    assert_eq!(
+        root.map, root_fetched.map,
+        "root fetched should match root put"
+    );
+
+    Ok(())
+}
+
+#[cfg(feature = "vault")]
+#[tokio::test]
+async fn file_into_vault() -> eyre::Result<()> {
+    common::enable_logging();
+
+    let network = evm_network_from_env();
+
+    let mut client = Client::connect(&[])
+        .await?
+        .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?;
+
+    let mut wallet = evm_wallet_from_env_or_default(network);
+
+    let (root, addr) = client
+        .upload_from_dir("tests/file/test_dir".into(), &mut wallet)
+        .await?;
+    sleep(Duration::from_secs(2)).await;
+
+    let root_fetched = client.fetch_root(addr).await?;
+
+    assert_eq!(
+        root.map, root_fetched.map,
+        "root fetched should match root put"
+    );
+
+    // now assert over the stored account packet
+    let new_client = Client::connect(&[])
+        .await?
+        .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?;
+
+    if let Some(ap) = new_client.fetch_and_decrypt_vault().await?
{ + let ap_root_fetched = Client::deserialise_root(ap)?; + + assert_eq!( + root.map, ap_root_fetched.map, + "root fetched should match root put" + ); + } else { + bail!("No account packet found"); + } + + Ok(()) +} diff --git a/autonomi/tests/evm/mod.rs b/autonomi/tests/evm/mod.rs new file mode 100644 index 0000000000..fa74db16b4 --- /dev/null +++ b/autonomi/tests/evm/mod.rs @@ -0,0 +1,11 @@ +use autonomi; + +#[cfg(feature = "files")] +mod file; +#[cfg(feature = "data")] +mod put; +#[cfg(feature = "registers")] +mod register; +mod wallet; + +pub type Client = autonomi::evm::client::EvmClient; diff --git a/autonomi/tests/evm/put.rs b/autonomi/tests/evm/put.rs new file mode 100644 index 0000000000..415e9be6c6 --- /dev/null +++ b/autonomi/tests/evm/put.rs @@ -0,0 +1,26 @@ +use std::time::Duration; + +use crate::common; +use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +use crate::evm::Client; +use tokio::time::sleep; + +#[tokio::test] +async fn put() { + common::enable_logging(); + + let network = evm_network_from_env(); + let mut client = Client::connect(&[]).await.unwrap(); + let mut wallet = evm_wallet_from_env_or_default(network); + + let data = common::gen_random_data(1024 * 1024 * 10); + + // let quote = client.quote(data.clone()).await.unwrap(); + // let payment = client.pay(quote, &mut wallet).await.unwrap(); + let addr = client.put(data.clone(), &mut wallet).await.unwrap(); + + sleep(Duration::from_secs(10)).await; + + let data_fetched = client.get(addr).await.unwrap(); + assert_eq!(data, data_fetched, "data fetched should match data put"); +} diff --git a/autonomi/tests/evm/register.rs b/autonomi/tests/evm/register.rs new file mode 100644 index 0000000000..71aada72f8 --- /dev/null +++ b/autonomi/tests/evm/register.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use crate::common; +use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +use crate::evm::Client; +use bytes::Bytes; +use tokio::time::sleep; +use xor_name::XorName; + +#[tokio::test] +async fn register() { + common::enable_logging(); + + let network = evm_network_from_env(); + let mut client = Client::connect(&[]).await.unwrap(); + let mut wallet = evm_wallet_from_env_or_default(network); + + // Owner key of the register. 
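+    // (Keep this key: `update_register` below must be called with the same
+    // owner key to write a new value. A fresh random key is fine for a test.)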
+ let key = bls::SecretKey::random(); + + // Create a register with the value [1, 2, 3, 4] + let register = client + .create_register( + vec![1, 2, 3, 4].into(), + XorName::random(&mut rand::thread_rng()), + key.clone(), + &mut wallet, + ) + .await + .unwrap(); + + sleep(Duration::from_secs(10)).await; + + // Fetch the register again + let register = client.fetch_register(*register.address()).await.unwrap(); + + // Update the register with the value [5, 6, 7, 8] + client + .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) + .await + .unwrap(); + + sleep(Duration::from_secs(2)).await; + + // Fetch and verify the register contains the updated value + let register = client.fetch_register(*register.address()).await.unwrap(); + assert_eq!(register.values(), vec![Bytes::from(vec![5, 6, 7, 8])]); +} diff --git a/autonomi/tests/evm/wallet.rs b/autonomi/tests/evm/wallet.rs new file mode 100644 index 0000000000..3bb4972c03 --- /dev/null +++ b/autonomi/tests/evm/wallet.rs @@ -0,0 +1,36 @@ +use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +use const_hex::traits::FromHex; +use evmlib::common::{Address, Amount}; +use evmlib::wallet::Wallet; + +#[tokio::test] +async fn from_private_key() { + let private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa"; + let network = evm_network_from_env(); + let wallet = Wallet::new_from_private_key(network, private_key).unwrap(); + + assert_eq!( + wallet.address(), + Address::from_hex("0x69D5BF2Bc42bca8782b8D2b4FdfF2b1Fa7644Fe7").unwrap() + ) +} + +#[tokio::test] +async fn send_tokens() { + let network = evm_network_from_env(); + let wallet = evm_wallet_from_env_or_default(network.clone()); + + let receiving_wallet = Wallet::new_with_random_wallet(network); + + let initial_balance = receiving_wallet.balance_of_tokens().await.unwrap(); + + assert_eq!(initial_balance, Amount::from(0)); + + let _ = wallet + .transfer_tokens(receiving_wallet.address(), Amount::from(10)) + .await; + + let final_balance = receiving_wallet.balance_of_tokens().await.unwrap(); + + assert_eq!(final_balance, Amount::from(10)); +} diff --git a/autonomi/tests/integration.rs b/autonomi/tests/integration.rs new file mode 100644 index 0000000000..386c36ec91 --- /dev/null +++ b/autonomi/tests/integration.rs @@ -0,0 +1,5 @@ +pub(crate) mod common; +#[cfg(feature = "evm-payments")] +mod evm; +#[cfg(feature = "native-payments")] +mod native; diff --git a/autonomi/tests/file.rs b/autonomi/tests/native/file.rs similarity index 94% rename from autonomi/tests/file.rs rename to autonomi/tests/native/file.rs index 180fd49644..f6b5c30f35 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/native/file.rs @@ -1,13 +1,11 @@ use std::time::Duration; -use autonomi::Client; +use crate::common; +use crate::native::Client; use bytes::Bytes; use eyre::{bail, Result}; use tokio::time::sleep; -mod common; - -#[cfg(feature = "files")] #[tokio::test] async fn file() -> Result<(), Box> { common::enable_logging(); @@ -34,7 +32,7 @@ async fn file() -> Result<(), Box> { } // files and vault feats -#[cfg(all(feature = "files", feature = "vault"))] +#[cfg(feature = "vault")] #[tokio::test] async fn file_into_vault() -> Result<()> { common::enable_logging(); diff --git a/autonomi/tests/native/mod.rs b/autonomi/tests/native/mod.rs new file mode 100644 index 0000000000..620b16f77f --- /dev/null +++ b/autonomi/tests/native/mod.rs @@ -0,0 +1,10 @@ +use autonomi; + +#[cfg(feature = "files")] +mod file; +#[cfg(feature = "data")] +mod put; +#[cfg(feature = 
"registers")] +mod register; + +pub type Client = autonomi::native::client::NativeClient; diff --git a/autonomi/tests/put.rs b/autonomi/tests/native/put.rs similarity index 92% rename from autonomi/tests/put.rs rename to autonomi/tests/native/put.rs index 49eb263130..f9c1bb2dde 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/native/put.rs @@ -1,11 +1,9 @@ use std::time::Duration; -use autonomi::Client; +use crate::common; +use crate::native::Client; use tokio::time::sleep; -mod common; - -#[cfg(feature = "data")] #[tokio::test] async fn put() { common::enable_logging(); diff --git a/autonomi/tests/register.rs b/autonomi/tests/native/register.rs similarity index 95% rename from autonomi/tests/register.rs rename to autonomi/tests/native/register.rs index c51873b966..97355149a9 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/native/register.rs @@ -1,13 +1,11 @@ use std::time::Duration; -use autonomi::Client; +use crate::common; +use crate::native::Client; use bytes::Bytes; use tokio::time::sleep; use xor_name::XorName; -mod common; - -#[cfg(feature = "registers")] #[tokio::test] async fn register() { common::enable_logging(); From ff20d6df0ad6fccf4a1266d4456da56405c98bc7 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 27 Sep 2024 15:02:15 +0200 Subject: [PATCH 060/255] fix: merge conflicts --- Cargo.lock | 10 +- Cargo.toml | 1 + autonomi/src/client/data.rs | 6 +- autonomi/src/client/mod.rs | 7 +- autonomi/src/client/registers.rs | 10 +- autonomi/src/client/vault.rs | 121 ++---------------------- autonomi/src/evm/client/data.rs | 15 +-- autonomi/src/evm/client/files.rs | 8 +- autonomi/src/evm/client/mod.rs | 3 +- autonomi/src/evm/client/vault.rs | 103 ++++++++++++++++++++ autonomi/src/evm/mod.rs | 2 +- autonomi/src/native/client/data.rs | 41 +++----- autonomi/src/native/client/files.rs | 2 - autonomi/src/native/client/mod.rs | 3 +- autonomi/src/native/client/registers.rs | 8 +- autonomi/src/native/client/transfers.rs | 52 +++++----- autonomi/src/native/client/vault.rs | 103 ++++++++++++++++++++ autonomi/src/native/mod.rs | 3 +- autonomi/tests/common/mod.rs | 17 ++-- 19 files changed, 295 insertions(+), 220 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15a364cdfc..c0bf2820ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1155,7 +1155,6 @@ dependencies = [ "serde", "sn_bls_ckd", "sn_curv", - "sn_networking", "sn_evm", "sn_networking", "sn_peers_acquisition", @@ -2904,6 +2903,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "evm_testnet" +version = "0.1.0" +dependencies = [ + "clap", + "evmlib", + "tokio", +] + [[package]] name = "evmlib" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 10ebb63d70..fb86e31a39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "autonomi", "evmlib", + "evm_testnet", "sn_auditor", "sn_build_info", "sn_evm", diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index b064da1e76..9ee4559c20 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -4,13 +4,11 @@ use bytes::Bytes; use evmlib::wallet; use libp2p::kad::Quorum; use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; +use sn_networking::{GetRecordCfg, NetworkError}; use sn_protocol::storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}; use sn_protocol::NetworkAddress; use std::collections::HashSet; -use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg}; -use sn_transfers::Payment; -use sn_transfers::{HotWallet, MainPubkey, 
NanoTokens, PaymentQuote}; -use tokio::task::{JoinError, JoinSet}; +use tokio::task::JoinError; use xor_name::XorName; /// Errors that can occur during the put operation. diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index c7235f5739..3943c1e2bd 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -38,7 +38,7 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; pub struct Client { pub(crate) network: Network, #[cfg(feature = "vault")] - vault_secret_key: Option, + pub(crate) vault_secret_key: Option, } /// Error returned by [`Client::connect`]. @@ -183,7 +183,10 @@ pub trait ClientWrapper { &self.client().network } - async fn connect(peers: &[Multiaddr]) -> Result { + async fn connect(peers: &[Multiaddr]) -> Result + where + Self: Sized, + { let client = Client::connect(peers).await?; Ok(Self::from_client(client)) } diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index f89957ab94..fb87071a42 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -1,6 +1,6 @@ use std::collections::BTreeSet; -use super::data::PayError; +use crate::client::data::PayError; use crate::client::{Client, ClientWrapper}; use bls::SecretKey; use bytes::Bytes; @@ -14,13 +14,7 @@ use sn_protocol::storage::RecordKind; use sn_protocol::storage::RegisterAddress; use sn_protocol::NetworkAddress; use sn_registers::EntryHash; -use sn_registers::Permissions; -use sn_registers::Register as ClientRegister; use sn_registers::SignedRegister; -use sn_transfers::HotWallet; -use xor_name::XorName; - -use super::data::PayError; #[derive(Debug, thiserror::Error)] pub enum RegisterError { @@ -46,7 +40,7 @@ pub enum RegisterError { #[derive(Clone, Debug)] pub struct Register { - inner: SignedRegister, + pub(crate) inner: SignedRegister, } impl Register { diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 2b79fafe0b..7cc1d080bb 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -1,18 +1,12 @@ use std::collections::HashSet; -use super::data::PutError; use crate::client::{Client, ClientWrapper}; use bls::SecretKey; use bytes::Bytes; -use libp2p::kad::{Quorum, Record}; -use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind}; -use sn_protocol::storage::{RetryStrategy, Scratchpad, ScratchpadAddress}; -use sn_protocol::{ - storage::{try_deserialize_record, try_serialize_record, RecordKind}, - NetworkAddress, -}; -use sn_transfers::HotWallet; -use tracing::info; +use libp2p::kad::Quorum; +use sn_networking::{GetRecordCfg, NetworkError}; +use sn_protocol::storage::{Scratchpad, ScratchpadAddress}; +use sn_protocol::{storage::try_deserialize_record, NetworkAddress}; #[derive(Debug, thiserror::Error)] pub enum VaultError { @@ -83,102 +77,13 @@ impl Client { Ok(pad) } - - /// Put data into the client's VaultPacket - /// - /// Returns Ok(None) early if no vault packet is defined. - /// - /// Pays for a new VaultPacket if none yet created for the client. Returns the current version - /// of the data on success. 
- pub async fn write_bytes_to_vault_if_defined( - &mut self, - data: Bytes, - wallet: &mut HotWallet, - ) -> Result, PutError> { - // Exit early if no vault packet defined - let Some(client_sk) = self.vault_secret_key.as_ref() else { - return Ok(None); - }; - - let client_pk = client_sk.public_key(); - - let pad_res = self.get_vault_from_network().await; - - let mut is_new = true; - let mut scratch = if let Ok(existing_data) = pad_res { - tracing::info!("Scratchpad already exists, returning existing data"); - - info!( - "scratch already exists, is version {:?}", - existing_data.count() - ); - - is_new = false; - existing_data - } else { - tracing::trace!("new scratchpad creation"); - Scratchpad::new(client_pk) - }; - - let next_count = scratch.update_and_sign(data, client_sk); - let scratch_address = scratch.network_address(); - let scratch_key = scratch_address.to_record_key(); - - let record = if is_new { - self.pay( - [&scratch_address].iter().filter_map(|f| f.as_xorname()), - wallet, - ) - .await?; - - let (payment, _payee) = self.get_recent_payment_for_addr( - &scratch_address.as_xorname().ok_or(PutError::VaultXorName)?, - wallet, - )?; - - Record { - key: scratch_key, - value: try_serialize_record(&(payment, scratch), RecordKind::ScratchpadWithPayment) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - } else { - Record { - key: scratch_key, - value: try_serialize_record(&scratch, RecordKind::Scratchpad) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: None, - verification: Some(( - VerificationKind::Network, - GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: None, - expected_holders: HashSet::new(), - is_register: false, - }, - )), - }; - - self.network.put_record(record, &put_cfg).await?; - - Ok(Some(next_count)) - } } pub trait Vault: ClientWrapper { - fn with_vault_entropy(mut self, bytes: Bytes) -> Result { + fn with_vault_entropy(self, bytes: Bytes) -> Result + where + Self: Sized, + { let client = self.into_client().with_vault_entropy(bytes)?; Ok(Self::from_client(client)) } @@ -190,14 +95,4 @@ pub trait Vault: ClientWrapper { async fn get_vault_from_network(&self) -> Result { self.client().get_vault_from_network().await } - - async fn write_bytes_to_vault_if_defined( - &mut self, - data: Bytes, - wallet: &mut HotWallet, - ) -> Result, PutError> { - self.client_mut() - .write_bytes_to_vault_if_defined(data, wallet) - .await - } } diff --git a/autonomi/src/evm/client/data.rs b/autonomi/src/evm/client/data.rs index a17340f9ce..9061b6fb19 100644 --- a/autonomi/src/evm/client/data.rs +++ b/autonomi/src/evm/client/data.rs @@ -1,25 +1,20 @@ -use crate::client::data::{Data, GetError, PayError, PutError}; +use crate::client::data::{Data, PayError, PutError}; use crate::client::ClientWrapper; use crate::evm::client::EvmClient; -use crate::evm::Client; -use crate::self_encryption::{encrypt, DataMapLevel}; +use crate::self_encryption::encrypt; use bytes::Bytes; use evmlib::common::{QuoteHash, QuotePayment, TxHash}; -use evmlib::wallet; use evmlib::wallet::Wallet; use libp2p::futures; use libp2p::kad::{Quorum, Record}; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; use sn_evm::ProofOfPayment; -use sn_networking::{GetRecordCfg, PutRecordCfg}; +use sn_networking::PutRecordCfg; use sn_networking::{Network, 
NetworkError, PayeeQuote}; use sn_protocol::{ - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, - }, + storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind}, NetworkAddress, }; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap}; use xor_name::XorName; impl Data for EvmClient {} diff --git a/autonomi/src/evm/client/files.rs b/autonomi/src/evm/client/files.rs index 3d3a1424c3..be8e02e1cb 100644 --- a/autonomi/src/evm/client/files.rs +++ b/autonomi/src/evm/client/files.rs @@ -1,11 +1,8 @@ -use std::{collections::HashMap, path::PathBuf}; - -use super::data::{GetError, PutError}; use crate::client::files::{FilePointer, Files, Root, UploadError}; use crate::evm::client::EvmClient; use bytes::{BufMut, Bytes}; use evmlib::wallet::Wallet; -use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, path::PathBuf}; use walkdir::WalkDir; use xor_name::XorName; @@ -23,12 +20,15 @@ impl EvmClient { for entry in WalkDir::new(path) { let entry = entry?; + if !entry.file_type().is_file() { continue; } + let path = entry.path().to_path_buf(); tracing::info!("Uploading file: {path:?}"); let file = upload_from_file(self, path.clone(), wallet).await?; + map.insert(path, file); } diff --git a/autonomi/src/evm/client/mod.rs b/autonomi/src/evm/client/mod.rs index 1c94079eda..361fdddc9e 100644 --- a/autonomi/src/evm/client/mod.rs +++ b/autonomi/src/evm/client/mod.rs @@ -1,5 +1,4 @@ -use crate::client::{Client, ClientWrapper, ConnectError}; -use crate::Multiaddr; +use crate::client::{Client, ClientWrapper}; #[cfg(feature = "data")] pub mod data; diff --git a/autonomi/src/evm/client/vault.rs b/autonomi/src/evm/client/vault.rs index 7d91ed27cd..c71b9803b7 100644 --- a/autonomi/src/evm/client/vault.rs +++ b/autonomi/src/evm/client/vault.rs @@ -1,4 +1,107 @@ +use crate::client::data::PutError; use crate::client::vault::Vault; +use crate::client::ClientWrapper; use crate::evm::client::EvmClient; +use bytes::Bytes; +use evmlib::wallet::Wallet; +use libp2p::kad::{Quorum, Record}; +use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; +use sn_protocol::storage::{try_serialize_record, RecordKind, RetryStrategy, Scratchpad}; +use std::collections::HashSet; +use tracing::info; impl Vault for EvmClient {} + +impl EvmClient { + /// Put data into the client's VaultPacket + /// + /// Returns Ok(None) early if no vault packet is defined. + /// + /// Pays for a new VaultPacket if none yet created for the client. Returns the current version + /// of the data on success. 
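+    ///
+    /// Sketch of the intended flow (mirrors the `file_into_vault` test; the
+    /// entropy bytes and the wallet are placeholders):
+    ///
+    /// ```no_run
+    /// # async fn example(client: EvmClient, mut wallet: evmlib::wallet::Wallet) {
+    /// use bytes::Bytes;
+    /// let mut client = client
+    ///     .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))
+    ///     .expect("derive vault key");
+    /// let version = client
+    ///     .write_bytes_to_vault_if_defined(Bytes::from("hello"), &mut wallet)
+    ///     .await
+    ///     .expect("vault write");
+    /// assert!(version.is_some());
+    /// # }
+    /// ```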
+ pub async fn write_bytes_to_vault_if_defined( + &mut self, + data: Bytes, + wallet: &mut Wallet, + ) -> Result, PutError> { + // Exit early if no vault packet defined + let Some(client_sk) = self.client().vault_secret_key.as_ref() else { + return Ok(None); + }; + + let client_pk = client_sk.public_key(); + + let pad_res = self.get_vault_from_network().await; + let mut is_new = true; + + let mut scratch = if let Ok(existing_data) = pad_res { + tracing::info!("Scratchpad already exists, returning existing data"); + + info!( + "scratch already exists, is version {:?}", + existing_data.count() + ); + + is_new = false; + existing_data + } else { + tracing::trace!("new scratchpad creation"); + Scratchpad::new(client_pk) + }; + + let next_count = scratch.update_and_sign(data, client_sk); + let scratch_address = scratch.network_address(); + let scratch_key = scratch_address.to_record_key(); + + let record = if is_new { + self.pay( + [&scratch_address].iter().filter_map(|f| f.as_xorname()), + wallet, + ) + .await?; + + let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; + let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; + // Should always be there, else it would have failed on the payment step. + let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + + Record { + key: scratch_key, + value: try_serialize_record(&(proof, scratch), RecordKind::ScratchpadWithPayment) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: scratch_key, + value: try_serialize_record(&scratch, RecordKind::Scratchpad) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: None, + verification: Some(( + VerificationKind::Network, + GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }, + )), + }; + + self.network().put_record(record, &put_cfg).await?; + + Ok(Some(next_count)) + } +} diff --git a/autonomi/src/evm/mod.rs b/autonomi/src/evm/mod.rs index bb68c57eed..a7c160ea53 100644 --- a/autonomi/src/evm/mod.rs +++ b/autonomi/src/evm/mod.rs @@ -1,4 +1,4 @@ -pub use client::Client; +pub use crate::client::Client; pub mod client; diff --git a/autonomi/src/native/client/data.rs b/autonomi/src/native/client/data.rs index 02126fba4e..2b0a96bfb7 100644 --- a/autonomi/src/native/client/data.rs +++ b/autonomi/src/native/client/data.rs @@ -1,30 +1,22 @@ -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use super::transfers::SendSpendsError; use crate::client::data::{Data, PayError, PutError}; use crate::client::ClientWrapper; use crate::native::client::NativeClient; -use crate::native::Client; -use crate::self_encryption::{encrypt, DataMapLevel}; +use crate::self_encryption::encrypt; use bytes::Bytes; use libp2p::{ kad::{Quorum, Record}, PeerId, }; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_client::{ - networking::{GetRecordCfg, NetworkError, PutRecordCfg}, - transfers::{HotWallet, MainPubkey, NanoTokens, PaymentQuote}, - StoragePaymentResult, -}; +use sn_networking::PutRecordCfg; use sn_protocol::{ - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, - }, + storage::{try_serialize_record, Chunk, 
ChunkAddress, RecordKind}, NetworkAddress, }; -use sn_transfers::Payment; -use tokio::task::{JoinError, JoinSet}; +use sn_transfers::{HotWallet, MainPubkey, NanoTokens, Payment, PaymentQuote}; +use tokio::task::JoinSet; use xor_name::XorName; impl Data for NativeClient {} @@ -46,8 +38,7 @@ impl NativeClient { xor_names.push(*chunk.name()); } - let StoragePaymentResult { skipped_chunks, .. } = - self.pay(xor_names.into_iter(), wallet).await?; + let (_, skipped_chunks) = self.pay(xor_names.into_iter(), wallet).await?; // TODO: Upload in parallel if !skipped_chunks.contains(map.name()) { @@ -68,7 +59,7 @@ impl NativeClient { &mut self, content_addrs: impl Iterator, wallet: &mut HotWallet, - ) -> Result { + ) -> Result<(NanoTokens, Vec), PayError> { let mut tasks = JoinSet::new(); for content_addr in content_addrs { @@ -117,26 +108,20 @@ impl NativeClient { } } - let (storage_cost, royalty_fees) = if cost_map.is_empty() { - (NanoTokens::zero(), NanoTokens::zero()) + let storage_cost = if cost_map.is_empty() { + NanoTokens::zero() } else { self.pay_for_records(&cost_map, wallet).await? }; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - - Ok(res) + Ok((storage_cost, skipped_chunks)) } async fn pay_for_records( &mut self, cost_map: &BTreeMap)>, wallet: &mut HotWallet, - ) -> Result<(NanoTokens, NanoTokens), PayError> { + ) -> Result { // Before wallet progress, there shall be no `unconfirmed_spend_requests` self.resend_pending_transactions(wallet).await; @@ -177,7 +162,7 @@ impl NativeClient { tracing::trace!("clear up spends of {} chunks completed", cost_map.len(),); - Ok(total_cost) + Ok(total_cost.0) } /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. diff --git a/autonomi/src/native/client/files.rs b/autonomi/src/native/client/files.rs index f9fec44518..b73839bf01 100644 --- a/autonomi/src/native/client/files.rs +++ b/autonomi/src/native/client/files.rs @@ -2,9 +2,7 @@ use std::{collections::HashMap, path::PathBuf}; use crate::client::files::{FilePointer, Files, Root, UploadError}; use crate::native::client::NativeClient; -use crate::native::Client; use bytes::Bytes; -use serde::{Deserialize, Serialize}; use sn_transfers::HotWallet; use walkdir::WalkDir; use xor_name::XorName; diff --git a/autonomi/src/native/client/mod.rs b/autonomi/src/native/client/mod.rs index 58d86f0a9d..4b3f2544ac 100644 --- a/autonomi/src/native/client/mod.rs +++ b/autonomi/src/native/client/mod.rs @@ -1,5 +1,4 @@ -use crate::client::{Client, ClientWrapper, ConnectError}; -use crate::Multiaddr; +use crate::client::{Client, ClientWrapper}; #[cfg(feature = "data")] pub mod data; diff --git a/autonomi/src/native/client/registers.rs b/autonomi/src/native/client/registers.rs index 5fa6f21109..6ca80f36d6 100644 --- a/autonomi/src/native/client/registers.rs +++ b/autonomi/src/native/client/registers.rs @@ -4,13 +4,13 @@ use crate::native::client::NativeClient; use bls::SecretKey; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; -use sn_client::networking::PutRecordCfg; -use sn_client::registers::Permissions; -use sn_client::registers::Register as ClientRegister; -use sn_client::transfers::HotWallet; +use sn_networking::PutRecordCfg; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; use sn_protocol::NetworkAddress; +use sn_registers::Permissions; +use sn_registers::Register as ClientRegister; +use sn_transfers::HotWallet; use xor_name::XorName; impl Registers for NativeClient {} diff --git 
a/autonomi/src/native/client/transfers.rs b/autonomi/src/native/client/transfers.rs index 6d14d3615a..0117631386 100644 --- a/autonomi/src/native/client/transfers.rs +++ b/autonomi/src/native/client/transfers.rs @@ -1,10 +1,29 @@ -use crate::wallet::MemWallet; -use crate::Client; +use crate::client::ClientWrapper; +use crate::native::client::NativeClient; +use crate::native::wallet; +use crate::native::wallet::MemWallet; +use crate::VERIFY_STORE; +use libp2p::{ + futures::future::join_all, + kad::{Quorum, Record}, + PeerId, +}; +use sn_networking::{ + GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, +}; +use sn_protocol::{ + storage::{try_serialize_record, RecordKind, RetryStrategy, SpendAddress}, + NetworkAddress, PrettyPrintRecordKey, +}; +use sn_transfers::CashNote; +use sn_transfers::Payment; +use sn_transfers::UniquePubkey; +use sn_transfers::{HotWallet, SignedSpend}; use sn_transfers::{MainPubkey, NanoTokens}; use sn_transfers::{SpendReason, Transfer}; - -use sn_transfers::UniquePubkey; use std::collections::BTreeSet; +use std::collections::HashSet; +use xor_name::XorName; #[derive(Debug, thiserror::Error)] pub enum SendSpendsError { @@ -23,7 +42,7 @@ pub enum TransferError { #[error("Wallet error: {0:?}")] WalletError(#[from] wallet::error::WalletError), #[error("Network error: {0:?}")] - NetworkError(#[from] sn_networking::NetworkError), + NetworkError(#[from] NetworkError), } #[derive(Debug, thiserror::Error)] @@ -34,29 +53,6 @@ pub enum CashNoteError { FailedToGetSpend(String), } -use libp2p::{ - futures::future::join_all, - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::{ - GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, -}; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind, RetryStrategy, SpendAddress}, - NetworkAddress, PrettyPrintRecordKey, -}; -use sn_transfers::Payment; -use sn_transfers::{HotWallet, SignedSpend}; -use xor_name::XorName; -use crate::client::ClientWrapper; -use crate::native::client::NativeClient; -use crate::native::wallet::MemWallet; -use crate::native::{wallet, Client}; -use crate::VERIFY_STORE; -use sn_transfers::CashNote; -use std::collections::HashSet; - #[derive(Debug, thiserror::Error)] pub enum SendError { #[error("CashNote amount unexpected: {0}")] diff --git a/autonomi/src/native/client/vault.rs b/autonomi/src/native/client/vault.rs index 70696b8652..bfa5f6d31d 100644 --- a/autonomi/src/native/client/vault.rs +++ b/autonomi/src/native/client/vault.rs @@ -1,4 +1,107 @@ +use crate::client::data::PutError; use crate::client::vault::Vault; +use crate::client::ClientWrapper; use crate::native::client::NativeClient; +use bytes::Bytes; +use libp2p::kad::{Quorum, Record}; +use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; +use sn_protocol::storage::{try_serialize_record, RecordKind, RetryStrategy, Scratchpad}; +use sn_transfers::HotWallet; +use std::collections::HashSet; +use tracing::info; impl Vault for NativeClient {} + +impl NativeClient { + /// Put data into the client's VaultPacket + /// + /// Returns Ok(None) early if no vault packet is defined. + /// + /// Pays for a new VaultPacket if none yet created for the client. Returns the current version + /// of the data on success. 
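+    ///
+    /// Apart from paying from a `HotWallet` instead of an EVM wallet, this
+    /// mirrors `EvmClient::write_bytes_to_vault_if_defined`; see that method
+    /// for a usage sketch.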
+ pub async fn write_bytes_to_vault_if_defined( + &mut self, + data: Bytes, + wallet: &mut HotWallet, + ) -> Result, PutError> { + // Exit early if no vault packet defined + let Some(client_sk) = self.client().vault_secret_key.as_ref() else { + return Ok(None); + }; + + let client_pk = client_sk.public_key(); + + let pad_res = self.get_vault_from_network().await; + + let mut is_new = true; + let mut scratch = if let Ok(existing_data) = pad_res { + tracing::info!("Scratchpad already exists, returning existing data"); + + info!( + "scratch already exists, is version {:?}", + existing_data.count() + ); + + is_new = false; + existing_data + } else { + tracing::trace!("new scratchpad creation"); + Scratchpad::new(client_pk) + }; + + let next_count = scratch.update_and_sign(data, client_sk); + let scratch_address = scratch.network_address(); + let scratch_key = scratch_address.to_record_key(); + + let record = if is_new { + self.pay( + [&scratch_address].iter().filter_map(|f| f.as_xorname()), + wallet, + ) + .await?; + + let (payment, _payee) = self.get_recent_payment_for_addr( + &scratch_address.as_xorname().ok_or(PutError::VaultXorName)?, + wallet, + )?; + + Record { + key: scratch_key, + value: try_serialize_record(&(payment, scratch), RecordKind::ScratchpadWithPayment) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: scratch_key, + value: try_serialize_record(&scratch, RecordKind::Scratchpad) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: None, + verification: Some(( + VerificationKind::Network, + GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }, + )), + }; + + self.network().put_record(record, &put_cfg).await?; + + Ok(Some(next_count)) + } +} diff --git a/autonomi/src/native/mod.rs b/autonomi/src/native/mod.rs index 3341f5c0a0..d5d898c98b 100644 --- a/autonomi/src/native/mod.rs +++ b/autonomi/src/native/mod.rs @@ -1,6 +1,5 @@ -pub use client::Client; +pub use crate::client::Client; pub mod client; -mod secrets; #[cfg(feature = "transfers")] mod wallet; diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs index 9d5b08b5e9..3f62f45328 100644 --- a/autonomi/tests/common/mod.rs +++ b/autonomi/tests/common/mod.rs @@ -6,14 +6,13 @@ use bip39::Mnemonic; use bls::SecretKey; use bytes::Bytes; use const_hex::ToHexExt; -use evmlib::CustomNetwork; use curv::elliptic::curves::ECScalar as _; +use evmlib::CustomNetwork; use libp2p::Multiaddr; -use rand::Rng; +use rand::{Rng, RngCore}; use sn_peers_acquisition::parse_peer_addr; -use sn_transfers::{get_faucet_data_dir, HotWallet}; -use std::env; use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey}; +use std::env; const MNEMONIC_FILENAME: &str = "account_secret"; const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0"; @@ -110,7 +109,7 @@ fn create_faucet_account_and_wallet() -> HotWallet { pub fn write_mnemonic_to_disk( files_dir: &Path, - mnemonic: &bip39::Mnemonic, + mnemonic: &Mnemonic, ) -> Result<(), Box> { let filename = files_dir.join(MNEMONIC_FILENAME); let content = mnemonic.to_string(); @@ -120,18 +119,18 @@ pub fn write_mnemonic_to_disk( pub(super) fn read_mnemonic_from_disk( files_dir: &Path, -) -> Result> { +) -> Result> { let filename = 
files_dir.join(MNEMONIC_FILENAME); let content = std::fs::read_to_string(filename)?; - let mnemonic = bip39::Mnemonic::parse_normalized(&content)?; + let mnemonic = Mnemonic::parse_normalized(&content)?; Ok(mnemonic) } -fn random_eip2333_mnemonic() -> Result> { +fn random_eip2333_mnemonic() -> Result> { let mut entropy = [1u8; 32]; let rng = &mut rand::rngs::OsRng; rng.fill_bytes(&mut entropy); - let mnemonic = bip39::Mnemonic::from_entropy(&entropy)?; + let mnemonic = Mnemonic::from_entropy(&entropy)?; Ok(mnemonic) } From 9077214a14b7fbcb3ea96eab02b5df107fe635ff Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 27 Sep 2024 15:10:52 +0200 Subject: [PATCH 061/255] chore: add `evm_testnet` to workspace members --- Cargo.lock | 9 +++++++++ Cargo.toml | 1 + 2 files changed, 10 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index dc24863112..d0bb8234a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2900,6 +2900,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "evm_testnet" +version = "0.1.0" +dependencies = [ + "clap", + "evmlib", + "tokio", +] + [[package]] name = "evmlib" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 10ebb63d70..fb86e31a39 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ "autonomi", "evmlib", + "evm_testnet", "sn_auditor", "sn_build_info", "sn_evm", From 87943f8a4feaa9c8880fdc5f02748384ce035a75 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 27 Sep 2024 17:30:55 +0200 Subject: [PATCH 062/255] fix: node manager --- sn_node/src/bin/safenode/main.rs | 9 ++++-- sn_node_manager/Cargo.toml | 1 + sn_node_manager/src/cmd/local.rs | 8 ++++++ sn_node_manager/src/local.rs | 47 +++++++++++++++++++------------- 4 files changed, 43 insertions(+), 22 deletions(-) diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 9d2211597b..f3888db47c 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -12,9 +12,9 @@ extern crate tracing; mod rpc_service; mod subcommands; +use crate::subcommands::EvmNetworkCommand; use clap::{command, Parser}; use color_eyre::{eyre::eyre, Result}; -use crate::subcommands::EvmNetworkCommand; use const_hex::traits::FromHex; use libp2p::{identity::Keypair, PeerId}; use sn_evm::{EvmNetwork, RewardsAddress}; @@ -129,7 +129,7 @@ struct Opt { /// The rewards address is the address that will receive the rewards for the node. /// It should be a valid EVM address. #[clap(long)] - rewards_address: String, + rewards_address: Option, /// Specify the EVM network to use. /// The network can either be a pre-configured one or a custom network. 
@@ -231,8 +231,11 @@ fn main() -> Result<()> { ); return Ok(()); } + // evm config - let rewards_address = RewardsAddress::from_hex(&opt.rewards_address)?; + let rewards_address = RewardsAddress::from_hex(opt.rewards_address.as_ref().expect( + "the following required arguments were not provided: --rewards-address ", + ))?; if opt.crate_version { println!("Crate version: {}", env!("CARGO_PKG_VERSION")); diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 78d5c5b9c4..4163854115 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -29,6 +29,7 @@ quic = [] statemap = [] tcp = [] websockets = [] +faucet = [] [dependencies] chrono = "~0.4.19" diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index 699495d291..8e1ba90c31 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -61,6 +61,8 @@ pub async fn join( let mut local_node_registry = NodeRegistry::load(local_node_reg_path)?; let release_repo = ::default_config(); + + #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, faucet_path, @@ -70,6 +72,7 @@ pub async fn join( verbosity, ) .await?; + let safenode_bin_path = get_bin_path( build, node_path, @@ -97,6 +100,7 @@ pub async fn join( }; let options = LocalNetworkOptions { enable_metrics_server, + #[cfg(feature = "faucet")] faucet_bin_path, interval, join: true, @@ -192,6 +196,8 @@ pub async fn run( info!("Launching local network"); let release_repo = ::default_config(); + + #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, faucet_path, @@ -201,6 +207,7 @@ pub async fn run( verbosity, ) .await?; + let safenode_bin_path = get_bin_path( build, node_path, @@ -213,6 +220,7 @@ pub async fn run( let options = LocalNetworkOptions { enable_metrics_server, + #[cfg(feature = "faucet")] faucet_bin_path, join: false, interval, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 863cc748d9..ed39f67c12 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, + check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, increment_port_option, }; use color_eyre::eyre::OptionExt; @@ -23,8 +23,9 @@ use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, - NodeRegistry, NodeServiceData, ServiceStatus, + FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, }; +use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, @@ -36,6 +37,7 @@ use sysinfo::{Pid, System}; #[cfg_attr(test, automock)] pub trait Launcher { fn get_safenode_path(&self) -> PathBuf; + #[cfg(feature = "faucet")] fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result; #[allow(clippy::too_many_arguments)] fn launch_node( @@ -54,6 +56,7 @@ pub trait Launcher { #[derive(Default)] pub struct LocalSafeLauncher { + #[cfg(feature = "faucet")] pub faucet_bin_path: PathBuf, pub safenode_bin_path: PathBuf, } @@ -63,6 +66,7 @@ impl Launcher for LocalSafeLauncher { self.safenode_bin_path.clone() } + #[cfg(feature = "faucet")] fn launch_faucet(&self, genesis_multiaddr: &Multiaddr) -> Result { info!("Launching the faucet server..."); debug!("Using genesis_multiaddr: {}", genesis_multiaddr.to_string()); @@ -72,11 +76,14 @@ impl Launcher for LocalSafeLauncher { 
"server".to_string(), ]; + #[cfg(feature = "faucet")] debug!( "Using faucet binary: {}", self.faucet_bin_path.to_string_lossy() ); + debug!("Using args: {}", args.join(" ")); + let child = Command::new(self.faucet_bin_path.clone()) .args(args) .stdout(Stdio::inherit()) @@ -241,6 +248,7 @@ pub fn kill_network(node_registry: &NodeRegistry, keep_directories: bool) -> Res pub struct LocalNetworkOptions { pub enable_metrics_server: bool, + #[cfg(feature = "faucet")] pub faucet_bin_path: PathBuf, pub join: bool, pub interval: u64, @@ -283,6 +291,7 @@ pub async fn run_network( let launcher = LocalSafeLauncher { safenode_bin_path: options.safenode_bin_path.to_path_buf(), + #[cfg(feature = "faucet")] faucet_bin_path: options.faucet_bin_path.to_path_buf(), }; @@ -409,23 +418,23 @@ pub async fn run_network( validate_network(node_registry, bootstrap_peers.clone()).await?; } - // TODO: re-enable faucet when it can do EVM payments or when we switch back to native payments - // if !options.join { - // println!("Launching the faucet server..."); - // let pid = launcher.launch_faucet(&bootstrap_peers[0])?; - // let version = get_bin_version(&options.faucet_bin_path)?; - // let faucet = FaucetServiceData { - // faucet_path: options.faucet_bin_path, - // local: true, - // log_dir_path: get_faucet_data_dir(), - // pid: Some(pid), - // service_name: "faucet".to_string(), - // status: ServiceStatus::Running, - // user: get_username()?, - // version, - // }; - // node_registry.faucet = Some(faucet); - // } + #[cfg(feature = "faucet")] + if !options.join { + println!("Launching the faucet server..."); + let pid = launcher.launch_faucet(&bootstrap_peers[0])?; + let version = get_bin_version(&options.faucet_bin_path)?; + let faucet = FaucetServiceData { + faucet_path: options.faucet_bin_path, + local: true, + log_dir_path: get_faucet_data_dir(), + pid: Some(pid), + service_name: "faucet".to_string(), + status: ServiceStatus::Running, + user: get_username()?, + version, + }; + node_registry.faucet = Some(faucet); + } Ok(()) } From 983ef897b73218f7a7f1a0d33037feca48151b70 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Sun, 29 Sep 2024 20:50:09 +0200 Subject: [PATCH 063/255] fix: tests --- autonomi/src/client/mod.rs | 4 ++-- autonomi/src/evm/client/mod.rs | 3 ++- autonomi/src/lib.rs | 19 +------------------ autonomi/src/native/client/mod.rs | 3 ++- autonomi/src/native/mod.rs | 2 +- autonomi/tests/evm/put.rs | 2 ++ 6 files changed, 10 insertions(+), 23 deletions(-) diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 3943c1e2bd..292bf53275 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -26,7 +26,7 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; /// To connect to the network, use [`Client::connect`]. /// /// ```no_run -/// # use autonomi::Client; +/// # use autonomi::client::Client; /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?]; @@ -58,7 +58,7 @@ impl Client { /// This will timeout after 20 seconds. (See [`CONNECT_TIMEOUT_SECS`].) 
     ///
     /// ```no_run
-    /// # use autonomi::Client;
+    /// # use autonomi::client::Client;
     /// # #[tokio::main]
     /// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
     /// let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?];
diff --git a/autonomi/src/evm/client/mod.rs b/autonomi/src/evm/client/mod.rs
index 361fdddc9e..855dc30256 100644
--- a/autonomi/src/evm/client/mod.rs
+++ b/autonomi/src/evm/client/mod.rs
@@ -6,7 +6,8 @@ pub mod data;
 pub mod files;
 #[cfg(feature = "registers")]
 pub mod registers;
-mod vault;
+#[cfg(feature = "vault")]
+pub mod vault;
 
 #[derive(Clone)]
 pub struct EvmClient {
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index ac98e10ebe..295c3ca576 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -14,23 +14,6 @@
 //! and the history is kept. Multiple values can exist side by side in case of
 //! concurrency, but should converge to a single value eventually.
 //!
-//! # Example
-//!
-//! ```no_run
-//! # use autonomi::{Client, Bytes};
-//! # #[tokio::main]
-//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
-//! let peers = ["/ip4/127.0.0.1/udp/1234/quic-v1".parse()?];
-//! let client = Client::connect(&peers).await?;
-//!
-//! # let mut wallet = todo!();
-//! let addr = client.put(Bytes::from("Hello, World"), &mut wallet).await?;
-//! let data = client.get(addr).await?;
-//! assert_eq!(data, Bytes::from("Hello, World"));
-//! # Ok(())
-//! # }
-//! ```
-//!
 //! # Features
 //!
 //! - `local`: Discover local peers using mDNS. Useful for development.
@@ -43,7 +26,7 @@ pub use bytes::Bytes;
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
 pub use libp2p::Multiaddr;
 
-pub(crate) mod client;
+pub mod client;
 
 #[cfg(feature = "evm-payments")]
 pub mod evm;
 #[cfg(feature = "native-payments")]
diff --git a/autonomi/src/native/client/mod.rs b/autonomi/src/native/client/mod.rs
index 4b3f2544ac..766aa67416 100644
--- a/autonomi/src/native/client/mod.rs
+++ b/autonomi/src/native/client/mod.rs
@@ -8,7 +8,8 @@ pub mod files;
 pub mod registers;
 #[cfg(feature = "transfers")]
 pub mod transfers;
-mod vault;
+#[cfg(feature = "vault")]
+pub mod vault;
 
 #[derive(Clone)]
 pub struct NativeClient {
diff --git a/autonomi/src/native/mod.rs b/autonomi/src/native/mod.rs
index d5d898c98b..099e38490e 100644
--- a/autonomi/src/native/mod.rs
+++ b/autonomi/src/native/mod.rs
@@ -2,4 +2,4 @@ pub use crate::client::Client;
 pub mod client;
 
 #[cfg(feature = "transfers")]
-mod wallet;
+pub mod wallet;
diff --git a/autonomi/tests/evm/put.rs b/autonomi/tests/evm/put.rs
index 415e9be6c6..7502ceef4d 100644
--- a/autonomi/tests/evm/put.rs
+++ b/autonomi/tests/evm/put.rs
@@ -3,6 +3,8 @@ use std::time::Duration;
 
 use crate::common;
 use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default};
 use crate::evm::Client;
+use autonomi::client::data::Data;
+use autonomi::client::ClientWrapper;
 use tokio::time::sleep;
 
 #[tokio::test]

From 4dd0405b3da84d939ca858068f1a4c9f03fd02bf Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Mon, 30 Sep 2024 11:28:56 +0200
Subject: [PATCH 064/255] fix(launchpad): cancelling changing to custom port
 mode restores old values

---
 .../src/components/popup/port_range.rs        | 51 +++++++++++--------
 1 file changed, 30 insertions(+), 21 deletions(-)

diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs
index b3267969b2..23b79870e5 100644
--- a/node-launchpad/src/components/popup/port_range.rs
+++ b/node-launchpad/src/components/popup/port_range.rs
@@ -297,31 +297,40 @@ impl Component for PortRangePopUp {
             }
KeyCode::Esc => { debug!("Got Esc, restoring the old values and switching to actual screen"); - // if the old values are 0 means that is the first time the user opens the app, - // so we should set the connection mode to automatic. - if self.port_from_old_value.to_string() == "0" - && self.port_to_old_value.to_string() == "0" - { - self.connection_mode = self - .connection_mode_old_value - .unwrap_or(ConnectionMode::Automatic); - return Ok(vec![ - Action::StoreConnectionMode(self.connection_mode), + if let Some(connection_mode_old_value) = self.connection_mode_old_value { + vec![ Action::OptionsActions(OptionsActions::UpdateConnectionMode( - self.connection_mode, + connection_mode_old_value, )), Action::SwitchScene(Scene::Options), - ]); + ] + } else { + // if the old values are 0 means that is the first time the user opens the app, + // so we should set the connection mode to automatic. + if self.port_from_old_value.to_string() == "0" + && self.port_to_old_value.to_string() == "0" + { + self.connection_mode = self + .connection_mode_old_value + .unwrap_or(ConnectionMode::Automatic); + return Ok(vec![ + Action::StoreConnectionMode(self.connection_mode), + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + self.connection_mode, + )), + Action::SwitchScene(Scene::Options), + ]); + } + self.port_from = self + .port_from + .clone() + .with_value(self.port_from_old_value.to_string()); + self.port_to = self + .port_to + .clone() + .with_value(self.port_to_old_value.to_string()); + vec![Action::SwitchScene(Scene::Options)] } - self.port_from = self - .port_from - .clone() - .with_value(self.port_from_old_value.to_string()); - self.port_to = self - .port_to - .clone() - .with_value(self.port_to_old_value.to_string()); - vec![Action::SwitchScene(Scene::Options)] } KeyCode::Char(c) if !c.is_numeric() => vec![], KeyCode::Up => { From e2568f3c0c43b04297424d9075bbbe8922dd4260 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 30 Sep 2024 11:38:54 +0200 Subject: [PATCH 065/255] chore(autonomi): update default features --- autonomi/Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index e6161232d5..8e6dc03f72 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -10,14 +10,14 @@ readme = "README.md" repository = "https://github.com/maidsafe/safe_network" [features] -default = ["data"] -full = ["data", "files", "fs", "registers", "transfers", "vault", "native-payments"] -data = ["transfers"] +default = ["evm-payments", "data"] +full = ["data", "registers", "vault", "evm-payments"] +data = [] vault = ["data"] -files = ["transfers", "data"] +files = ["fs", "data"] fs = [] local = ["sn_networking/local-discovery"] -registers = ["transfers"] +registers = [] transfers = [] native-payments = [] evm-payments = [] From 14d6ec074187d0cc58523f70b8e40c8073337bb7 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Sat, 28 Sep 2024 20:43:19 +0530 Subject: [PATCH 066/255] feat(metrics): shunned count across time frames --- sn_networking/src/metrics/bad_node.rs | 293 ++++++++++++++++++++++++++ sn_networking/src/metrics/mod.rs | 34 ++- sn_networking/src/metrics/upnp.rs | 8 + 3 files changed, 329 insertions(+), 6 deletions(-) create mode 100644 sn_networking/src/metrics/bad_node.rs diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs new file mode 100644 index 0000000000..c7b7459ba3 --- /dev/null +++ b/sn_networking/src/metrics/bad_node.rs @@ -0,0 +1,293 @@ +// Copyright 2024 
MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use crate::target_arch::interval;
+use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
+use prometheus_client::metrics::{family::Family, gauge::Gauge};
+use std::time::{Duration, Instant};
+use strum::IntoEnumIterator;
+
+const UPDATE_INTERVAL: Duration = Duration::from_secs(20);
+
+/// A struct to record the number of reports against our node across different time frames.
+pub struct ShunnedCountAcrossTimeFrames {
+    metric: Family<TimeFrame, Gauge>,
+    tracked_values: Vec<TrackedValue>,
+}
+
+struct TrackedValue {
+    time: Instant,
+    least_bucket_it_fits_in: TimeFrameType,
+}
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelSet)]
+pub struct TimeFrame {
+    time_frame: TimeFrameType,
+}
+
+#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, EncodeLabelValue, strum::EnumIter)]
+pub enum TimeFrameType {
+    LastTenMinutes,
+    LastHour,
+    LastSixHours,
+    LastDay,
+    LastWeek,
+    Indefinite,
+}
+
+impl TimeFrameType {
+    #[cfg(not(test))]
+    fn get_duration_sec(&self) -> u64 {
+        match self {
+            TimeFrameType::LastTenMinutes => 10 * 60,
+            TimeFrameType::LastHour => 60 * 60,
+            TimeFrameType::LastSixHours => 6 * 60 * 60,
+            TimeFrameType::LastDay => 24 * 60 * 60,
+            TimeFrameType::LastWeek => 7 * 24 * 60 * 60,
+            TimeFrameType::Indefinite => u64::MAX,
+        }
+    }
+
+    #[cfg(test)]
+    fn get_duration_sec(&self) -> u64 {
+        match self {
+            TimeFrameType::LastTenMinutes => 2,
+            TimeFrameType::LastHour => 4,
+            TimeFrameType::LastSixHours => 6,
+            TimeFrameType::LastDay => 8,
+            TimeFrameType::LastWeek => 10,
+            TimeFrameType::Indefinite => u64::MAX,
+        }
+    }
+
+    fn next_time_frame(&self) -> Self {
+        match self {
+            TimeFrameType::LastTenMinutes => TimeFrameType::LastHour,
+            TimeFrameType::LastHour => TimeFrameType::LastSixHours,
+            TimeFrameType::LastSixHours => TimeFrameType::LastDay,
+            TimeFrameType::LastDay => TimeFrameType::LastWeek,
+            TimeFrameType::LastWeek => TimeFrameType::Indefinite,
+            TimeFrameType::Indefinite => TimeFrameType::Indefinite,
+        }
+    }
+}
+
+impl ShunnedCountAcrossTimeFrames {
+    pub fn spawn_background_task(
+        time_based_shunned_count: Family<TimeFrame, Gauge>,
+    ) -> tokio::sync::mpsc::Sender<()> {
+        let (tx, mut rx) = tokio::sync::mpsc::channel(10);
+
+        tokio::spawn(async move {
+            let mut shunned_metrics = ShunnedCountAcrossTimeFrames {
+                metric: time_based_shunned_count,
+                tracked_values: Vec::new(),
+            };
+            let mut update_interval = interval(UPDATE_INTERVAL);
+            update_interval.tick().await;
+
+            loop {
+                tokio::select!
{ + _ = rx.recv() => { + shunned_metrics.record_shunned(); + + } + _ = update_interval.tick() => { + shunned_metrics.update(); + } + } + } + }); + tx + } + + pub fn record_shunned(&mut self) { + let now = Instant::now(); + self.tracked_values.push(TrackedValue { + time: now, + least_bucket_it_fits_in: TimeFrameType::LastTenMinutes, + }); + + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + self.metric.get_or_create(&time_frame).inc(); + } + } + + pub fn update(&mut self) { + let now = Instant::now(); + let mut idx_to_remove = Vec::new(); + + for (idx, tracked_value) in self.tracked_values.iter_mut().enumerate() { + let time_elapsed_since_adding = now.duration_since(tracked_value.time).as_secs(); + + if time_elapsed_since_adding > tracked_value.least_bucket_it_fits_in.get_duration_sec() + { + let time_frame = TimeFrame { + time_frame: tracked_value.least_bucket_it_fits_in, + }; + self.metric.get_or_create(&time_frame).dec(); + + let new_time_frame = tracked_value.least_bucket_it_fits_in.next_time_frame(); + if new_time_frame == TimeFrameType::Indefinite { + idx_to_remove.push(idx); + } else { + tracked_value.least_bucket_it_fits_in = new_time_frame; + } + } + } + // remove the ones that are now indefinite + for idx in idx_to_remove { + self.tracked_values.remove(idx); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn update_should_move_to_next_state() -> eyre::Result<()> { + let mut shunned_metrics = ShunnedCountAcrossTimeFrames { + metric: Family::default(), + tracked_values: Vec::new(), + }; + shunned_metrics.record_shunned(); + + let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastTenMinutes)); + // all the counters should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.update(); + let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastHour)); + // all the counters except LastTenMinutes should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. 
Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.update(); + let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastSixHours)); + // all the counters except LastTenMinutes and LastHour should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes || variant == TimeFrameType::LastHour { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.update(); + let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastDay)); + // all the counters except LastTenMinutes, LastHour and LastSixHours should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes + || variant == TimeFrameType::LastHour + || variant == TimeFrameType::LastSixHours + { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.update(); + let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + assert!(matches!(current_state, TimeFrameType::LastWeek)); + // all the counters except LastTenMinutes, LastHour, LastSixHours and LastDay should be 1 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::LastTenMinutes + || variant == TimeFrameType::LastHour + || variant == TimeFrameType::LastSixHours + || variant == TimeFrameType::LastDay + { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } + } + + println!( + "current_state: {current_state:?}. Sleeping for {} sec", + current_state.get_duration_sec() + 1 + ); + std::thread::sleep(std::time::Duration::from_secs( + current_state.get_duration_sec() + 1, + )); + shunned_metrics.update(); + assert_eq!(shunned_metrics.tracked_values.len(), 0); + // all the counters except Indefinite should be 0 + for variant in TimeFrameType::iter() { + let time_frame = TimeFrame { + time_frame: variant, + }; + if variant == TimeFrameType::Indefinite { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 1); + } else { + assert_eq!(shunned_metrics.metric.get_or_create(&time_frame).get(), 0); + } + } + + Ok(()) + } +} diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index a7fdfbeee1..3cd057b44d 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -6,21 +6,22 @@ // KIND, either express or implied. 
Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +// Implementation to record `libp2p::upnp::Event` metrics +mod bad_node; +#[cfg(feature = "upnp")] +mod upnp; + use crate::{log_markers::Marker, target_arch::sleep}; +use bad_node::{ShunnedCountAcrossTimeFrames, TimeFrame}; use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; -#[cfg(feature = "upnp")] -use prometheus_client::metrics::family::Family; use prometheus_client::{ + metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, registry::Registry, }; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; -// Implementation to record `libp2p::upnp::Event` metrics -#[cfg(feature = "upnp")] -mod upnp; - const UPDATE_INTERVAL: Duration = Duration::from_secs(15); const TO_MB: u64 = 1_000_000; @@ -49,11 +50,16 @@ pub(crate) struct NetworkMetricsRecorder { // bad node metrics bad_peers_count: Counter, + #[allow(dead_code)] // This is updated by the background task + shunned_across_time_frames: Family, shunned_count: Counter, // system info process_memory_used_mb: Gauge, process_cpu_usage_percentage: Gauge, + + // helpers + shunned_report_notifier: tokio::sync::mpsc::Sender<()>, } impl NetworkMetricsRecorder { @@ -94,6 +100,16 @@ impl NetworkMetricsRecorder { peers_in_routing_table.clone(), ); + let shunned_count_across_time_frames = Family::default(); + let shunned_report_notifier = ShunnedCountAcrossTimeFrames::spawn_background_task( + shunned_count_across_time_frames.clone(), + ); + sub_registry.register( + "shunned_count_across_time_frames", + "The number of peers that have been shunned across different time frames", + shunned_count_across_time_frames.clone(), + ); + let shunned_count = Counter::default(); sub_registry.register( "shunned_count", @@ -180,10 +196,13 @@ impl NetworkMetricsRecorder { live_time, bad_peers_count, + shunned_across_time_frames: shunned_count_across_time_frames, shunned_count, process_memory_used_mb, process_cpu_usage_percentage, + + shunned_report_notifier, }; network_metrics.system_metrics_recorder_task(); @@ -227,6 +246,9 @@ impl NetworkMetricsRecorder { } Marker::FlaggedAsBadNode { .. } => { let _ = self.shunned_count.inc(); + if let Err(err) = self.shunned_report_notifier.try_send(()) { + debug!("Failed to send shunned report via notifier: {err:?}"); + } } Marker::StoreCost { cost, diff --git a/sn_networking/src/metrics/upnp.rs b/sn_networking/src/metrics/upnp.rs index 9dd3b923b7..593e7eaeab 100644 --- a/sn_networking/src/metrics/upnp.rs +++ b/sn_networking/src/metrics/upnp.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
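+//
+// The label types below let `libp2p::upnp::Event` outcomes be encoded as metric labels.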
+ use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; #[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelSet)] From 33e04b97a569ec08e2a6d8e6771bbb24c34d701e Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 30 Sep 2024 16:24:29 +0530 Subject: [PATCH 067/255] feat(metrics): add metrics_extended endpoint --- sn_networking/src/driver.rs | 65 +++++------ sn_networking/src/event/mod.rs | 16 ++- sn_networking/src/event/swarm.rs | 22 ++-- sn_networking/src/lib.rs | 7 +- sn_networking/src/metrics/mod.rs | 39 ++++--- .../service.rs} | 106 +++++++++++++----- sn_networking/src/transport/other.rs | 8 +- sn_node/src/metrics.rs | 20 +++- sn_node/src/node.rs | 55 ++++----- sn_node/src/put_validation.rs | 4 +- 10 files changed, 199 insertions(+), 143 deletions(-) rename sn_networking/src/{metrics_service.rs => metrics/service.rs} (59%) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 4b39b80907..823db7845e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -6,10 +6,6 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -#[cfg(feature = "open-metrics")] -use crate::metrics::NetworkMetricsRecorder; -#[cfg(feature = "open-metrics")] -use crate::metrics_service::run_metrics_server; use crate::{ bootstrap::{ContinuousBootstrap, BOOTSTRAP_INTERVAL}, circular_vec::CircularVec, @@ -27,6 +23,10 @@ use crate::{ target_arch::{interval, spawn, Instant}, GetRecordError, Network, CLOSE_GROUP_SIZE, }; +#[cfg(feature = "open-metrics")] +use crate::{ + metrics::service::run_metrics_server, metrics::NetworkMetricsRecorder, MetricsRegistries, +}; use crate::{transport, NodeIssue}; use futures::future::Either; use futures::StreamExt; @@ -46,7 +46,7 @@ use libp2p::{ Multiaddr, PeerId, }; #[cfg(feature = "open-metrics")] -use prometheus_client::{metrics::info::Info, registry::Registry}; +use prometheus_client::metrics::info::Info; use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::{try_deserialize_record, RetryStrategy}, @@ -257,9 +257,7 @@ pub struct NetworkBuilder { concurrency_limit: Option, initial_peers: Vec, #[cfg(feature = "open-metrics")] - metrics_metadata_registry: Option, - #[cfg(feature = "open-metrics")] - metrics_registry: Option, + metrics_registries: Option, #[cfg(feature = "open-metrics")] metrics_server_port: Option, #[cfg(feature = "upnp")] @@ -278,9 +276,7 @@ impl NetworkBuilder { concurrency_limit: None, initial_peers: Default::default(), #[cfg(feature = "open-metrics")] - metrics_metadata_registry: None, - #[cfg(feature = "open-metrics")] - metrics_registry: None, + metrics_registries: None, #[cfg(feature = "open-metrics")] metrics_server_port: None, #[cfg(feature = "upnp")] @@ -308,18 +304,11 @@ impl NetworkBuilder { self.initial_peers = initial_peers; } - /// Set the Registry that will be served at the `/metadata` endpoint. This Registry should contain only the static - /// info about the peer. Configure the `metrics_server_port` to enable the metrics server. - #[cfg(feature = "open-metrics")] - pub fn metrics_metadata_registry(&mut self, metrics_metadata_registry: Registry) { - self.metrics_metadata_registry = Some(metrics_metadata_registry); - } - - /// Set the Registry that will be served at the `/metrics` endpoint. + /// Set the registries used inside the metrics server. /// Configure the `metrics_server_port` to enable the metrics server. 
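     /// If no registries are supplied, empty default ones are created when the network is built.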
#[cfg(feature = "open-metrics")] - pub fn metrics_registry(&mut self, metrics_registry: Registry) { - self.metrics_registry = Some(metrics_registry); + pub fn metrics_registries(&mut self, registries: MetricsRegistries) { + self.metrics_registries = Some(registries); } #[cfg(feature = "open-metrics")] @@ -479,11 +468,11 @@ impl NetworkBuilder { ); #[cfg(feature = "open-metrics")] - let mut metrics_registry = self.metrics_registry.unwrap_or_default(); + let mut metrics_registries = self.metrics_registries.unwrap_or_default(); // ==== Transport ==== #[cfg(feature = "open-metrics")] - let main_transport = transport::build_transport(&self.keypair, &mut metrics_registry); + let main_transport = transport::build_transport(&self.keypair, &mut metrics_registries); #[cfg(not(feature = "open-metrics"))] let main_transport = transport::build_transport(&self.keypair); let transport = if !self.local { @@ -513,18 +502,18 @@ impl NetworkBuilder { .boxed(); #[cfg(feature = "open-metrics")] - let network_metrics = if let Some(port) = self.metrics_server_port { - let network_metrics = NetworkMetricsRecorder::new(&mut metrics_registry); - let mut metadata_registry = self.metrics_metadata_registry.unwrap_or_default(); - let network_metadata_sub_registry = - metadata_registry.sub_registry_with_prefix("sn_networking"); + let metrics_recorder = if let Some(port) = self.metrics_server_port { + let metrics_recorder = NetworkMetricsRecorder::new(&mut metrics_registries); + let metadata_sub_reg = metrics_registries + .metadata + .sub_registry_with_prefix("sn_networking"); - network_metadata_sub_registry.register( + metadata_sub_reg.register( "peer_id", "Identifier of a peer of the network", Info::new(vec![("peer_id".to_string(), peer_id.to_string())]), ); - network_metadata_sub_registry.register( + metadata_sub_reg.register( "identify_protocol_str", "The protocol version string that is used to connect to the correct network", Info::new(vec![( @@ -533,8 +522,8 @@ impl NetworkBuilder { )]), ); - run_metrics_server(metrics_registry, metadata_registry, port); - Some(network_metrics) + run_metrics_server(metrics_registries, port); + Some(metrics_recorder) } else { None }; @@ -576,9 +565,9 @@ impl NetworkBuilder { #[cfg(feature = "open-metrics")] let mut node_record_store = node_record_store; #[cfg(feature = "open-metrics")] - if let Some(metrics) = &network_metrics { + if let Some(metrics_recorder) = &metrics_recorder { node_record_store = node_record_store - .set_record_count_metric(metrics.records_stored.clone()); + .set_record_count_metric(metrics_recorder.records_stored.clone()); } let store = UnifiedRecordStore::Node(node_record_store); @@ -682,7 +671,7 @@ impl NetworkBuilder { external_address_manager, replication_fetcher, #[cfg(feature = "open-metrics")] - network_metrics, + metrics_recorder, // kept here to ensure we can push messages to the channel // and not block the processing thread unintentionally network_cmd_sender: network_swarm_cmd_sender.clone(), @@ -733,7 +722,7 @@ pub struct SwarmDriver { /// The peers that are closer to our PeerId. Includes self. 
pub(crate) replication_fetcher: ReplicationFetcher, #[cfg(feature = "open-metrics")] - pub(crate) network_metrics: Option, + pub(crate) metrics_recorder: Option, network_cmd_sender: mpsc::Sender, pub(crate) local_cmd_sender: mpsc::Sender, @@ -998,8 +987,8 @@ impl SwarmDriver { pub(crate) fn record_metrics(&self, marker: Marker) { marker.log(); #[cfg(feature = "open-metrics")] - if let Some(network_metrics) = self.network_metrics.as_ref() { - network_metrics.record_from_marker(marker) + if let Some(metrics_recorder) = self.metrics_recorder.as_ref() { + metrics_recorder.record_from_marker(marker) } } diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 20f45ca2c8..ede545ae9e 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -227,8 +227,10 @@ impl SwarmDriver { self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.peers_in_routing_table.set(self.peers_in_rt as i64); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .peers_in_routing_table + .set(self.peers_in_rt as i64); } } @@ -243,8 +245,10 @@ impl SwarmDriver { self.send_event(NetworkEvent::PeerRemoved(removed_peer, self.peers_in_rt)); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.peers_in_routing_table.set(self.peers_in_rt as i64); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder + .peers_in_routing_table + .set(self.peers_in_rt as i64); } } @@ -284,8 +288,8 @@ impl SwarmDriver { let estimated_network_size = Self::estimate_network_size(peers_in_non_full_buckets, num_of_full_buckets); #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - let _ = metrics + if let Some(metrics_recorder) = &self.metrics_recorder { + let _ = metrics_recorder .estimated_network_size .set(estimated_network_size as i64); } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index af74a1455e..af52f4dc8a 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -33,8 +33,8 @@ impl SwarmDriver { // This does not record all the events. `SwarmEvent::Behaviour(_)` are skipped. Hence `.record()` has to be // called individually on each behaviour. 
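        // For that reason, the kad, identify, upnp and relay-server arms below each record
        // their own event.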
#[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&event); } let start = Instant::now(); let event_string; @@ -47,8 +47,8 @@ impl SwarmDriver { } SwarmEvent::Behaviour(NodeEvent::Kademlia(kad_event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&kad_event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&kad_event); } event_string = "kad_event"; self.handle_kad_event(kad_event)?; @@ -69,8 +69,8 @@ impl SwarmDriver { #[cfg(feature = "upnp")] SwarmEvent::Behaviour(NodeEvent::Upnp(upnp_event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&upnp_event); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&upnp_event); } event_string = "upnp_event"; info!(?upnp_event, "UPnP event"); @@ -84,8 +84,8 @@ impl SwarmDriver { SwarmEvent::Behaviour(NodeEvent::RelayServer(event)) => { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*event)); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&(*event)); } event_string = "relay_server_event"; @@ -109,8 +109,8 @@ impl SwarmDriver { SwarmEvent::Behaviour(NodeEvent::Identify(iden)) => { // Record the Identify event for metrics if the feature is enabled. #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - metrics.record(&(*iden)); + if let Some(metrics_recorder) = &self.metrics_recorder { + metrics_recorder.record(&(*iden)); } event_string = "identify"; @@ -643,7 +643,7 @@ impl SwarmDriver { /// Record the metrics on update of connection state. 
fn record_connection_metrics(&self) { #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { + if let Some(metrics) = &self.metrics_recorder { metrics .open_connections .set(self.live_connected_peers.len() as i64); diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 0df7812ebb..5253c6b668 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -19,8 +19,6 @@ mod external_address; mod log_markers; #[cfg(feature = "open-metrics")] mod metrics; -#[cfg(feature = "open-metrics")] -mod metrics_service; mod network_discovery; mod record_store; mod record_store_api; @@ -35,8 +33,6 @@ use cmd::LocalSwarmCmd; use xor_name::XorName; // re-export arch dependent deps for use in the crate, or above -pub use target_arch::{interval, sleep, spawn, Instant, Interval}; - pub use self::{ cmd::{NodeIssue, SwarmLocalState}, driver::{ @@ -47,6 +43,9 @@ pub use self::{ record_store::{calculate_cost_for_records, NodeRecordStore}, transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; +#[cfg(feature = "open-metrics")] +pub use metrics::service::MetricsRegistries; +pub use target_arch::{interval, sleep, spawn, Instant, Interval}; use self::{cmd::NetworkSwarmCmd, error::Result}; use backoff::{Error as BackoffError, ExponentialBackoff}; diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index 3cd057b44d..28c80d30a3 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -8,16 +8,18 @@ // Implementation to record `libp2p::upnp::Event` metrics mod bad_node; +pub mod service; #[cfg(feature = "upnp")] mod upnp; +#[cfg(feature = "open-metrics")] +use crate::MetricsRegistries; use crate::{log_markers::Marker, target_arch::sleep}; use bad_node::{ShunnedCountAcrossTimeFrames, TimeFrame}; use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; use prometheus_client::{ metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, - registry::Registry, }; use sysinfo::{Pid, ProcessRefreshKind, System}; use tokio::time::Duration; @@ -63,9 +65,13 @@ pub(crate) struct NetworkMetricsRecorder { } impl NetworkMetricsRecorder { - pub fn new(registry: &mut Registry) -> Self { - let libp2p_metrics = Libp2pMetrics::new(registry); - let sub_registry = registry.sub_registry_with_prefix("sn_networking"); + pub fn new(registries: &mut MetricsRegistries) -> Self { + // ==== Standard metrics ===== + + let libp2p_metrics = Libp2pMetrics::new(&mut registries.standard_metrics); + let sub_registry = registries + .standard_metrics + .sub_registry_with_prefix("sn_networking"); let records_stored = Gauge::default(); sub_registry.register( @@ -100,16 +106,6 @@ impl NetworkMetricsRecorder { peers_in_routing_table.clone(), ); - let shunned_count_across_time_frames = Family::default(); - let shunned_report_notifier = ShunnedCountAcrossTimeFrames::spawn_background_task( - shunned_count_across_time_frames.clone(), - ); - sub_registry.register( - "shunned_count_across_time_frames", - "The number of peers that have been shunned across different time frames", - shunned_count_across_time_frames.clone(), - ); - let shunned_count = Counter::default(); sub_registry.register( "shunned_count", @@ -179,6 +175,21 @@ impl NetworkMetricsRecorder { live_time.clone(), ); + // ==== Extended metrics ===== + + let extended_metrics_sub_registry = registries + .extended_metrics + .sub_registry_with_prefix("sn_networking"); + let shunned_count_across_time_frames = Family::default(); + let shunned_report_notifier = 
ShunnedCountAcrossTimeFrames::spawn_background_task( + shunned_count_across_time_frames.clone(), + ); + extended_metrics_sub_registry.register( + "shunned_count_across_time_frames", + "The number of peers that have been shunned across different time frames", + shunned_count_across_time_frames.clone(), + ); + let network_metrics = Self { libp2p_metrics, #[cfg(feature = "upnp")] diff --git a/sn_networking/src/metrics_service.rs b/sn_networking/src/metrics/service.rs similarity index 59% rename from sn_networking/src/metrics_service.rs rename to sn_networking/src/metrics/service.rs index 4d8e0a165f..d3c46f228a 100644 --- a/sn_networking/src/metrics_service.rs +++ b/sn_networking/src/metrics/service.rs @@ -16,22 +16,27 @@ use std::{ task::{Context, Poll}, }; +/// The types of metrics that are exposed via the various endpoints. +#[derive(Default, Debug)] +pub struct MetricsRegistries { + pub standard_metrics: Registry, + pub extended_metrics: Registry, + pub metadata: Registry, +} + const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;version=1.0.0"; -pub(crate) fn run_metrics_server( - metrics_registry: Registry, - metadata_registry: Registry, - port: u16, -) { +pub(crate) fn run_metrics_server(registries: MetricsRegistries, port: u16) { // todo: containers don't work with localhost. let addr = ([127, 0, 0, 1], port).into(); tokio::spawn(async move { - let server = - Server::bind(&addr).serve(MakeMetricService::new(metrics_registry, metadata_registry)); + let server = Server::bind(&addr).serve(MakeMetricService::new(registries)); + // keep these for programs that might be grepping this info info!("Metrics server on http://{}/metrics", server.local_addr()); - info!("Metadata server on http://{}/metadata", server.local_addr()); println!("Metrics server on http://{}/metrics", server.local_addr()); + + info!("Metrics server on http://{} Available endpoints: /metrics, /metrics_extended, /metadata", server.local_addr()); // run the server forever if let Err(e) = server.await { error!("server error: {}", e); @@ -42,17 +47,22 @@ pub(crate) fn run_metrics_server( type SharedRegistry = Arc>; pub(crate) struct MetricService { - metrics_registry: SharedRegistry, - metadata_registry: SharedRegistry, + standard_registry: SharedRegistry, + extended_registry: SharedRegistry, + metadata: SharedRegistry, } impl MetricService { - fn get_metrics_registry(&mut self) -> SharedRegistry { - Arc::clone(&self.metrics_registry) + fn get_standard_metrics_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.standard_registry) + } + + fn get_extended_metrics_registry(&mut self) -> SharedRegistry { + Arc::clone(&self.extended_registry) } fn get_metadata_registry(&mut self) -> SharedRegistry { - Arc::clone(&self.metadata_registry) + Arc::clone(&self.metadata) } fn respond_with_metrics(&mut self) -> Result> { @@ -65,10 +75,42 @@ impl MetricService { .map_err(|_| NetworkError::NetworkMetricError)?, ); - let reg = self.get_metrics_registry(); + let reg = self.get_standard_metrics_registry(); let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; encode(&mut response.body_mut(), ®).map_err(|err| { - error!("Failed to encode the metrics Registry {err:?}"); + error!("Failed to encode the standard metrics Registry {err:?}"); + NetworkError::NetworkMetricError + })?; + + *response.status_mut() = StatusCode::OK; + + Ok(response) + } + + fn respond_with_metrics_extended(&mut self) -> Result> { + let mut response: Response = Response::default(); + + response.headers_mut().insert( + 
hyper::header::CONTENT_TYPE, + METRICS_CONTENT_TYPE + .try_into() + .map_err(|_| NetworkError::NetworkMetricError)?, + ); + + let standard_registry = self.get_standard_metrics_registry(); + let standard_registry = standard_registry + .lock() + .map_err(|_| NetworkError::NetworkMetricError)?; + encode(&mut response.body_mut(), &standard_registry).map_err(|err| { + error!("Failed to encode the standard metrics Registry {err:?}"); + NetworkError::NetworkMetricError + })?; + let extended_registry = self.get_extended_metrics_registry(); + let extended_registry = extended_registry + .lock() + .map_err(|_| NetworkError::NetworkMetricError)?; + encode(&mut response.body_mut(), &extended_registry).map_err(|err| { + error!("Failed to encode the standard metrics Registry {err:?}"); NetworkError::NetworkMetricError })?; @@ -91,7 +133,7 @@ impl MetricService { let reg = self.get_metadata_registry(); let reg = reg.lock().map_err(|_| NetworkError::NetworkMetricError)?; encode(&mut response.body_mut(), ®).map_err(|err| { - error!("Failed to encode the metrics Registry {err:?}"); + error!("Failed to encode the metadata Registry {err:?}"); NetworkError::NetworkMetricError })?; @@ -133,6 +175,12 @@ impl Service> for MetricService { Ok(resp) => resp, Err(_) => self.respond_with_500_server_error(), } + } else if req_method == Method::GET && req_path == "/metrics_extended" { + // Encode and serve metrics from registry. + match self.respond_with_metrics_extended() { + Ok(resp) => resp, + Err(_) => self.respond_with_500_server_error(), + } } else if req_method == Method::GET && req_path == "/metadata" { match self.respond_with_metadata() { Ok(resp) => resp, @@ -146,18 +194,17 @@ impl Service> for MetricService { } pub(crate) struct MakeMetricService { - metrics_registry: SharedRegistry, - metadata_registry: SharedRegistry, + standard_registry: SharedRegistry, + extended_registry: SharedRegistry, + metadata: SharedRegistry, } impl MakeMetricService { - pub(crate) fn new( - metrics_registry: Registry, - metadata_registry: Registry, - ) -> MakeMetricService { + pub(crate) fn new(registries: MetricsRegistries) -> MakeMetricService { MakeMetricService { - metrics_registry: Arc::new(Mutex::new(metrics_registry)), - metadata_registry: Arc::new(Mutex::new(metadata_registry)), + standard_registry: Arc::new(Mutex::new(registries.standard_metrics)), + extended_registry: Arc::new(Mutex::new(registries.extended_metrics)), + metadata: Arc::new(Mutex::new(registries.metadata)), } } } @@ -172,12 +219,15 @@ impl Service for MakeMetricService { } fn call(&mut self, _: T) -> Self::Future { - let metrics_registry = Arc::clone(&self.metrics_registry); - let metadata_registry = Arc::clone(&self.metadata_registry); + let standard_registry = Arc::clone(&self.standard_registry); + let extended_registry = Arc::clone(&self.extended_registry); + let metadata = Arc::clone(&self.metadata); + let fut = async move { Ok(MetricService { - metrics_registry, - metadata_registry, + standard_registry, + extended_registry, + metadata, }) }; Box::pin(fut) diff --git a/sn_networking/src/transport/other.rs b/sn_networking/src/transport/other.rs index 78683ca15d..9143c27e63 100644 --- a/sn_networking/src/transport/other.rs +++ b/sn_networking/src/transport/other.rs @@ -1,7 +1,7 @@ +#[cfg(feature = "open-metrics")] +use crate::MetricsRegistries; #[cfg(feature = "websockets")] use futures::future::Either; -#[cfg(feature = "open-metrics")] -use libp2p::metrics::Registry; #[cfg(feature = "websockets")] use libp2p::{core::upgrade, noise, yamux}; use libp2p::{ 
@@ -12,11 +12,11 @@ use libp2p::{ pub(crate) fn build_transport( keypair: &Keypair, - #[cfg(feature = "open-metrics")] registry: &mut Registry, + #[cfg(feature = "open-metrics")] registries: &mut MetricsRegistries, ) -> transport::Boxed<(PeerId, StreamMuxerBox)> { let trans = generate_quic_transport(keypair); #[cfg(feature = "open-metrics")] - let trans = libp2p::metrics::BandwidthTransport::new(trans, registry); + let trans = libp2p::metrics::BandwidthTransport::new(trans, &mut registries.standard_metrics); #[cfg(feature = "websockets")] // Using a closure here due to the complex return type diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index 4ba458448e..b52eed7b48 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -14,10 +14,12 @@ use prometheus_client::{ family::Family, gauge::Gauge, histogram::{exponential_buckets, Histogram}, + info::Info, }, - registry::Registry, }; use sn_networking::Instant; +#[cfg(feature = "open-metrics")] +use sn_networking::MetricsRegistries; #[derive(Clone)] /// The shared recorders that are used to record metrics. @@ -56,8 +58,20 @@ enum RecordType { } impl NodeMetricsRecorder { - pub(crate) fn new(registry: &mut Registry) -> Self { - let sub_registry = registry.sub_registry_with_prefix("sn_node"); + pub(crate) fn new(registries: &mut MetricsRegistries) -> Self { + let node_metadata_sub_registry = registries.metadata.sub_registry_with_prefix("sn_node"); + node_metadata_sub_registry.register( + "safenode_version", + "The version of the safe node", + Info::new(vec![( + "safenode_version".to_string(), + env!("CARGO_PKG_VERSION").to_string(), + )]), + ); + + let sub_registry = registries + .standard_metrics + .sub_registry_with_prefix("sn_node"); let put_record_ok = Family::default(); sub_registry.register( diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 0caeab2fa7..08211664af 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -18,10 +18,10 @@ use crate::RunningNode; use bytes::Bytes; use libp2p::{identity::Keypair, Multiaddr, PeerId}; #[cfg(feature = "open-metrics")] -use prometheus_client::metrics::{gauge::Gauge, info::Info}; -#[cfg(feature = "open-metrics")] -use prometheus_client::registry::Registry; +use prometheus_client::metrics::gauge::Gauge; use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; +#[cfg(feature = "open-metrics")] +use sn_networking::MetricsRegistries; use sn_networking::{ close_group_majority, Instant, Network, NetworkBuilder, NetworkError, NetworkEvent, NodeIssue, SwarmDriver, @@ -161,26 +161,14 @@ impl NodeBuilder { let mut network_builder = NetworkBuilder::new(self.keypair, self.local, self.root_dir); #[cfg(feature = "open-metrics")] - let node_metrics = if self.metrics_server_port.is_some() { + let metrics_recorder = if self.metrics_server_port.is_some() { // metadata registry - let mut metadata_registry = Registry::default(); - let node_metadata_sub_registry = metadata_registry.sub_registry_with_prefix("sn_node"); - node_metadata_sub_registry.register( - "safenode_version", - "The version of the safe node", - Info::new(vec![( - "safenode_version".to_string(), - env!("CARGO_PKG_VERSION").to_string(), - )]), - ); - network_builder.metrics_metadata_registry(metadata_registry); + let mut metrics_registries = MetricsRegistries::default(); + let metrics_recorder = NodeMetricsRecorder::new(&mut metrics_registries); - // metrics registry - let mut metrics_registry = Registry::default(); - let node_metrics = NodeMetricsRecorder::new(&mut metrics_registry); - 
network_builder.metrics_registry(metrics_registry); + network_builder.metrics_registries(metrics_registries); - Some(node_metrics) + Some(metrics_recorder) } else { None }; @@ -203,7 +191,7 @@ impl NodeBuilder { initial_peers: self.initial_peers, reward_address, #[cfg(feature = "open-metrics")] - node_metrics, + metrics_recorder, owner: self.owner, }; let node = Node { @@ -237,7 +225,7 @@ struct NodeInner { initial_peers: Vec, network: Network, #[cfg(feature = "open-metrics")] - node_metrics: Option, + metrics_recorder: Option, /// Node owner's discord username, in readable format /// If not set, there will be no payment forward to be undertaken owner: Option, @@ -261,9 +249,10 @@ impl Node { } #[cfg(feature = "open-metrics")] - /// Returns a reference to the NodeMetrics if the `open-metrics` feature flag is enabled - pub(crate) fn node_metrics(&self) -> Option<&NodeMetricsRecorder> { - self.inner.node_metrics.as_ref() + /// Returns a reference to the NodeMetricsRecorder if the `open-metrics` feature flag is enabled + /// This is used to record various metrics for the node. + pub(crate) fn metrics_recorder(&self) -> Option<&NodeMetricsRecorder> { + self.inner.metrics_recorder.as_ref() } /// Returns the owner of the node @@ -292,8 +281,8 @@ impl Node { let balance_file_path = root_dir.join(FORWARDED_BALANCE_FILE_NAME); let balance = read_forwarded_balance_value(&balance_file_path); - if let Some(node_metrics) = node_copy.node_metrics() { - let _ = node_metrics.total_forwarded_rewards.set(balance as i64); + if let Some(metrics_recorder) = node_copy.metrics_recorder() { + let _ = metrics_recorder.total_forwarded_rewards.set(balance as i64); } }); @@ -405,9 +394,9 @@ impl Node { let forwarding_reason = owner.clone(); #[cfg(feature = "open-metrics")] - let total_forwarded_rewards = self.node_metrics().map(|metrics|metrics.total_forwarded_rewards.clone()); + let total_forwarded_rewards = self.metrics_recorder().map(|recorder|recorder.total_forwarded_rewards.clone()); #[cfg(feature = "open-metrics")] - let current_reward_wallet_balance = self.node_metrics().map(|metrics|metrics.current_reward_wallet_balance.clone()); + let current_reward_wallet_balance = self.metrics_recorder().map(|recorder|recorder.current_reward_wallet_balance.clone()); let _handle = spawn(async move { @@ -427,8 +416,8 @@ impl Node { } _ = uptime_metrics_update_interval.tick() => { #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics.uptime.set(node_metrics.started_instant.elapsed().as_secs() as i64); + if let Some(metrics_recorder) = self.metrics_recorder() { + let _ = metrics_recorder.uptime.set(metrics_recorder.started_instant.elapsed().as_secs() as i64); } } _ = unrelevant_records_cleanup_interval.tick() => { @@ -448,8 +437,8 @@ impl Node { pub(crate) fn record_metrics(&self, marker: Marker) { marker.log(); #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - node_metrics.record(marker) + if let Some(metrics_recorder) = self.metrics_recorder() { + metrics_recorder.record(marker) } } diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 8839c8d631..c3acab4574 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -693,8 +693,8 @@ impl Node { ); #[cfg(feature = "open-metrics")] - if let Some(node_metrics) = self.node_metrics() { - let _ = node_metrics + if let Some(metrics_recorder) = self.metrics_recorder() { + let _ = metrics_recorder .current_reward_wallet_balance .set(new_balance 
as i64); } From 89e4fe1a433c3ca72d6f0d2930cffe2d142f21c2 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 30 Sep 2024 17:08:15 +0530 Subject: [PATCH 068/255] fix(metrics): remove EOF line when joining two registries --- sn_networking/src/metrics/bad_node.rs | 6 +++--- sn_networking/src/metrics/mod.rs | 9 ++++++--- sn_networking/src/metrics/service.rs | 10 ++++++++++ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs index c7b7459ba3..578ba25cce 100644 --- a/sn_networking/src/metrics/bad_node.rs +++ b/sn_networking/src/metrics/bad_node.rs @@ -94,7 +94,7 @@ impl ShunnedCountAcrossTimeFrames { loop { tokio::select! { _ = rx.recv() => { - shunned_metrics.record_shunned(); + shunned_metrics.record_shunned_metric(); } _ = update_interval.tick() => { @@ -106,7 +106,7 @@ impl ShunnedCountAcrossTimeFrames { tx } - pub fn record_shunned(&mut self) { + pub fn record_shunned_metric(&mut self) { let now = Instant::now(); self.tracked_values.push(TrackedValue { time: now, @@ -160,7 +160,7 @@ mod tests { metric: Family::default(), tracked_values: Vec::new(), }; - shunned_metrics.record_shunned(); + shunned_metrics.record_shunned_metric(); let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastTenMinutes)); diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index 28c80d30a3..6e8fa60812 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -257,9 +257,12 @@ impl NetworkMetricsRecorder { } Marker::FlaggedAsBadNode { .. } => { let _ = self.shunned_count.inc(); - if let Err(err) = self.shunned_report_notifier.try_send(()) { - debug!("Failed to send shunned report via notifier: {err:?}"); - } + let shunned_report_notifier = self.shunned_report_notifier.clone(); + crate::target_arch::spawn(async move { + if let Err(err) = shunned_report_notifier.send(()).await { + error!("Failed to send shunned report via notifier: {err:?}"); + } + }); } Marker::StoreCost { cost, diff --git a/sn_networking/src/metrics/service.rs b/sn_networking/src/metrics/service.rs index d3c46f228a..e64ae01701 100644 --- a/sn_networking/src/metrics/service.rs +++ b/sn_networking/src/metrics/service.rs @@ -105,6 +105,16 @@ impl MetricService { error!("Failed to encode the standard metrics Registry {err:?}"); NetworkError::NetworkMetricError })?; + + // remove the EOF line from the response + let mut buffer = response.body().split("\n").collect::>(); + let _ = buffer.pop(); + let _ = buffer.pop(); + buffer.push("\n"); + let mut buffer = buffer.join("\n"); + let _ = buffer.pop(); + *response.body_mut() = buffer; + let extended_registry = self.get_extended_metrics_registry(); let extended_registry = extended_registry .lock() From 039dc8774de9bfb81096af1da28bc9024b886b23 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 30 Sep 2024 12:54:44 +0200 Subject: [PATCH 069/255] docs(autonomi): update README.md --- autonomi/README.md | 94 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 88 insertions(+), 6 deletions(-) diff --git a/autonomi/README.md b/autonomi/README.md index e3b2291766..465ddc707c 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -7,19 +7,101 @@ Connect to and build on the Autonomi network. ## Usage -See [docs.rs/autonomi](https://docs.rs/autonomi) for usage examples. 
+Add the autonomi crate to your `Cargo.toml`:
+
+```toml
+[dependencies]
+autonomi = { path = "../autonomi", version = "0.1.0" }
+```
 
 ## Running tests
 
-Run a local network with the `local-discovery` feature:
+### Using a local EVM testnet
+
+1. If you haven't already, install Foundry so you can run Anvil
+   nodes: https://book.getfoundry.sh/getting-started/installation
+2. Run a local EVM node:
+
+```sh
+cargo run --bin evm_testnet -- --royalties-wallet
+```
+
+Take note of the console output for the next step (`RPC URL`, `Payment token address` & `Chunk payments address`).
+
+3. Run a local network with the `local-discovery` feature and pass the EVM params:
+
+```sh
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-custom --rpc-url --payment-token-address --chunk-payments-address
+```
+
+4. Then run the tests with the `local` feature and pass the EVM params again:
+
+```sh
+$ RPC_URL= PAYMENT_TOKEN_ADDRESS= CHUNK_PAYMENTS_ADDRESS= cargo test --package=autonomi --features=local
+# Or with logs
+$ RUST_LOG=autonomi RPC_URL= PAYMENT_TOKEN_ADDRESS= CHUNK_PAYMENTS_ADDRESS= cargo test --package=autonomi --features=local -- --nocapture
+```
+
+### Using a live testnet or mainnet
+
+These steps use the hardcoded `Arbitrum One` option as an example, but you can also use the command flags from the steps
+above to point at a live network.
+
+1. Run a local network with the `local-discovery` feature:
 
 ```sh
-cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-arbitrum-one
 ```
 
-Then run the tests with the `local` feature:
+2. Then run the tests with the `local` feature. Make sure that the wallet of the private key you pass has enough gas and
+   payment tokens on the network (in this case Arbitrum One):
+
 ```sh
-$ cargo test --package=autonomi --features=local
+$ EVM_NETWORK=arbitrum-one PRIVATE_KEY= cargo test --package=autonomi --features=local
 # Or with logs
-$ RUST_LOG=autonomi cargo test --package=autonomi --features=local -- --nocapture
+$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture
+```
+
+## Faucet (local)
+
+There is no faucet server, but instead you can use the `Deployer wallet private key` printed in the EVM node output to
+initialise a wallet with almost infinite gas and payment tokens.
Example: + +```rust +let rpc_url = "http://localhost:54370/"; +let payment_token_address = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; +let chunk_payments_address = "0x8464135c8F25Da09e49BC8782676a84730C318bC"; +let private_key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + +let network = Network::Custom(CustomNetwork::new( +rpc_url, +payment_token_address, +chunk_payments_address, +)); + +let deployer_wallet = Wallet::new_from_private_key(network, private_key).unwrap(); +let receiving_wallet = Wallet::new_with_random_wallet(network); + +// Send 10 payment tokens (atto) +let _ = deployer_wallet +.transfer_tokens(receiving_wallet.address(), Amount::from(10)) +.await; +``` + +Alternatively, you can provide the wallet address that should own all the gas and payment tokens to the EVM testnet +startup command using the `--genesis-wallet` flag: + +```sh +cargo run --bin evm_testnet -- --royalties-wallet --genesis-wallet ``` + +```shell +************************* +* Ethereum node started * +************************* +RPC URL: http://localhost:60093/ +Payment token address: 0x5FbDB2315678afecb367f032d93F642f64180aa3 +Chunk payments address: 0x8464135c8F25Da09e49BC8782676a84730C318bC +Deployer wallet private key: 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +Genesis wallet balance: (tokens: 20000000000000000000000000, gas: 9998998011366954730202) +``` \ No newline at end of file From bdf95ca282b24e7ec5b04caffea7484b5817e802 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 30 Sep 2024 17:10:15 +0200 Subject: [PATCH 070/255] feat: removed royalties from the smart contract --- autonomi/README.md | 8 ++++---- autonomi/tests/common/mod.rs | 2 +- evm_testnet/README.md | 2 +- evm_testnet/src/main.rs | 9 +++------ evmlib/artifacts/ChunkPayments.json | 22 ++-------------------- evmlib/src/contract/chunk_payments/mod.rs | 13 ++++--------- evmlib/src/lib.rs | 2 +- evmlib/src/testnet.rs | 13 ++++--------- evmlib/src/transaction.rs | 1 - evmlib/src/utils.rs | 7 +------ evmlib/src/wallet.rs | 9 ++------- evmlib/tests/chunk_payments.rs | 4 +--- evmlib/tests/common/mod.rs | 5 ----- evmlib/tests/wallet.rs | 4 +--- 14 files changed, 25 insertions(+), 76 deletions(-) diff --git a/autonomi/README.md b/autonomi/README.md index 465ddc707c..2603768aea 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -23,7 +23,7 @@ autonomi = { path = "../autonomi", version = "0.1.0" } 2. Run a local EVM node: ```sh -cargo run --bin evm_testnet -- --royalties-wallet +cargo run --bin evm_testnet ``` Take note of the console output for the next step (`RPC URL`, `Payment token address` & `Chunk payments address`). 
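One convenience worth noting for the steps above: the three values from the `evm_testnet` console output can be exported once instead of being prefixed to every command. The addresses below are only illustrative (taken from the sample output earlier in this README); use the ones your node actually prints:

```sh
# Illustrative values; replace them with the ones printed by your evm_testnet run.
export RPC_URL="http://localhost:54370/"
export PAYMENT_TOKEN_ADDRESS="0x5FbDB2315678afecb367f032d93F642f64180aa3"
export CHUNK_PAYMENTS_ADDRESS="0x8464135c8F25Da09e49BC8782676a84730C318bC"

cargo test --package=autonomi --features=local
```

This assumes the tests read these settings from the environment, which the inline `RPC_URL=... cargo test` invocation above already relies on.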
@@ -57,9 +57,9 @@ cargo run --bin=safenode-manager --features=local-discovery -- local run --build payment tokens on the network (in this case Arbitrum One): ```sh -$ EVM_NETWORK=arbitrum-one PRIVATE_KEY= cargo test --package=autonomi --features=local +$ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture +$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture ``` ## Faucet (local) @@ -92,7 +92,7 @@ Alternatively, you can provide the wallet address that should own all the gas an startup command using the `--genesis-wallet` flag: ```sh -cargo run --bin evm_testnet -- --royalties-wallet --genesis-wallet +cargo run --bin evm_testnet -- --genesis-wallet ``` ```shell diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs index 3f62f45328..21dc3b8a98 100644 --- a/autonomi/tests/common/mod.rs +++ b/autonomi/tests/common/mod.rs @@ -182,7 +182,7 @@ pub fn evm_wallet_from_env_or_default(network: evmlib::Network) -> evmlib::walle const DEFAULT_WALLET_PRIVATE_KEY: &str = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - let private_key = env::var("PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); + let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); evmlib::wallet::Wallet::new_from_private_key(network, &private_key) .expect("Invalid private key") diff --git a/evm_testnet/README.md b/evm_testnet/README.md index 3eab9ed3d5..c6b2b20820 100644 --- a/evm_testnet/README.md +++ b/evm_testnet/README.md @@ -9,7 +9,7 @@ Tool to run a local Ethereum node that automatically deploys all Autonomi smart ### Usage ```bash -cargo run --bin evm_testnet -- --royalties-wallet --genesis-wallet +cargo run --bin evm_testnet -- --genesis-wallet ``` Example output: diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs index 09f38821e5..8cef9ceb98 100644 --- a/evm_testnet/src/main.rs +++ b/evm_testnet/src/main.rs @@ -16,9 +16,6 @@ use std::str::FromStr; #[derive(Debug, Parser)] #[clap(version, author, verbatim_doc_comment)] struct Args { - /// Address that will receive the chunk payments royalties. - #[clap(long, short)] - royalties_wallet: Address, /// Wallet that will hold ~all gas funds and payment tokens. #[clap(long, short)] genesis_wallet: Option
<Address>,
@@ -27,11 +24,11 @@ struct Args {
 #[tokio::main]
 async fn main() {
     let args = Args::parse();
-    start_node(args.genesis_wallet, args.royalties_wallet).await;
+    start_node(args.genesis_wallet).await;
 }
 
-async fn start_node(genesis_wallet: Option<Address>, royalties_wallet: Address) {
-    let testnet = Testnet::new(royalties_wallet).await;
+async fn start_node(genesis_wallet: Option<Address>
) { + let testnet = Testnet::new().await; println!("*************************"); println!("* Ethereum node started *"); diff --git a/evmlib/artifacts/ChunkPayments.json b/evmlib/artifacts/ChunkPayments.json index 000c56318d..5b96ec5e9a 100644 --- a/evmlib/artifacts/ChunkPayments.json +++ b/evmlib/artifacts/ChunkPayments.json @@ -9,11 +9,6 @@ "internalType": "address", "name": "_tokenAddress", "type": "address" - }, - { - "internalType": "address", - "name": "_foundationWallet", - "type": "address" } ], "stateMutability": "nonpayable", @@ -57,19 +52,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "ROYALTIES_WALLET", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [ { @@ -101,8 +83,8 @@ "type": "function" } ], - "bytecode": "0x60c060405234801561001057600080fd5b5060405161075b38038061075b83398101604081905261002f91610130565b6001600160a01b0382166100965760405162461bcd60e51b8152602060048201526024808201527f546f6b656e20616464726573732063616e6e6f74206265207a65726f206164646044820152637265737360e01b60648201526084015b60405180910390fd5b6001600160a01b0381166100fd5760405162461bcd60e51b815260206004820152602860248201527f466f756e646174696f6e2077616c6c65742063616e6e6f74206265207a65726f604482015267206164647265737360c01b606482015260840161008d565b6001600160a01b039182166080521660a052610163565b80516001600160a01b038116811461012b57600080fd5b919050565b6000806040838503121561014357600080fd5b61014c83610114565b915061015a60208401610114565b90509250929050565b60805160a0516105b96101a2600039600081816060015261018401526000818160a3015281816101cf015281816102d101526103d101526105b96000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806305aa488a146100465780631c317b341461005b5780635c0d32861461009e575b600080fd5b610059610054366004610444565b6100c5565b005b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6000805b8281101561017d57368484838181106100e4576100e46104b9565b90506060020190506000600a82602001356100ff91906104cf565b905061010b81856104f1565b93506101283361011e6020850185610518565b84602001356101ae565b60408201356020830180359061013e9085610518565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450506001016100c9565b506101a9337f0000000000000000000000000000000000000000000000000000000000000000836101ae565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa158015610218573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061023c9190610548565b101561029a5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b03831630146103a357604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561031a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061033e9190610548565b10156103a35760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b
6064820152608401610291565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561041a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061043e9190610561565b50505050565b6000806020838503121561045757600080fd5b823567ffffffffffffffff8082111561046f57600080fd5b818501915085601f83011261048357600080fd5b81358181111561049257600080fd5b8660206060830285010111156104a757600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b6000826104ec57634e487b7160e01b600052601260045260246000fd5b500490565b8082018082111561051257634e487b7160e01b600052601160045260246000fd5b92915050565b60006020828403121561052a57600080fd5b81356001600160a01b038116811461054157600080fd5b9392505050565b60006020828403121561055a57600080fd5b5051919050565b60006020828403121561057357600080fd5b8151801515811461054157600080fdfea264697066735822122094c63e1f2c74507a86a2259c9b1cb5a11238724ae1164198b92142b5386eda6164736f6c63430008180033", - "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106100415760003560e01c806305aa488a146100465780631c317b341461005b5780635c0d32861461009e575b600080fd5b610059610054366004610444565b6100c5565b005b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b6100827f000000000000000000000000000000000000000000000000000000000000000081565b6000805b8281101561017d57368484838181106100e4576100e46104b9565b90506060020190506000600a82602001356100ff91906104cf565b905061010b81856104f1565b93506101283361011e6020850185610518565b84602001356101ae565b60408201356020830180359061013e9085610518565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450506001016100c9565b506101a9337f0000000000000000000000000000000000000000000000000000000000000000836101ae565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa158015610218573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061023c9190610548565b101561029a5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b03831630146103a357604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561031a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061033e9190610548565b10156103a35760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610291565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561041a573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061043e9190610561565b50505050565b6000806020838503121561045757600080fd5b823567ffffffffffffffff8082111561046f57600080fd5b818501915085601f83011261048357600080fd5b81358181111561049257600080fd5b8660206060830285010111156104a757600080fd5b60209290920196919550909350505050565b634e487b7160e01
b600052603260045260246000fd5b6000826104ec57634e487b7160e01b600052601260045260246000fd5b500490565b8082018082111561051257634e487b7160e01b600052601160045260246000fd5b92915050565b60006020828403121561052a57600080fd5b81356001600160a01b038116811461054157600080fd5b9392505050565b60006020828403121561055a57600080fd5b5051919050565b60006020828403121561057357600080fd5b8151801515811461054157600080fdfea264697066735822122094c63e1f2c74507a86a2259c9b1cb5a11238724ae1164198b92142b5386eda6164736f6c63430008180033", + "bytecode": "0x60a060405234801561001057600080fd5b506040516105f73803806105f783398101604081905261002f916100a6565b6001600160a01b0381166100955760405162461bcd60e51b8152602060048201526024808201527f546f6b656e20616464726573732063616e6e6f74206265207a65726f206164646044820152637265737360e01b606482015260840160405180910390fd5b6001600160a01b03166080526100d6565b6000602082840312156100b857600080fd5b81516001600160a01b03811681146100cf57600080fd5b9392505050565b6080516104f26101056000396000818160550152818161015101528181610253015261035301526104f26000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806305aa488a1461003b5780635c0d328614610050575b600080fd5b61004e6100493660046103c6565b610093565b005b6100777f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b60005b8181101561012b57368383838181106100b1576100b161043b565b6060029190910191506100d79050336100cd6020840184610451565b8360200135610130565b6040810135602082018035906100ed9084610451565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450600101610096565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa15801561019a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101be9190610481565b101561021c5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b038316301461032557604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561029c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102c09190610481565b10156103255760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610213565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561039c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103c0919061049a565b50505050565b600080602083850312156103d957600080fd5b823567ffffffffffffffff808211156103f157600080fd5b818501915085601f83011261040557600080fd5b81358181111561041457600080fd5b86602060608302850101111561042957600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b60006020828403121561046357600080fd5b81356001600160a01b038116811461047a57600080fd5b9392505050565b60006020828403121561049357600080fd5b5051919050565b6000602082840312156104ac57600080fd5b8151801515811461047a57600080fdfea2646970667358221220f36a68bc214963ac01148fda122f884402375e68563223e639f0b88bf6a1aaf664736f6c
63430008180033", + "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106100365760003560e01c806305aa488a1461003b5780635c0d328614610050575b600080fd5b61004e6100493660046103c6565b610093565b005b6100777f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b03909116815260200160405180910390f35b60005b8181101561012b57368383838181106100b1576100b161043b565b6060029190910191506100d79050336100cd6020840184610451565b8360200135610130565b6040810135602082018035906100ed9084610451565b6001600160a01b03167fa6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f95860405160405180910390a450600101610096565b505050565b6040516370a0823160e01b81526001600160a01b03848116600483015282917f0000000000000000000000000000000000000000000000000000000000000000909116906370a0823190602401602060405180830381865afa15801561019a573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101be9190610481565b101561021c5760405162461bcd60e51b815260206004820152602260248201527f57616c6c657420646f6573206e6f74206861766520656e6f75676820746f6b656044820152616e7360f01b60648201526084015b60405180910390fd5b6001600160a01b038316301461032557604051636eb1769f60e11b81526001600160a01b03848116600483015230602483015282917f00000000000000000000000000000000000000000000000000000000000000009091169063dd62ed3e90604401602060405180830381865afa15801561029c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102c09190610481565b10156103255760405162461bcd60e51b815260206004820152602e60248201527f436f6e7472616374206973206e6f7420616c6c6f77656420746f207370656e6460448201526d20656e6f75676820746f6b656e7360901b6064820152608401610213565b6040516323b872dd60e01b81526001600160a01b0384811660048301528381166024830152604482018390527f000000000000000000000000000000000000000000000000000000000000000016906323b872dd906064016020604051808303816000875af115801561039c573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103c0919061049a565b50505050565b600080602083850312156103d957600080fd5b823567ffffffffffffffff808211156103f157600080fd5b818501915085601f83011261040557600080fd5b81358181111561041457600080fd5b86602060608302850101111561042957600080fd5b60209290920196919550909350505050565b634e487b7160e01b600052603260045260246000fd5b60006020828403121561046357600080fd5b81356001600160a01b038116811461047a57600080fd5b9392505050565b60006020828403121561049357600080fd5b5051919050565b6000602082840312156104ac57600080fd5b8151801515811461047a57600080fdfea2646970667358221220f36a68bc214963ac01148fda122f884402375e68563223e639f0b88bf6a1aaf664736f6c63430008180033", "linkReferences": {}, "deployedLinkReferences": {} } \ No newline at end of file diff --git a/evmlib/src/contract/chunk_payments/mod.rs b/evmlib/src/contract/chunk_payments/mod.rs index fb310ba5a1..9a8378c4bd 100644 --- a/evmlib/src/contract/chunk_payments/mod.rs +++ b/evmlib/src/contract/chunk_payments/mod.rs @@ -37,15 +37,10 @@ where /// Deploys the ChunkPayments smart contract to the network of the provider. /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING! 
- pub async fn deploy( - provider: P, - payment_token_address: Address, - royalties_wallet: Address, - ) -> Self { - let contract = - ChunkPaymentsContract::deploy(provider, payment_token_address, royalties_wallet) - .await - .expect("Could not deploy contract"); + pub async fn deploy(provider: P, payment_token_address: Address) -> Self { + let contract = ChunkPaymentsContract::deploy(provider, payment_token_address) + .await + .expect("Could not deploy contract"); ChunkPayments { contract } } diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index b3995ebe72..cd853bbb96 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -25,7 +25,7 @@ const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address = // Should be updated when the smart contract changes! const ARBITRUM_ONE_CHUNK_PAYMENTS_ADDRESS: Address = - address!("F15BfEA73b6a551C5c2e66026e4eB3b69c1F602c"); + address!("708353783756C62818aCdbce914d90E0245F7319"); #[derive(Clone, Debug, PartialEq)] pub struct CustomNetwork { diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 5a19ea8d85..015ee035c1 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -18,16 +18,12 @@ pub struct Testnet { impl Testnet { /// Starts an Anvil node and automatically deploys the network token and chunk payments smart contracts. - pub async fn new(royalties_wallet: Address) -> Self { + pub async fn new() -> Self { let anvil = start_node(); let network_token = deploy_network_token_contract(&anvil).await; - let chunk_payments = deploy_chunk_payments_contract( - &anvil, - *network_token.contract.address(), - royalties_wallet, - ) - .await; + let chunk_payments = + deploy_chunk_payments_contract(&anvil, *network_token.contract.address()).await; Testnet { anvil, @@ -96,7 +92,6 @@ pub async fn deploy_network_token_contract( pub async fn deploy_chunk_payments_contract( anvil: &AnvilInstance, token_address: Address, - royalties_wallet: Address, ) -> ChunkPayments< Http, FillProvider< @@ -119,5 +114,5 @@ pub async fn deploy_chunk_payments_contract( .on_http(rpc_url); // Deploy the contract. - ChunkPayments::deploy(provider, token_address, royalties_wallet).await + ChunkPayments::deploy(provider, token_address).await } diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs index 87798ba1d3..b74c268450 100644 --- a/evmlib/src/transaction.rs +++ b/evmlib/src/transaction.rs @@ -121,7 +121,6 @@ pub async fn verify_chunk_payment( if let Ok(event) = ChunkPaymentEvent::try_from(log) { // Check if the event matches what we expect. - // The smart contract handles royalties, so we don't have to check that. if event.quote_hash == quote_hash && event.reward_address == reward_addr && event.amount >= amount diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 08378e875d..3346eb789e 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -1,11 +1,6 @@ -use crate::common::{Address, Amount, Hash}; +use crate::common::{Address, Hash}; use rand::Rng; -/// Returns the amount of royalties expected for a certain transfer amount. -pub fn calculate_royalties_from_amount(amount: Amount) -> Amount { - amount / Amount::from(10) -} - /// Generate a random Address. 
pub fn dummy_address() -> Address { Address::new(rand::rngs::OsRng.gen()) diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 69eb0d55b9..69c9644240 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -4,7 +4,6 @@ use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256}; use crate::contract::chunk_payments::{ChunkPayments, MAX_TRANSFERS_PER_TRANSACTION}; use crate::contract::network_token::NetworkToken; use crate::contract::{chunk_payments, network_token}; -use crate::utils::calculate_royalties_from_amount; use crate::Network; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; use alloy::providers::fillers::{ @@ -228,10 +227,6 @@ pub async fn pay_for_quotes>( ) -> Result, PayForQuotesError> { let payments: Vec<_> = payments.into_iter().collect(); let total_amount = payments.iter().map(|(_, _, amount)| amount).sum(); - let royalties = calculate_royalties_from_amount(total_amount); - - // 2 * royalties to have a small buffer for different rounding in the smart contract. - let total_amount_with_royalties = total_amount + (U256::from(2) * royalties); let mut tx_hashes_by_quote = BTreeMap::new(); @@ -240,7 +235,7 @@ pub async fn pay_for_quotes>( wallet.clone(), network, *network.chunk_payments_address(), - total_amount_with_royalties, + total_amount, ) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; @@ -291,7 +286,7 @@ mod tests { #[tokio::test] async fn test_transfer_gas_tokens() { - let testnet = Testnet::new(dummy_address()).await; + let testnet = Testnet::new().await; let network = testnet.to_network(); let wallet = Wallet::new_from_private_key(network.clone(), &testnet.default_wallet_private_key()) diff --git a/evmlib/tests/chunk_payments.rs b/evmlib/tests/chunk_payments.rs index 1b0a283245..244cceab43 100644 --- a/evmlib/tests/chunk_payments.rs +++ b/evmlib/tests/chunk_payments.rs @@ -1,7 +1,6 @@ mod common; use crate::common::quote::random_quote_payment; -use crate::common::ROYALTIES_WALLET; use alloy::network::{Ethereum, EthereumWallet}; use alloy::node_bindings::AnvilInstance; use alloy::primitives::utils::parse_ether; @@ -44,8 +43,7 @@ async fn setup() -> ( let network_token = deploy_network_token_contract(&anvil).await; let chunk_payments = - deploy_chunk_payments_contract(&anvil, *network_token.contract.address(), ROYALTIES_WALLET) - .await; + deploy_chunk_payments_contract(&anvil, *network_token.contract.address()).await; (anvil, network_token, chunk_payments) } diff --git a/evmlib/tests/common/mod.rs b/evmlib/tests/common/mod.rs index 48fab70355..9e1a6d4369 100644 --- a/evmlib/tests/common/mod.rs +++ b/evmlib/tests/common/mod.rs @@ -1,6 +1 @@ -use alloy::primitives::{address, Address}; - pub mod quote; - -#[allow(dead_code)] -pub const ROYALTIES_WALLET: Address = address!("385e7887E5b41750E3679Da787B943EC42f37d75"); diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs index a212170040..97531859b6 100644 --- a/evmlib/tests/wallet.rs +++ b/evmlib/tests/wallet.rs @@ -1,7 +1,6 @@ mod common; use crate::common::quote::random_quote_payment; -use crate::common::ROYALTIES_WALLET; use alloy::network::EthereumWallet; use alloy::node_bindings::AnvilInstance; use alloy::primitives::utils::parse_ether; @@ -22,8 +21,7 @@ async fn local_testnet() -> (AnvilInstance, Network, EthereumWallet) { let rpc_url = anvil.endpoint().parse().unwrap(); let network_token = deploy_network_token_contract(&anvil).await; let payment_token_address = *network_token.contract.address(); - let 
chunk_payments = - deploy_chunk_payments_contract(&anvil, payment_token_address, ROYALTIES_WALLET).await; + let chunk_payments = deploy_chunk_payments_contract(&anvil, payment_token_address).await; ( anvil, From 37982fb274efc86aadd800d89e1cec3cbfcd9b03 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 30 Sep 2024 18:36:39 +0200 Subject: [PATCH 071/255] feat(launchpad): reset confirmation when changing connection mode --- .../src/components/popup/connection_mode.rs | 377 ++++++++++++------ .../src/components/popup/port_range.rs | 167 +++++++- 2 files changed, 412 insertions(+), 132 deletions(-) diff --git a/node-launchpad/src/components/popup/connection_mode.rs b/node-launchpad/src/components/popup/connection_mode.rs index 0cff9bfbb3..71906a12a4 100644 --- a/node-launchpad/src/components/popup/connection_mode.rs +++ b/node-launchpad/src/components/popup/connection_mode.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use std::default::Default; +use std::{default::Default, rc::Rc}; use super::super::utils::centered_rect_fixed; @@ -16,7 +16,9 @@ use ratatui::{ layout::{Alignment, Constraint, Direction, Layout, Rect}, style::{Style, Stylize}, text::{Line, Span}, - widgets::{Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph}, + widgets::{ + Block, Borders, HighlightSpacing, List, ListItem, ListState, Padding, Paragraph, Wrap, + }, }; use strum::IntoEnumIterator; @@ -31,9 +33,17 @@ use crate::{ }, }; +#[derive(Default)] +enum ChangeConnectionModeState { + #[default] + Selection, + ConfirmChange, +} + #[derive(Default)] pub struct ChangeConnectionModePopUp { active: bool, + state: ChangeConnectionModeState, items: StatefulList, connection_mode_selection: ConnectionModeItem, connection_mode_initial_state: ConnectionModeItem, @@ -61,6 +71,7 @@ impl ChangeConnectionModePopUp { let items = StatefulList::with_items(connection_modes_items); Ok(Self { active: false, + state: ChangeConnectionModeState::Selection, items, connection_mode_selection: selected_connection_mode.clone(), connection_mode_initial_state: selected_connection_mode.clone(), @@ -106,120 +117,15 @@ impl ChangeConnectionModePopUp { } ConnectionModeItem::default() } -} -impl Component for ChangeConnectionModePopUp { - fn handle_key_events(&mut self, key: KeyEvent) -> Result> { - if !self.active { - return Ok(vec![]); - } - let send_back: Vec = match key.code { - KeyCode::Enter => { - // We allow action if we have more than one connection mode and the action is not - // over the connection mode already selected - let connection_mode = self.return_selection(); - if connection_mode.connection_mode != self.connection_mode_selection.connection_mode - { - debug!( - "Got Enter and there's a new selection, storing value and switching to Options" - ); - debug!("Connection Mode selected: {:?}", connection_mode); - self.connection_mode_initial_state = self.connection_mode_selection.clone(); - self.assign_connection_mode_selection(); - vec![ - Action::StoreConnectionMode(self.connection_mode_selection.connection_mode), - Action::OptionsActions(OptionsActions::UpdateConnectionMode( - connection_mode.clone().connection_mode, - )), - if connection_mode.connection_mode == ConnectionMode::CustomPorts { - Action::SwitchScene(Scene::ChangePortsPopUp { - connection_mode_old_value: Some( - self.connection_mode_initial_state.connection_mode, - ), - }) - } else { - 
Action::SwitchScene(Scene::Status) - }, - ] - } else { - debug!("Got Enter, but no new selection. We should not do anything"); - vec![Action::SwitchScene(Scene::ChangeConnectionModePopUp)] - } - } - KeyCode::Esc => { - debug!("Got Esc, switching to Options"); - vec![Action::SwitchScene(Scene::Options)] - } - KeyCode::Up => { - if self.items.items.len() > 1 { - self.items.previous(); - let connection_mode = self.return_selection(); - self.can_select = connection_mode.connection_mode - != self.connection_mode_selection.connection_mode; - } - vec![] - } - KeyCode::Down => { - if self.items.items.len() > 1 { - self.items.next(); - let connection_mode = self.return_selection(); - self.can_select = connection_mode.connection_mode - != self.connection_mode_selection.connection_mode; - } - vec![] - } - _ => { - vec![] - } - }; - Ok(send_back) - } - - fn update(&mut self, action: Action) -> Result> { - let send_back = match action { - Action::SwitchScene(scene) => match scene { - Scene::ChangeConnectionModePopUp => { - self.active = true; - self.can_select = false; - self.select_connection_mode(); - Some(Action::SwitchInputMode(InputMode::Entry)) - } - _ => { - self.active = false; - None - } - }, - // Useful when the user has selected a connection mode but didn't confirm it - Action::OptionsActions(OptionsActions::UpdateConnectionMode(connection_mode)) => { - self.connection_mode_selection.connection_mode = connection_mode; - self.select_connection_mode(); - None - } - _ => None, - }; - Ok(send_back) - } - - fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { - if !self.active { - return Ok(()); - } - - let layer_zero = centered_rect_fixed(52, 15, area); - - let layer_one = Layout::new( - Direction::Vertical, - [ - // Padding from title to the table - Constraint::Length(1), - // Table - Constraint::Min(1), - // for the pop_up_border - Constraint::Length(1), - ], - ) - .split(layer_zero); + // Draw functions + fn draw_selection_state( + &mut self, + f: &mut crate::tui::Frame<'_>, + layer_zero: Rect, + layer_one: Rc<[Rect]>, + ) -> Paragraph { let pop_up_border: Paragraph = Paragraph::new("").block( Block::default() .borders(Borders::ALL) @@ -305,7 +211,248 @@ impl Component for ChangeConnectionModePopUp { buttons_layer[1], ); - // We render now so the borders are on top of the other widgets + pop_up_border + } + + fn draw_confirm_change( + &mut self, + f: &mut crate::tui::Frame<'_>, + layer_zero: Rect, + layer_one: Rc<[Rect]>, + ) -> Paragraph { + // layer zero + let pop_up_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Confirm & Reset ") + .bold() + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + .border_style(Style::new().fg(VIVID_SKY_BLUE)), + ); + clear_area(f, layer_zero); + + // split into 3 parts, paragraph, dash, buttons + let layer_two = Layout::new( + Direction::Vertical, + [ + // for the text + Constraint::Length(9), + // gap + Constraint::Length(3), + // for the buttons + Constraint::Length(1), + ], + ) + .split(layer_one[1]); + + let paragraph_text = Paragraph::new(vec![ + Line::from(Span::styled("\n\n", Style::default())), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(vec![ + Span::styled( + "Changing connection mode will ", + Style::default().fg(LIGHT_PERIWINKLE), + ), + Span::styled("reset all nodes.", Style::default().fg(GHOST_WHITE)), + ]), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(Span::styled("\n\n", Style::default())), + 
Line::from(Span::styled("\n\n", Style::default())), + Line::from(vec![ + Span::styled("You’ll need to ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Add", Style::default().fg(GHOST_WHITE)), + Span::styled(" and ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Start", Style::default().fg(GHOST_WHITE)), + Span::styled( + " them again afterwards. Are you sure you want to continue?", + Style::default().fg(LIGHT_PERIWINKLE), + ), + ]), + ]) + .alignment(Alignment::Left) + .wrap(Wrap { trim: true }) + .block(Block::default().padding(Padding::horizontal(2))); + + f.render_widget(paragraph_text, layer_two[0]); + + let dash = Block::new() + .borders(Borders::BOTTOM) + .border_style(Style::new().fg(GHOST_WHITE)); + f.render_widget(dash, layer_two[1]); + + let buttons_layer = + Layout::horizontal(vec![Constraint::Percentage(50), Constraint::Percentage(50)]) + .split(layer_two[2]); + + let button_no = Line::from(vec![Span::styled( + " Cancel [Esc]", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + let button_yes_style = if self.can_select { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }; + f.render_widget(button_no, buttons_layer[0]); + + let button_yes = Line::from(vec![ + Span::styled("Yes, Change Mode ", button_yes_style), + Span::styled("[Enter]", Style::default().fg(GHOST_WHITE)), + ]); + f.render_widget(button_yes, buttons_layer[1]); + + pop_up_border + } +} + +impl Component for ChangeConnectionModePopUp { + fn handle_key_events(&mut self, key: KeyEvent) -> Result> { + if !self.active { + return Ok(vec![]); + } + let send_back: Vec = match &self.state { + ChangeConnectionModeState::Selection => match key.code { + KeyCode::Enter => { + let connection_mode = self.return_selection(); + self.connection_mode_initial_state = self.connection_mode_selection.clone(); + if connection_mode.connection_mode == ConnectionMode::CustomPorts { + vec![ + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + ConnectionMode::CustomPorts, + )), + Action::SwitchScene(Scene::ChangePortsPopUp { + connection_mode_old_value: Some( + self.connection_mode_initial_state.connection_mode, + ), + }), + ] + } else { + self.state = ChangeConnectionModeState::ConfirmChange; + vec![] + } + } + KeyCode::Esc => { + debug!("Got Esc, switching to Options"); + vec![Action::SwitchScene(Scene::Options)] + } + KeyCode::Up => { + if self.items.items.len() > 1 { + self.items.previous(); + let connection_mode = self.return_selection(); + self.can_select = connection_mode.connection_mode + != self.connection_mode_selection.connection_mode; + } + vec![] + } + KeyCode::Down => { + if self.items.items.len() > 1 { + self.items.next(); + let connection_mode = self.return_selection(); + self.can_select = connection_mode.connection_mode + != self.connection_mode_selection.connection_mode; + } + vec![] + } + _ => { + vec![] + } + }, + ChangeConnectionModeState::ConfirmChange => match key.code { + KeyCode::Enter => { + self.state = ChangeConnectionModeState::Selection; + // We allow action if we have more than one connection mode and the action is not + // over the connection mode already selected + let connection_mode = self.return_selection(); + if connection_mode.connection_mode + != self.connection_mode_selection.connection_mode + { + debug!( + "Got Enter and there's a new selection, storing value and switching to Options" + ); + debug!("Connection Mode selected: {:?}", connection_mode); + self.connection_mode_initial_state = self.connection_mode_selection.clone(); + 
self.assign_connection_mode_selection(); + vec![ + Action::StoreConnectionMode( + self.connection_mode_selection.connection_mode, + ), + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + connection_mode.clone().connection_mode, + )), + Action::SwitchScene(Scene::Status), + ] + } else { + debug!("Got Enter, but no new selection. We should not do anything"); + vec![Action::SwitchScene(Scene::ChangeConnectionModePopUp)] + } + } + KeyCode::Esc => { + self.state = ChangeConnectionModeState::Selection; + vec![Action::SwitchScene(Scene::Options)] + } + _ => { + vec![] + } + }, + }; + Ok(send_back) + } + + fn update(&mut self, action: Action) -> Result> { + let send_back = match action { + Action::SwitchScene(scene) => match scene { + Scene::ChangeConnectionModePopUp => { + self.active = true; + self.can_select = false; + self.select_connection_mode(); + Some(Action::SwitchInputMode(InputMode::Entry)) + } + _ => { + self.active = false; + None + } + }, + // Useful when the user has selected a connection mode but didn't confirm it + Action::OptionsActions(OptionsActions::UpdateConnectionMode(connection_mode)) => { + self.connection_mode_selection.connection_mode = connection_mode; + self.select_connection_mode(); + None + } + _ => None, + }; + Ok(send_back) + } + + fn draw(&mut self, f: &mut crate::tui::Frame<'_>, area: Rect) -> Result<()> { + if !self.active { + return Ok(()); + } + + let layer_zero = centered_rect_fixed(52, 15, area); + + let layer_one = Layout::new( + Direction::Vertical, + [ + // Padding from title to the table + Constraint::Length(1), + // Table + Constraint::Min(1), + // for the pop_up_border + Constraint::Length(1), + ], + ) + .split(layer_zero); + + let pop_up_border: Paragraph = match self.state { + ChangeConnectionModeState::Selection => { + self.draw_selection_state(f, layer_zero, layer_one) + } + ChangeConnectionModeState::ConfirmChange => { + self.draw_confirm_change(f, layer_zero, layer_one) + } + }; + f.render_widget(pop_up_border, layer_zero); Ok(()) diff --git a/node-launchpad/src/components/popup/port_range.rs b/node-launchpad/src/components/popup/port_range.rs index 23b79870e5..da426b8e7b 100644 --- a/node-launchpad/src/components/popup/port_range.rs +++ b/node-launchpad/src/components/popup/port_range.rs @@ -33,6 +33,7 @@ enum PortRangeState { #[default] Selection, ConfirmChange, + PortForwardingInfo, } pub struct PortRangePopUp { @@ -185,8 +186,100 @@ impl PortRangePopUp { pop_up_border } - // Draws the Confirmation screen - fn draw_confirm_change_state( + // Draws Confirmation screen + fn draw_confirm_and_reset( + &mut self, + f: &mut crate::tui::Frame<'_>, + layer_zero: Rect, + layer_one: Rc<[Rect]>, + ) -> Paragraph { + // layer zero + let pop_up_border = Paragraph::new("").block( + Block::default() + .borders(Borders::ALL) + .title(" Confirm & Reset ") + .bold() + .title_style(Style::new().fg(VIVID_SKY_BLUE)) + .padding(Padding::uniform(2)) + .border_style(Style::new().fg(VIVID_SKY_BLUE)), + ); + clear_area(f, layer_zero); + + // split into 3 parts, paragraph, dash, buttons + let layer_two = Layout::new( + Direction::Vertical, + [ + // for the text + Constraint::Length(8), + // gap + Constraint::Length(3), + // for the buttons + Constraint::Length(1), + ], + ) + .split(layer_one[1]); + + let paragraph_text = Paragraph::new(vec![ + Line::from(Span::styled("\n\n", Style::default())), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(vec![ + Span::styled( + "Changing connection mode will ", + Style::default().fg(LIGHT_PERIWINKLE), 
+ ), + Span::styled("reset all nodes.", Style::default().fg(GHOST_WHITE)), + ]), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(Span::styled("\n\n", Style::default())), + Line::from(vec![ + Span::styled("You’ll need to ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Add", Style::default().fg(GHOST_WHITE)), + Span::styled(" and ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Start", Style::default().fg(GHOST_WHITE)), + Span::styled( + " them again afterwards. Are you sure you want to continue?", + Style::default().fg(LIGHT_PERIWINKLE), + ), + ]), + ]) + .alignment(Alignment::Left) + .wrap(Wrap { trim: true }) + .block(block::Block::default().padding(Padding::horizontal(2))); + + f.render_widget(paragraph_text, layer_two[0]); + + let dash = Block::new() + .borders(Borders::BOTTOM) + .border_style(Style::new().fg(GHOST_WHITE)); + f.render_widget(dash, layer_two[1]); + + let buttons_layer = + Layout::horizontal(vec![Constraint::Percentage(50), Constraint::Percentage(50)]) + .split(layer_two[2]); + + let button_no = Line::from(vec![Span::styled( + " Cancel [Esc]", + Style::default().fg(LIGHT_PERIWINKLE), + )]); + let button_yes_style = if self.can_save { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }; + f.render_widget(button_no, buttons_layer[0]); + + let button_yes = Line::from(vec![ + Span::styled("Yes, Change Mode ", button_yes_style), + Span::styled("[Enter]", Style::default().fg(GHOST_WHITE)), + ]); + f.render_widget(button_yes, buttons_layer[1]); + + pop_up_border + } + + // Draws info regarding router and ports + fn draw_info_port_forwarding( &mut self, f: &mut crate::tui::Frame<'_>, layer_zero: Rect, @@ -204,7 +297,7 @@ impl PortRangePopUp { ); clear_area(f, layer_zero); - // split into 4 parts, for the prompt, input, text, dash , and buttons + // split into 3 parts, 1 paragraph, dash and buttons let layer_two = Layout::new( Direction::Vertical, [ @@ -270,10 +363,11 @@ impl Component for PortRangePopUp { == self.port_from.value().parse::().unwrap_or_default() && self.port_to_old_value == self.port_to.value().parse::().unwrap_or_default() + && self.connection_mode_old_value != Some(ConnectionMode::CustomPorts) && self.can_save { - debug!("Got Enter, but nothing changed, ignoring."); - return Ok(vec![Action::SwitchScene(Scene::Options)]); + self.state = PortRangeState::ConfirmChange; + return Ok(vec![]); } let port_from = self.port_from.value(); let port_to = self.port_to.value(); @@ -284,20 +378,12 @@ impl Component for PortRangePopUp { } debug!("Got Enter, saving the ports and switching to Options Screen",); self.state = PortRangeState::ConfirmChange; - vec![ - Action::StorePortRange( - self.port_from.value().parse().unwrap_or_default(), - self.port_to.value().parse().unwrap_or_default(), - ), - Action::OptionsActions(OptionsActions::UpdatePortRange( - self.port_from.value().parse().unwrap_or_default(), - self.port_to.value().parse().unwrap_or_default(), - )), - ] + vec![] } KeyCode::Esc => { debug!("Got Esc, restoring the old values and switching to actual screen"); if let Some(connection_mode_old_value) = self.connection_mode_old_value { + debug!("{:?}", connection_mode_old_value); vec![ Action::OptionsActions(OptionsActions::UpdateConnectionMode( connection_mode_old_value, @@ -401,6 +487,52 @@ impl Component for PortRangePopUp { } } PortRangeState::ConfirmChange => match key.code { + KeyCode::Enter => { + self.state = 
PortRangeState::PortForwardingInfo; + vec![ + Action::StoreConnectionMode(ConnectionMode::CustomPorts), + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + ConnectionMode::CustomPorts, + )), + Action::StorePortRange( + self.port_from.value().parse().unwrap_or_default(), + self.port_to.value().parse().unwrap_or_default(), + ), + Action::OptionsActions(OptionsActions::UpdatePortRange( + self.port_from.value().parse().unwrap_or_default(), + self.port_to.value().parse().unwrap_or_default(), + )), + ] + } + KeyCode::Esc => { + self.state = PortRangeState::Selection; + if let Some(connection_mode_old_value) = self.connection_mode_old_value { + if self.port_from_old_value != 0 && self.port_to_old_value != 0 { + vec![ + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + connection_mode_old_value, + )), + Action::OptionsActions(OptionsActions::UpdatePortRange( + self.port_from_old_value, + self.port_to_old_value, + )), + Action::SwitchScene(Scene::Options), + ] + } else { + vec![ + Action::OptionsActions(OptionsActions::UpdateConnectionMode( + connection_mode_old_value, + )), + Action::SwitchScene(Scene::Options), + ] + } + } else { + vec![Action::SwitchScene(Scene::Options)] + } + } + _ => vec![], + }, + PortRangeState::PortForwardingInfo => match key.code { KeyCode::Enter => { debug!("Got Enter, saving the ports and switching to Status Screen",); self.state = PortRangeState::Selection; @@ -471,8 +603,9 @@ impl Component for PortRangePopUp { let pop_up_border: Paragraph = match self.state { PortRangeState::Selection => self.draw_selection_state(f, layer_zero, layer_one), - PortRangeState::ConfirmChange => { - self.draw_confirm_change_state(f, layer_zero, layer_one) + PortRangeState::ConfirmChange => self.draw_confirm_and_reset(f, layer_zero, layer_one), + PortRangeState::PortForwardingInfo => { + self.draw_info_port_forwarding(f, layer_zero, layer_one) } }; // We render now so the borders are on top of the other widgets From a6ad2558709969046199972ac33c562afe500d75 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 1 Oct 2024 13:07:08 +0900 Subject: [PATCH 072/255] feat: disable node tests, remove sn_client dep Comments APIs that we need to surface in api --- Cargo.lock | 535 +------ Cargo.toml | 8 +- sn_client/Cargo.toml | 2 +- sn_node/Cargo.toml | 1 - sn_node/examples/register_inspect.rs | 232 --- sn_node/examples/registers.rs | 166 --- .../reactivate_examples/register_inspect.rs | 233 +++ sn_node/reactivate_examples/registers.rs | 167 +++ sn_node/tests/common/client.rs | 9 +- sn_node/tests/common/mod.rs | 505 +++---- sn_node/tests/data_with_churn.rs | 1286 ++++++++--------- sn_node/tests/verify_data_location.rs | 855 +++++------ sn_node/tests/verify_routing_table.rs | 204 +-- sn_node_rpc_client/Cargo.toml | 10 +- 14 files changed, 1859 insertions(+), 2354 deletions(-) delete mode 100644 sn_node/examples/register_inspect.rs delete mode 100644 sn_node/examples/registers.rs create mode 100644 sn_node/reactivate_examples/register_inspect.rs create mode 100644 sn_node/reactivate_examples/registers.rs diff --git a/Cargo.lock b/Cargo.lock index c0bf2820ca..d0b97edc11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,18 +33,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.1", -] - [[package]] name = "aes" 
version = "0.8.4" @@ -52,7 +40,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -63,8 +51,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "ghash", "subtle", @@ -77,8 +65,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae0784134ba9375416d469ec31e7c5f9fa94405049cf08c5ce5b4698be673e0d" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "polyval", "subtle", @@ -378,7 +366,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap 5.5.3", + "dashmap", "futures", "futures-utils-wasm", "lru", @@ -921,12 +909,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" -[[package]] -name = "ascii" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" - [[package]] name = "asn1-rs" version = "0.6.2" @@ -1285,12 +1267,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bech32" -version = "0.10.0-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" - [[package]] name = "better-panic" version = "0.3.0" @@ -1322,7 +1298,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes 0.11.0", + "bitcoin_hashes", "serde", "unicode-normalization", ] @@ -1342,43 +1318,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = "bitcoin" -version = "0.31.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" -dependencies = [ - "base64 0.21.7", - "bech32", - "bitcoin-internals", - "bitcoin_hashes 0.13.0", - "hex-conservative", - "hex_lit", - "secp256k1 0.28.2", -] - -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" -[[package]] -name = "bitcoin_hashes" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals", - "hex-conservative", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -1446,16 +1391,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -1704,7 +1639,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1740,7 +1675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -1752,7 +1687,7 @@ checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", "chacha20", - "cipher 0.4.4", + "cipher", "poly1305", "zeroize", ] @@ -1771,12 +1706,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "chunked_transfer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" - [[package]] name = "ciborium" version = "0.2.2" @@ -1804,15 +1733,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -1992,16 +1912,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if", - "wasm-bindgen", -] - [[package]] name = "const-hex" version = "1.12.0" @@ -2120,8 +2030,6 @@ version = "7.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "387808c885b79055facbd4b2e806a683fe1bc37abc7dfa5fea1974ad2d4137b0" dependencies = [ - "num", - "quickcheck", "serde", "tiny-keccak", ] @@ -2296,7 +2204,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -2410,20 +2318,6 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - [[package]] name = "data-encoding" version = "2.6.0" @@ -2537,19 +2431,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "dialoguer" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" -dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror", - "zeroize", -] - [[package]] name = "diff" version = "0.1.13" @@ -2686,21 +2567,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" -[[package]] -name = "dot-generator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0aaac7ada45f71873ebce336491d1c1bc4a7c8042c7cea978168ad59e805b871" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "dot-structures" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "675e35c02a51bb4d4618cb4885b3839ce6d1787c97b664474d9208d074742e20" - [[package]] name = "downcast" version = "0.11.0" @@ -3903,22 +3769,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "graphviz-rust" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c33d03804e2ce21db5821f2beb4e54f844a8f90326e6bd99a1771dc54aef427" -dependencies = [ - "dot-generator", - "dot-structures", - "into-attr", - "into-attr-derive", - "pest", - "pest_derive", - "rand 0.8.5", - "tempfile", -] - [[package]] name = "group" version = "0.12.1" @@ -4069,12 +3919,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-conservative" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" - [[package]] name = "hex-literal" version = "0.4.1" @@ -4087,12 +3931,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - [[package]] name = "hickory-proto" version = "0.24.1" @@ -4613,28 +4451,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "into-attr" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b48c537e49a709e678caec3753a7dba6854661a1eaa27675024283b3f8b376" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "into-attr-derive" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecac7c1ae6cd2c6a3a64d1061a8bdc7f52ff62c26a831a2301e54c1b5d70d5b1" -dependencies = [ - "dot-generator", - "dot-structures", - "into-attr", - "quote", - "syn 1.0.109", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -5622,19 +5438,6 @@ dependencies = [ "adler2", ] -[[package]] -name = "minreq" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763d142cdff44aaadd9268bebddb156ef6c65a0e13486bb81673cf2d8739f9b0" -dependencies = [ - "log", - "once_cell", - "rustls 0.21.12", - "rustls-webpki 0.101.7", - "webpki-roots 0.25.4", -] - [[package]] name = "mio" version = "0.8.11" @@ -6000,20 +5803,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint 0.4.6", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -6036,16 +5825,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", - "serde", -] - [[package]] name = "num-conv" version = "0.1.0" @@ -6071,29 +5850,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.45" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg 1.3.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "serde", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -6522,8 +6278,6 @@ checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.5.0", - "serde", - "serde_derive", ] [[package]] @@ -7618,17 +7372,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "rpassword" -version = "7.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" -dependencies = [ - "libc", - "rtoolbox", - "windows-sys 0.48.0", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -7644,16 +7387,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtoolbox" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ruint" version = "1.12.3" @@ -7762,19 +7495,7 @@ dependencies = [ "log", "ring 0.16.20", "sct 0.6.1", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" -dependencies = [ - "log", - "ring 0.16.20", - "sct 0.7.1", - "webpki 0.22.4", + "webpki", ] [[package]] @@ -7803,15 +7524,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -7969,21 +7681,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" dependencies = [ "rand 0.6.5", - "secp256k1-sys 0.4.2", + "secp256k1-sys", "serde", ] -[[package]] -name = "secp256k1" -version = "0.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" -dependencies = [ - "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - [[package]] name = "secp256k1-sys" version = "0.4.2" @@ -7993,15 +7694,6 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -8017,7 +7709,7 @@ version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "894da3241a9e426c16fb8cb28b19416eae5fafdc7742e4bc505c1821661c140f" dependencies = [ - "aes 0.8.4", + "aes", "bincode", "brotli", "bytes", @@ -8256,12 +7948,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shell-words" -version = "1.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - [[package]] name = "shlex" version = "1.3.0" @@ -8409,30 +8095,6 @@ dependencies = [ "zip", ] -[[package]] -name = "sn_auditor" -version = "0.3.1" -dependencies = [ - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "futures", - "graphviz-rust", - "lazy_static", - "serde", - "serde_json", - "sn_build_info", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "tiny_http", - "tokio", - "tracing", - "urlencoding", -] - [[package]] name = "sn_bls_ckd" version = "0.2.1" @@ -8454,99 +8116,6 @@ dependencies = [ "vergen", ] -[[package]] -name = "sn_cli" -version = "0.95.1" -dependencies = [ - "aes 0.7.5", - "base64 0.22.1", - "bitcoin", - "block-modes", - "blsttc", - "bytes", - "chrono", - "clap", - "color-eyre", - "criterion", - "custom_debug", - "dialoguer", - "dirs-next", - "eyre", - "futures", - "hex 0.4.3", - "indicatif", - "libp2p 0.54.1", - "rand 0.8.5", - "rayon", - "reqwest 0.12.7", - "rmp-serde", - "rpassword", - "serde", - "sn_build_info", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "tempfile", - "tiny-keccak", - "tokio", - "tracing", - "url", - "walkdir", - "xor_name", -] - -[[package]] -name = "sn_client" -version = "0.110.1" -dependencies = [ - "assert_matches", - "async-trait", - "backoff", - "bip39", - "blsttc", - "bytes", - "console_error_panic_hook", - "crdts", - "custom_debug", - "dashmap 6.1.0", - "dirs-next", - "eyre", - "futures", - "getrandom 0.2.15", - "hex 0.4.3", - "itertools 0.12.1", - "libp2p 0.54.1", - "libp2p-identity", - "petgraph", - "prometheus-client", - "rand 0.8.5", - "rayon", - "rmp-serde", - "self_encryption", - "serde", - "sn_bls_ckd", - "sn_client", - "sn_curv", - "sn_logging", - "sn_networking", - "sn_peers_acquisition", - "sn_protocol", - "sn_registers", - "sn_transfers", - "tempfile", - "thiserror", - "tiny-keccak", - "tokio", - "tracing", - "tracing-wasm", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasmtimer", - "web-sys", - "xor_name", -] - [[package]] name = "sn_curv" version = "0.10.1" @@ -8568,7 +8137,7 @@ dependencies = [ "pairing-plus", "rand 0.6.5", "rand 0.7.3", - "secp256k1 0.20.3", + "secp256k1", "serde", "serde_bytes", "serde_derive", @@ -8602,38 +8171,6 @@ dependencies = [ "xor_name", ] -[[package]] -name = "sn_faucet" -version = "0.5.1" -dependencies = [ - "assert_fs", - "base64 0.22.1", - "bitcoin", - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "fs2", - "futures", - "hex 0.4.3", - "indicatif", - "minreq", - "reqwest 0.12.7", - "serde", - "serde_json", - "sn_build_info", - "sn_cli", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "sn_transfers", - "tokio", - "tracing", - "url", - "warp", -] - [[package]] name = "sn_logging" version = "0.2.34" @@ -8748,7 +8285,6 @@ dependencies = [ "serde", "serde_json", "sn_build_info", - "sn_client", "sn_evm", "sn_logging", "sn_networking", @@ -8785,7 +8321,6 @@ dependencies = [ "libp2p 0.54.1", "libp2p-identity", "sn_build_info", - "sn_client", "sn_logging", "sn_node", "sn_peers_acquisition", @@ -9336,21 +8871,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tiny_http" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" -dependencies = [ - "ascii", - "chunked_transfer", - "httpdate", - "log", - "rustls 0.20.9", - "rustls-pemfile 0.2.1", - "zeroize", -] - 
[[package]] name = "tinytemplate" version = "1.2.1" @@ -9435,7 +8955,7 @@ checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ "rustls 0.19.1", "tokio", - "webpki 0.21.4", + "webpki", ] [[package]] @@ -9819,17 +9339,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "tracing-wasm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" -dependencies = [ - "tracing", - "tracing-subscriber", - "wasm-bindgen", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -10301,16 +9810,6 @@ dependencies = [ "untrusted 0.7.1", ] -[[package]] -name = "webpki" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "webpki-roots" version = "0.25.4" @@ -10827,7 +10326,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ - "aes 0.8.4", + "aes", "byteorder", "bzip2", "constant_time_eq", diff --git a/Cargo.toml b/Cargo.toml index fb86e31a39..79cc1a5945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,12 +4,12 @@ members = [ "autonomi", "evmlib", "evm_testnet", - "sn_auditor", + # "sn_auditor", "sn_build_info", "sn_evm", - "sn_cli", - "sn_client", - "sn_faucet", + # "sn_cli", + # "sn_client", + # "sn_faucet", "sn_logging", "sn_metrics", "nat-detection", diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index c69626873a..da0eeaf5d3 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -67,7 +67,7 @@ assert_matches = "1.5.0" dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", features = ["test-utils"] } +# sn_client = { path = "../sn_client", features = ["test-utils"] } sn_logging = { path = "../sn_logging", version = "0.2.34" } sn_registers = { path = "../sn_registers", version = "0.3.19", features = [ "test-utils", diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index bed23167bb..58159455f1 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -85,7 +85,6 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.110.1" } sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ "rpc", ] } diff --git a/sn_node/examples/register_inspect.rs b/sn_node/examples/register_inspect.rs deleted file mode 100644 index 3c3d70a36b..0000000000 --- a/sn_node/examples/register_inspect.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
-
-use crdts::merkle_reg::{Hash, MerkleReg, Node};
-use std::collections::HashMap;
-use std::io;
-
-use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient};
-use sn_registers::{Entry, Permissions, RegisterAddress};
-
-use xor_name::XorName;
-
-use bls::SecretKey;
-use clap::Parser;
-use color_eyre::{
-    eyre::{eyre, Result, WrapErr},
-    Help,
-};
-
-#[derive(Parser, Debug)]
-#[clap(name = "register inspect cli")]
-struct Opt {
-    // Create register and give it a nickname (first user)
-    #[clap(long, default_value = "")]
-    reg_nickname: String,
-
-    // Get existing register with given network address (any other user)
-    #[clap(long, default_value = "", conflicts_with = "reg_nickname")]
-    reg_address: String,
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    let opt = Opt::parse();
-    let mut reg_nickname = opt.reg_nickname;
-    let reg_address_string = opt.reg_address;
-
-    // let's build a random secret key to sign our Register ops
-    let signer = SecretKey::random();
-
-    println!("Starting SAFE client...");
-    let client = Client::new(signer, None, None, None).await?;
-    println!("SAFE client signer public key: {:?}", client.signer_pk());
-
-    // The address of the register to be displayed
-    let mut meta = XorName::from_content(reg_nickname.as_bytes());
-    let reg_address = if !reg_nickname.is_empty() {
-        meta = XorName::from_content(reg_nickname.as_bytes());
-        RegisterAddress::new(meta, client.signer_pk())
-    } else {
-        reg_nickname = format!("{reg_address_string:<6}...");
-        RegisterAddress::from_hex(&reg_address_string)
-            .wrap_err("cannot parse hex register address")?
-    };
-
-    // Loading a local wallet (for ClientRegister::sync()).
-    // The wallet can have ZERO balance in this example,
-    // but the ClientRegister::sync() API requires a wallet and will
-    // create the register if not found even though we don't want that.
-    //
-    // The only want to avoid unwanted creation of a Register seems to
-    // be to supply an empty wallet.
-    // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308
-    let root_dir = dirs_next::data_dir()
-        .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))?
-        .join("safe")
-        .join("client");
-
-    let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)
-        .wrap_err("Unable to read wallet file in {root_dir:?}")
-        .suggestion(
-            "If you have an old wallet file, it may no longer be compatible. Try removing it",
Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // Repeatedly display of the register structure on command - loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - if prompt_user() { - return Ok(()); - } - - // Sync with network after a delay - println!("Syncing with SAFE..."); - reg_replica.sync(&mut wallet_client, true, None).await?; - let merkle_reg = reg_replica.merkle_reg(); - let content = merkle_reg.read(); - println!("synced!"); - - // Show the Register structure - - // Index nodes to make it easier to see where a - // node appears multiple times in the output. - // Note: it isn't related to the order of insertion - // which is hard to determine. - let mut index: usize = 0; - let mut node_ordering: HashMap = HashMap::new(); - for (_hash, node) in content.hashes_and_nodes() { - index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg); - } - - println!("======================"); - println!("Root (Latest) Node(s):"); - for node in content.nodes() { - let _ = print_node(0, node, &node_ordering); - } - - println!("======================"); - println!("Register Structure:"); - println!("(In general, earlier nodes are more indented)"); - let mut indents = 0; - for (_hash, node) in content.hashes_and_nodes() { - print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg); - } - - println!("======================"); - } -} - -fn index_node_and_descendants( - node: &Node, - index: &mut usize, - node_ordering: &mut HashMap, - merkle_reg: &MerkleReg, -) { - let node_hash = node.hash(); - if node_ordering.get(&node_hash).is_none() { - node_ordering.insert(node_hash, *index); - *index += 1; - } - - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - index_node_and_descendants(child_node, index, node_ordering, merkle_reg); - } else { - println!("ERROR looking up hash of child"); - } - } -} - -fn print_node_and_descendants( - indents: &mut usize, - node: &Node, - node_ordering: &HashMap, - merkle_reg: &MerkleReg, -) { - let _ = print_node(*indents, node, node_ordering); - - *indents += 1; - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - print_node_and_descendants(indents, child_node, node_ordering, merkle_reg); - } - } - *indents -= 1; -} - -fn print_node( - indents: usize, - node: &Node, - node_ordering: &HashMap, -) -> Result<()> { - let order = match node_ordering.get(&node.hash()) { - 
-        Some(order) => format!("{order}"),
-        None => String::new(),
-    };
-    let indentation = " ".repeat(indents);
-    println!(
-        "{indentation}[{:>2}] Node({:?}..) Entry({:?})",
-        order,
-        hex::encode(&node.hash()[0..3]),
-        String::from_utf8(node.value.clone())?
-    );
-    Ok(())
-}
-
-fn prompt_user() -> bool {
-    let mut input_text = String::new();
-    println!();
-    println!("Enter a blank line to print the latest register structure (or 'Q' to quit)");
-    io::stdin()
-        .read_line(&mut input_text)
-        .expect("Failed to read text from stdin");
-
-    let string = input_text.trim().to_string();
-
-    string.contains('Q') || string.contains('q')
-}
diff --git a/sn_node/examples/registers.rs b/sn_node/examples/registers.rs
deleted file mode 100644
index 70d3177a1c..0000000000
--- a/sn_node/examples/registers.rs
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use sn_client::{
-    acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient,
-};
-use sn_registers::{Permissions, RegisterAddress};
-
-use xor_name::XorName;
-
-use bls::SecretKey;
-use clap::Parser;
-use color_eyre::{
-    eyre::{eyre, Result, WrapErr},
-    Help,
-};
-use std::{io, time::Duration};
-use tokio::time::sleep;
-
-#[derive(Parser, Debug)]
-#[clap(name = "registers cli")]
-struct Opt {
-    // A name for this user in the example
-    #[clap(long)]
-    user: String,
-
-    // Create register and give it a nickname (first user)
-    #[clap(long, default_value = "")]
-    reg_nickname: String,
-
-    // Get existing register with given network address (any other user)
-    #[clap(long, default_value = "", conflicts_with = "reg_nickname")]
-    reg_address: String,
-
-    // Delay before synchronising local register with the network
-    #[clap(long, default_value_t = 2000)]
-    delay_millis: u64,
-}
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    let opt = Opt::parse();
-    let user = opt.user;
-    let mut reg_nickname = opt.reg_nickname;
-    let reg_address_string = opt.reg_address;
-    let delay = Duration::from_millis(opt.delay_millis);
-
-    // let's build a random secret key to sign our Register ops
-    let signer = SecretKey::random();
-
-    println!("Starting SAFE client...");
-    let client = Client::new(signer, None, None, None).await?;
-    println!("SAFE client signer public key: {:?}", client.signer_pk());
-
-    // We'll retrieve (or create if not found) a Register, and write on it
-    // in offline mode, syncing with the network periodically.
-
-    let mut meta = XorName::from_content(reg_nickname.as_bytes());
-    let reg_address = if !reg_nickname.is_empty() {
-        meta = XorName::from_content(reg_nickname.as_bytes());
-        RegisterAddress::new(meta, client.signer_pk())
-    } else {
-        reg_nickname = format!("{reg_address_string:<6}...");
-        RegisterAddress::from_hex(&reg_address_string)
-            .wrap_err("cannot parse hex register address")?
-    };
-
-    // Loading a local wallet. It needs to have a non-zero balance for
-    // this example to be able to pay for the Register's storage.
- let root_dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe") - .join("client"); - - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .wrap_err("Unable to read wallet file in {root_dir:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. Try removing it", - )?; - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // We'll loop asking for new msg to write onto the Register offline, - // then we'll be syncing the offline Register with the network, i.e. - // both pushing and ulling all changes made to it by us and other clients/users. - // If we detect branches when trying to write, after we synced with remote - // replicas of the Register, we'll merge them all back into a single value. - loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - let input_text = prompt_user(); - if !input_text.is_empty() { - println!("Writing msg (offline) to Register: '{input_text}'"); - let msg = format!("[{user}]: {input_text}"); - match reg_replica.write(msg.as_bytes()) { - Ok(_) => {} - Err(Error::ContentBranchDetected(branches)) => { - println!( - "Branches ({}) detected in Register, let's merge them all...", - branches.len() - ); - reg_replica.write_merging_branches(msg.as_bytes())?; - } - Err(err) => return Err(err.into()), - } - } - - // Sync with network after a delay - println!("Syncing with SAFE in {delay:?}..."); - sleep(delay).await; - reg_replica.sync(&mut wallet_client, true, None).await?; - println!("synced!"); - } -} - -fn prompt_user() -> String { - let mut input_text = String::new(); - println!(); - println!("Enter a blank line to receive updates, or some text to be written."); - io::stdin() - .read_line(&mut input_text) - .expect("Failed to read text from stdin"); - - input_text.trim().to_string() -} diff --git a/sn_node/reactivate_examples/register_inspect.rs b/sn_node/reactivate_examples/register_inspect.rs new file mode 100644 index 0000000000..2873aa1139 --- /dev/null +++ b/sn_node/reactivate_examples/register_inspect.rs @@ -0,0 +1,233 @@ +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// // KIND, either express or implied. Please review the Licences for the specific language governing
+// // permissions and limitations relating to use of the SAFE Network Software.
+
+// use crdts::merkle_reg::{Hash, MerkleReg, Node};
+// use std::collections::HashMap;
+// use std::io;
+
+// // TODO: use autonomi API here
+// // use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient};
+// use sn_registers::{Entry, Permissions, RegisterAddress};
+
+// use xor_name::XorName;
+
+// use bls::SecretKey;
+// use clap::Parser;
+// use color_eyre::{
+//     eyre::{eyre, Result, WrapErr},
+//     Help,
+// };
+
+// #[derive(Parser, Debug)]
+// #[clap(name = "register inspect cli")]
+// struct Opt {
+//     // Create register and give it a nickname (first user)
+//     #[clap(long, default_value = "")]
+//     reg_nickname: String,
+
+//     // Get existing register with given network address (any other user)
+//     #[clap(long, default_value = "", conflicts_with = "reg_nickname")]
+//     reg_address: String,
+// }
+
+// #[tokio::main]
+// async fn main() -> Result<()> {
+//     let opt = Opt::parse();
+//     let mut reg_nickname = opt.reg_nickname;
+//     let reg_address_string = opt.reg_address;
+
+//     // let's build a random secret key to sign our Register ops
+//     let signer = SecretKey::random();
+
+//     println!("Starting SAFE client...");
+//     let client = Client::new(signer, None, None, None).await?;
+//     println!("SAFE client signer public key: {:?}", client.signer_pk());
+
+//     // The address of the register to be displayed
+//     let mut meta = XorName::from_content(reg_nickname.as_bytes());
+//     let reg_address = if !reg_nickname.is_empty() {
+//         meta = XorName::from_content(reg_nickname.as_bytes());
+//         RegisterAddress::new(meta, client.signer_pk())
+//     } else {
+//         reg_nickname = format!("{reg_address_string:<6}...");
+//         RegisterAddress::from_hex(&reg_address_string)
+//             .wrap_err("cannot parse hex register address")?
+//     };
+
+//     // Loading a local wallet (for ClientRegister::sync()).
+//     // The wallet can have ZERO balance in this example,
+//     // but the ClientRegister::sync() API requires a wallet and will
+//     // create the register if not found even though we don't want that.
+//     //
+//     // The only way to avoid unwanted creation of a Register seems to
+//     // be to supply an empty wallet.
+//     // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308
+//     let root_dir = dirs_next::data_dir()
+//         .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))?
+//         .join("safe")
+//         .join("client");
+
+//     let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)
+//         .wrap_err("Unable to read wallet file in {root_dir:?}")
+//         .suggestion(
+//             "If you have an old wallet file, it may no longer be compatible. Try removing it",
Try removing it", +// )?; + +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // Repeatedly display of the register structure on command +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// if prompt_user() { +// return Ok(()); +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE..."); +// reg_replica.sync(&mut wallet_client, true, None).await?; +// let merkle_reg = reg_replica.merkle_reg(); +// let content = merkle_reg.read(); +// println!("synced!"); + +// // Show the Register structure + +// // Index nodes to make it easier to see where a +// // node appears multiple times in the output. +// // Note: it isn't related to the order of insertion +// // which is hard to determine. 
+//         let mut index: usize = 0;
+//         let mut node_ordering: HashMap<Hash, usize> = HashMap::new();
+//         for (_hash, node) in content.hashes_and_nodes() {
+//             index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg);
+//         }
+
+//         println!("======================");
+//         println!("Root (Latest) Node(s):");
+//         for node in content.nodes() {
+//             let _ = print_node(0, node, &node_ordering);
+//         }
+
+//         println!("======================");
+//         println!("Register Structure:");
+//         println!("(In general, earlier nodes are more indented)");
+//         let mut indents = 0;
+//         for (_hash, node) in content.hashes_and_nodes() {
+//             print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg);
+//         }
+
+//         println!("======================");
+//     }
+// }
+
+// fn index_node_and_descendants(
+//     node: &Node<Entry>,
+//     index: &mut usize,
+//     node_ordering: &mut HashMap<Hash, usize>,
+//     merkle_reg: &MerkleReg<Entry>,
+// ) {
+//     let node_hash = node.hash();
+//     if node_ordering.get(&node_hash).is_none() {
+//         node_ordering.insert(node_hash, *index);
+//         *index += 1;
+//     }
+
+//     for child_hash in node.children.iter() {
+//         if let Some(child_node) = merkle_reg.node(*child_hash) {
+//             index_node_and_descendants(child_node, index, node_ordering, merkle_reg);
+//         } else {
+//             println!("ERROR looking up hash of child");
+//         }
+//     }
+// }
+
+// fn print_node_and_descendants(
+//     indents: &mut usize,
+//     node: &Node<Entry>,
+//     node_ordering: &HashMap<Hash, usize>,
+//     merkle_reg: &MerkleReg<Entry>,
+// ) {
+//     let _ = print_node(*indents, node, node_ordering);
+
+//     *indents += 1;
+//     for child_hash in node.children.iter() {
+//         if let Some(child_node) = merkle_reg.node(*child_hash) {
+//             print_node_and_descendants(indents, child_node, node_ordering, merkle_reg);
+//         }
+//     }
+//     *indents -= 1;
+// }
+
+// fn print_node(
+//     indents: usize,
+//     node: &Node<Entry>,
+//     node_ordering: &HashMap<Hash, usize>,
+// ) -> Result<()> {
+//     let order = match node_ordering.get(&node.hash()) {
+//         Some(order) => format!("{order}"),
+//         None => String::new(),
+//     };
+//     let indentation = " ".repeat(indents);
+//     println!(
+//         "{indentation}[{:>2}] Node({:?}..) Entry({:?})",
+//         order,
+//         hex::encode(&node.hash()[0..3]),
+//         String::from_utf8(node.value.clone())?
+//     );
+//     Ok(())
+// }
+
+// fn prompt_user() -> bool {
+//     let mut input_text = String::new();
+//     println!();
+//     println!("Enter a blank line to print the latest register structure (or 'Q' to quit)");
+//     io::stdin()
+//         .read_line(&mut input_text)
+//         .expect("Failed to read text from stdin");
+
+//     let string = input_text.trim().to_string();
+
+//     string.contains('Q') || string.contains('q')
+// }
diff --git a/sn_node/reactivate_examples/registers.rs b/sn_node/reactivate_examples/registers.rs
new file mode 100644
index 0000000000..6fa6c51045
--- /dev/null
+++ b/sn_node/reactivate_examples/registers.rs
@@ -0,0 +1,167 @@
+// // Copyright 2024 MaidSafe.net limited.
+// //
+// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// // KIND, either express or implied. Please review the Licences for the specific language governing
+// // permissions and limitations relating to use of the SAFE Network Software.
+
+// // TODO: use autonomi API here.
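+// // As a rough, non-authoritative sketch of the port this TODO asks for, the
+// // sn_client setup used below might collapse to something like the following.
+// // Every name here is a hypothetical placeholder, not the actual autonomi API:
+// //     let client = autonomi::Client::connect(&initial_peers).await?; // placeholder
+// //     let wallet = autonomi::Wallet::load_from(&root_dir)?;          // placeholder
+// //     let mut register = client.register_get(reg_address).await?;    // placeholder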
+// // use sn_client::{
+// //     acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient,
+// // };
+// use sn_registers::{Permissions, RegisterAddress};
+
+// use xor_name::XorName;
+
+// use bls::SecretKey;
+// use clap::Parser;
+// use color_eyre::{
+//     eyre::{eyre, Result, WrapErr},
+//     Help,
+// };
+// use std::{io, time::Duration};
+// use tokio::time::sleep;
+
+// #[derive(Parser, Debug)]
+// #[clap(name = "registers cli")]
+// struct Opt {
+//     // A name for this user in the example
+//     #[clap(long)]
+//     user: String,
+
+//     // Create register and give it a nickname (first user)
+//     #[clap(long, default_value = "")]
+//     reg_nickname: String,
+
+//     // Get existing register with given network address (any other user)
+//     #[clap(long, default_value = "", conflicts_with = "reg_nickname")]
+//     reg_address: String,
+
+//     // Delay before synchronising local register with the network
+//     #[clap(long, default_value_t = 2000)]
+//     delay_millis: u64,
+// }
+
+// #[tokio::main]
+// async fn main() -> Result<()> {
+//     let opt = Opt::parse();
+//     let user = opt.user;
+//     let mut reg_nickname = opt.reg_nickname;
+//     let reg_address_string = opt.reg_address;
+//     let delay = Duration::from_millis(opt.delay_millis);
+
+//     // let's build a random secret key to sign our Register ops
+//     let signer = SecretKey::random();
+
+//     println!("Starting SAFE client...");
+//     let client = Client::new(signer, None, None, None).await?;
+//     println!("SAFE client signer public key: {:?}", client.signer_pk());
+
+//     // We'll retrieve (or create if not found) a Register, and write on it
+//     // in offline mode, syncing with the network periodically.
+
+//     let mut meta = XorName::from_content(reg_nickname.as_bytes());
+//     let reg_address = if !reg_nickname.is_empty() {
+//         meta = XorName::from_content(reg_nickname.as_bytes());
+//         RegisterAddress::new(meta, client.signer_pk())
+//     } else {
+//         reg_nickname = format!("{reg_address_string:<6}...");
+//         RegisterAddress::from_hex(&reg_address_string)
+//             .wrap_err("cannot parse hex register address")?
+//     };
+
+//     // Loading a local wallet. It needs to have a non-zero balance for
+//     // this example to be able to pay for the Register's storage.
+//     let root_dir = dirs_next::data_dir()
+//         .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))?
+//         .join("safe")
+//         .join("client");
+
+//     let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)
+//         .wrap_err("Unable to read wallet file in {root_dir:?}")
+//         .suggestion(
+//             "If you have an old wallet file, it may no longer be compatible. Try removing it",
Try removing it", +// )?; +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // We'll loop asking for new msg to write onto the Register offline, +// // then we'll be syncing the offline Register with the network, i.e. +// // both pushing and ulling all changes made to it by us and other clients/users. +// // If we detect branches when trying to write, after we synced with remote +// // replicas of the Register, we'll merge them all back into a single value. +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// let input_text = prompt_user(); +// if !input_text.is_empty() { +// println!("Writing msg (offline) to Register: '{input_text}'"); +// let msg = format!("[{user}]: {input_text}"); +// match reg_replica.write(msg.as_bytes()) { +// Ok(_) => {} +// Err(Error::ContentBranchDetected(branches)) => { +// println!( +// "Branches ({}) detected in Register, let's merge them all...", +// branches.len() +// ); +// reg_replica.write_merging_branches(msg.as_bytes())?; +// } +// Err(err) => return Err(err.into()), +// } +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE in {delay:?}..."); +// sleep(delay).await; +// reg_replica.sync(&mut wallet_client, true, None).await?; +// println!("synced!"); +// } +// } + +// fn prompt_user() -> String { +// let mut input_text = String::new(); +// println!(); +// println!("Enter a blank line to receive updates, or some text to be written."); +// io::stdin() +// .read_line(&mut input_text) +// .expect("Failed to read text from stdin"); + +// input_text.trim().to_string() +// } diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index bff2c8d333..297b103d27 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -8,10 +8,11 @@ use eyre::{bail, OptionExt, Result}; use libp2p::PeerId; -use sn_client::{ - acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, - send, Client, -}; +/// TODO: Update to use autonomi API here +// use sn_client::{ +// acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, +// send, Client, +// }; use sn_peers_acquisition::parse_peer_addr; use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest}; use sn_service_management::{ diff --git a/sn_node/tests/common/mod.rs b/sn_node/tests/common/mod.rs index 6366e2092c..452d506379 100644 
--- a/sn_node/tests/common/mod.rs
+++ b/sn_node/tests/common/mod.rs
@@ -1,275 +1,276 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-#![allow(dead_code)]
+// // Copyright 2024 MaidSafe.net limited.
+// //
+// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// // KIND, either express or implied. Please review the Licences for the specific language governing
+// // permissions and limitations relating to use of the SAFE Network Software.
+// #![allow(dead_code)]

-pub mod client;
+// pub mod client;

-use self::client::{Droplet, NonDroplet};
-use bytes::Bytes;
-use eyre::{bail, eyre, OptionExt, Result};
-use itertools::Either;
-use libp2p::PeerId;
-use rand::{
-    distributions::{Distribution, Standard},
-    Rng,
-};
-use self_encryption::MIN_ENCRYPTABLE_BYTES;
-use sn_client::{Client, FilesApi};
-use sn_protocol::{
-    safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest},
-    storage::ChunkAddress,
-};
-use sn_service_management::{
-    get_local_node_registry_path,
-    safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry,
-};
-use std::{
-    fs::File,
-    io::Write,
-    net::SocketAddr,
-    path::{Path, PathBuf},
-    time::Duration,
-};
-use test_utils::testnet::DeploymentInventory;
-use tonic::Request;
-use tracing::{debug, error, warn};
-use xor_name::XorName;
+// use self::client::{Droplet, NonDroplet};
+// use bytes::Bytes;
+// use eyre::{bail, eyre, OptionExt, Result};
+// use itertools::Either;
+// use libp2p::PeerId;
+// use rand::{
+//     distributions::{Distribution, Standard},
+//     Rng,
+// };
+// use self_encryption::MIN_ENCRYPTABLE_BYTES;
+// // TODO: Use autonomi API here
+// // use sn_client::{Client, FilesApi};
+// use sn_protocol::{
+//     safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest},
+//     storage::ChunkAddress,
+// };
+// use sn_service_management::{
+//     get_local_node_registry_path,
+//     safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry,
+// };
+// use std::{
+//     fs::File,
+//     io::Write,
+//     net::SocketAddr,
+//     path::{Path, PathBuf},
+//     time::Duration,
+// };
+// use test_utils::testnet::DeploymentInventory;
+// use tonic::Request;
+// use tracing::{debug, error, warn};
+// use xor_name::XorName;

-type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>;
+// type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>;

-pub fn random_content(
-    client: &Client,
-    wallet_dir: PathBuf,
-    chunk_dir: &Path,
-) -> ResultRandomContent {
-    let mut rng = rand::thread_rng();
+// pub fn random_content(
+//     client: &Client,
+//     wallet_dir: PathBuf,
+//     chunk_dir: &Path,
+// ) -> ResultRandomContent {
+//     let mut rng = rand::thread_rng();

-    let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES);
-    let random_length_content: Vec<u8> =
-        <Standard as Distribution<u8>>::sample_iter(Standard, &mut rng)
-            .take(random_len)
-            .collect();
+//     let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES);
+//     let random_length_content: Vec<u8> =
+//         <Standard as Distribution<u8>>::sample_iter(Standard, &mut rng)
+//             .take(random_len)
+//             .collect();

-    let file_path = chunk_dir.join("random_content");
-    let mut output_file = File::create(file_path.clone())?;
-    output_file.write_all(&random_length_content)?;
+//     let file_path = chunk_dir.join("random_content");
+//     let mut output_file = File::create(file_path.clone())?;
+//     output_file.write_all(&random_length_content)?;

-    let files_api = FilesApi::new(client.clone(), wallet_dir);
-    let (head_chunk_address, _data_map, _file_size, chunks) =
-        FilesApi::chunk_file(&file_path, chunk_dir, true)?;
+//     let files_api = FilesApi::new(client.clone(), wallet_dir);
+//     let (head_chunk_address, _data_map, _file_size, chunks) =
+//         FilesApi::chunk_file(&file_path, chunk_dir, true)?;

-    Ok((
-        files_api,
-        random_length_content.into(),
-        head_chunk_address,
-        chunks,
-    ))
-}
+//     Ok((
+//         files_api,
+//         random_length_content.into(),
+//         head_chunk_address,
+//         chunks,
+//     ))
+// }

-// Connect to a RPC socket addr with retry
-pub async fn get_safenode_rpc_client(
-    socket_addr: SocketAddr,
-) -> Result<SafeNodeClient<tonic::transport::Channel>> {
-    // get the new PeerId for the current NodeIndex
-    let endpoint = format!("https://{socket_addr}");
-    let mut attempts = 0;
-    loop {
-        if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await {
-            break Ok(rpc_client);
-        }
-        attempts += 1;
-        println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
-        error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
-        tokio::time::sleep(Duration::from_secs(1)).await;
-        if attempts >= 10 {
-            bail!("Failed to connect to {endpoint:?} even after 10 retries");
-        }
-    }
-}
+// // Connect to a RPC socket addr with retry
+// pub async fn get_safenode_rpc_client(
+//     socket_addr: SocketAddr,
+// ) -> Result<SafeNodeClient<tonic::transport::Channel>> {
+//     // get the new PeerId for the current NodeIndex
+//     let endpoint = format!("https://{socket_addr}");
+//     let mut attempts = 0;
+//     loop {
+//         if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await {
+//             break Ok(rpc_client);
+//         }
+//         attempts += 1;
+//         println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
+//         error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
+//         tokio::time::sleep(Duration::from_secs(1)).await;
+//         if attempts >= 10 {
+//             bail!("Failed to connect to {endpoint:?} even after 10 retries");
+//         }
+//     }
+// }

-// Connect to a RPC socket addr with retry
-pub async fn get_safenode_manager_rpc_client(
-    socket_addr: SocketAddr,
-) -> Result<SafeNodeManagerClient<tonic::transport::Channel>> {
-    // get the new PeerId for the current NodeIndex
-    let endpoint = format!("https://{socket_addr}");
-    let mut attempts = 0;
-    loop {
-        if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await {
-            break Ok(rpc_client);
-        }
-        attempts += 1;
-        println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
-        error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
-        tokio::time::sleep(Duration::from_secs(1)).await;
-        if attempts >= 10 {
-            bail!("Failed to connect to {endpoint:?} even after 10 retries");
-        }
-    }
-}
+// // Connect to a RPC socket addr with retry
+// pub async fn get_safenode_manager_rpc_client(
+//     socket_addr: SocketAddr,
+// ) -> Result<SafeNodeManagerClient<tonic::transport::Channel>> {
+//     // get the new PeerId for the current NodeIndex
+//     let endpoint = format!("https://{socket_addr}");
+//     let mut attempts = 0;
+//     loop {
+//         if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await {
+//             break Ok(rpc_client);
+//         }
+//         attempts += 1;
+//         println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
+//         error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10");
+//         tokio::time::sleep(Duration::from_secs(1)).await;
+//         if attempts >= 10 {
+//             bail!("Failed to connect to {endpoint:?} even after 10 retries");
+//         }
+//     }
+// }

-// Returns all the PeerId for all the running nodes
-pub async fn get_all_peer_ids(node_rpc_addresses: &Vec<SocketAddr>) -> Result<Vec<PeerId>> {
-    let mut all_peers = Vec::new();
+// // Returns all the PeerId for all the running nodes
+// pub async fn get_all_peer_ids(node_rpc_addresses: &Vec<SocketAddr>) -> Result<Vec<PeerId>> {
+//     let mut all_peers = Vec::new();

-    for addr in node_rpc_addresses {
-        let mut rpc_client = get_safenode_rpc_client(*addr).await?;
+//     for addr in node_rpc_addresses {
+//         let mut rpc_client = get_safenode_rpc_client(*addr).await?;

-        // get the peer_id
-        let response = rpc_client
-            .node_info(Request::new(NodeInfoRequest {}))
-            .await?;
-        let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?;
-        all_peers.push(peer_id);
-    }
-    debug!(
-        "Obtained the PeerId list for the running network with a node count of {}",
-        node_rpc_addresses.len()
-    );
-    Ok(all_peers)
-}
+//         // get the peer_id
+//         let response = rpc_client
+//             .node_info(Request::new(NodeInfoRequest {}))
+//             .await?;
+//         let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?;
+//         all_peers.push(peer_id);
+//     }
+//     debug!(
+//         "Obtained the PeerId list for the running network with a node count of {}",
+//         node_rpc_addresses.len()
+//     );
+//     Ok(all_peers)
+// }

-/// A struct to facilitate restart of droplet/local nodes
-pub struct NodeRestart {
-    // Deployment inventory is used incase of Droplet nodes and NodeRegistry incase of NonDroplet nodes.
-    inventory_file: Either<DeploymentInventory, NodeRegistry>,
-    next_to_restart_idx: usize,
-    skip_genesis_for_droplet: bool,
-    retain_peer_id: bool,
-}
+// /// A struct to facilitate restart of droplet/local nodes
+// pub struct NodeRestart {
+//     // Deployment inventory is used in case of Droplet nodes and NodeRegistry in case of NonDroplet nodes.
+//     inventory_file: Either<DeploymentInventory, NodeRegistry>,
+//     next_to_restart_idx: usize,
+//     skip_genesis_for_droplet: bool,
+//     retain_peer_id: bool,
+// }

-impl NodeRestart {
-    /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there.
-    /// The restarted node relies on the genesis multiaddr to bootstrap after restart.
-    ///
-    /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc.
-    pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result<Self> {
-        let inventory_file = match DeploymentInventory::load() {
-            Ok(inv) => Either::Left(inv),
-            Err(_) => {
-                let reg = NodeRegistry::load(&get_local_node_registry_path()?)?;
-                Either::Right(reg)
-            }
-        };
+// impl NodeRestart {
+//     /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there.
+//     /// The restarted node relies on the genesis multiaddr to bootstrap after restart.
+//     ///
+//     /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc.
+//     pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result<Self> {
+//         let inventory_file = match DeploymentInventory::load() {
+//             Ok(inv) => Either::Left(inv),
+//             Err(_) => {
+//                 let reg = NodeRegistry::load(&get_local_node_registry_path()?)?;
+//                 Either::Right(reg)
+//             }
+//         };

-        Ok(Self {
-            inventory_file,
-            next_to_restart_idx: 0,
-            skip_genesis_for_droplet,
-            retain_peer_id,
-        })
-    }
+//         Ok(Self {
+//             inventory_file,
+//             next_to_restart_idx: 0,
+//             skip_genesis_for_droplet,
+//             retain_peer_id,
+//         })
+//     }

-    /// Restart the next node in the list.
-    /// Set `loop_over` to `true` if we want to start over the restart process if we have already restarted all
-    /// the nodes.
-    /// Set `progress_on_error` to `true` if we want to restart the next node if you call this function again.
-    /// Else we'll be retrying the same node on the next call.
-    ///
-    /// Returns the `safenode's RPC addr` if we have restarted a node successfully.
-    /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes.
-    pub async fn restart_next(
-        &mut self,
-        loop_over: bool,
-        progress_on_error: bool,
-    ) -> Result<Option<SocketAddr>> {
-        let safenode_rpc_endpoint = match self.inventory_file.clone() {
-            Either::Left(inv) => {
-                // check if we've reached the end
-                if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() {
-                    self.next_to_restart_idx = 0;
-                }
+//     /// Restart the next node in the list.
+//     /// Set `loop_over` to `true` if we want to start the restart process over once we have already
+//     /// restarted all the nodes.
+//     /// Set `progress_on_error` to `true` if we want to move on to the next node when this function
+//     /// is called again; else we'll be retrying the same node on the next call.
+//     ///
+//     /// Returns the `safenode's RPC addr` if we have restarted a node successfully.
+//     /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes.
+//     pub async fn restart_next(
+//         &mut self,
+//         loop_over: bool,
+//         progress_on_error: bool,
+//     ) -> Result<Option<SocketAddr>> {
+//         let safenode_rpc_endpoint = match self.inventory_file.clone() {
+//             Either::Left(inv) => {
+//                 // check if we've reached the end
+//                 if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() {
+//                     self.next_to_restart_idx = 0;
+//                 }

-                if let Some((peer_id, daemon_endpoint)) = inv
-                    .safenodemand_endpoints
-                    .iter()
-                    .nth(self.next_to_restart_idx)
-                {
-                    self.restart(*peer_id, *daemon_endpoint, progress_on_error)
-                        .await?;
-
-                    let safenode_rpc_endpoint = inv
-                        .rpc_endpoints
-                        .get(peer_id)
-                        .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?;
-                    Some(*safenode_rpc_endpoint)
-                } else {
-                    warn!("We have restarted all the nodes in the list.
Since loop_over is false, we are not restarting any nodes now."); - None - } - } - Either::Right(reg) => { - // check if we've reached the end - if loop_over && self.next_to_restart_idx > reg.nodes.len() { - self.next_to_restart_idx = 0; - } +// let safenode_rpc_endpoint = inv +// .rpc_endpoints +// .get(peer_id) +// .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?; +// Some(*safenode_rpc_endpoint) +// } else { +// warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); +// None +// } +// } +// Either::Right(reg) => { +// // check if we've reached the end +// if loop_over && self.next_to_restart_idx > reg.nodes.len() { +// self.next_to_restart_idx = 0; +// } - if let Some((peer_id, safenode_rpc_endpoint)) = reg - .nodes - .get(self.next_to_restart_idx) - .map(|node| (node.peer_id, node.rpc_socket_addr)) - { - let peer_id = - peer_id.ok_or_eyre("PeerId should be present for a local node")?; - self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) - .await?; - Some(safenode_rpc_endpoint) - } else { - warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); - None - } - } - }; +// if let Some((peer_id, safenode_rpc_endpoint)) = reg +// .nodes +// .get(self.next_to_restart_idx) +// .map(|node| (node.peer_id, node.rpc_socket_addr)) +// { +// let peer_id = +// peer_id.ok_or_eyre("PeerId should be present for a local node")?; +// self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) +// .await?; +// Some(safenode_rpc_endpoint) +// } else { +// warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); +// None +// } +// } +// }; - Ok(safenode_rpc_endpoint) - } +// Ok(safenode_rpc_endpoint) +// } - async fn restart( - &mut self, - peer_id: PeerId, - endpoint: SocketAddr, - progress_on_error: bool, - ) -> Result<()> { - match &self.inventory_file { - Either::Left(_inv) => { - match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) - .await - .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { - Ok(_) => { - self.next_to_restart_idx += 1; - }, - Err(err) => { - if progress_on_error { - self.next_to_restart_idx += 1; - } - return Err(err); - }, - } - }, - Either::Right(_reg) => { - match NonDroplet::restart_node(endpoint, self.retain_peer_id).await - .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { - Ok(_) => { - self.next_to_restart_idx += 1; - }, - Err(err) => { - if progress_on_error { - self.next_to_restart_idx += 1; - } - return Err(err); - } - } - } - } - Ok(()) - } +// async fn restart( +// &mut self, +// peer_id: PeerId, +// endpoint: SocketAddr, +// progress_on_error: bool, +// ) -> Result<()> { +// match &self.inventory_file { +// Either::Left(_inv) => { +// match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) +// .await +// .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { +// Ok(_) => { +// self.next_to_restart_idx += 1; +// }, +// Err(err) => { +// if progress_on_error { +// self.next_to_restart_idx += 1; +// } +// return Err(err); +// }, +// } +// }, +// Either::Right(_reg) => { +// match NonDroplet::restart_node(endpoint, self.retain_peer_id).await +// .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} 
with err {err:?}")) { +// Ok(_) => { +// self.next_to_restart_idx += 1; +// }, +// Err(err) => { +// if progress_on_error { +// self.next_to_restart_idx += 1; +// } +// return Err(err); +// } +// } +// } +// } +// Ok(()) +// } - pub fn reset_index(&mut self) { - self.next_to_restart_idx = 0; - } -} +// pub fn reset_index(&mut self) { +// self.next_to_restart_idx = 0; +// } +// } diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 36626b920d..64d014f5dc 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -1,643 +1,643 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod common; - -use crate::common::client::{add_funds_to_wallet, get_client_and_funded_wallet}; -use assert_fs::TempDir; -use common::{ - client::{get_node_count, get_wallet}, - NodeRestart, -}; -use eyre::{bail, eyre, Result}; -use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_protocol::{ - storage::{ChunkAddress, RegisterAddress, SpendAddress}, - NetworkAddress, -}; -use sn_registers::Permissions; -use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; -use std::{ - collections::{BTreeMap, VecDeque}, - fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, - sync::Arc, - time::{Duration, Instant}, -}; -use tempfile::tempdir; -use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; -use tracing::{debug, error, info, trace, warn}; -use xor_name::XorName; - -const EXTRA_CHURN_COUNT: u32 = 5; -const CHURN_CYCLES: u32 = 2; -const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; -const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; -const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; - -const CHUNKS_SIZE: usize = 1024 * 1024; - -const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; -const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; - -// Default total amount of time we run the checks for before reporting the outcome. -// It can be overriden by setting the 'TEST_DURATION_MINS' env var. -const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr - -type ContentList = Arc>>; -type CashNoteMap = Arc>>; - -struct ContentError { - net_addr: NetworkAddress, - attempts: u8, - last_err: Error, -} - -impl fmt::Debug for ContentError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{:?}, attempts: {}, last error: {:?}", - self.net_addr, self.attempts, self.last_err - ) - } -} - -type ContentErredList = Arc>>; - -#[tokio::test(flavor = "multi_thread")] -async fn data_availability_during_churn() -> Result<()> { - let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); - - let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { - Duration::from_secs(60 * str.parse::()?) 
- } else { - TEST_DURATION - }; - let node_count = get_node_count(); - - let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { - println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); - info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); - let cycles = str.parse::()?; - test_duration / cycles - } else { - // Ensure at least some nodes got churned twice. - test_duration - / std::cmp::max( - CHURN_CYCLES * node_count as u32, - node_count as u32 + EXTRA_CHURN_COUNT, - ) - }; - println!("Nodes will churn every {churn_period:?}"); - info!("Nodes will churn every {churn_period:?}"); - - // Create a cross thread usize for tracking churned nodes - let churn_count = Arc::new(RwLock::new(0_usize)); - - // Allow to disable Registers data creation/checks, storing and querying only Chunks during churn. - // Default to be not carry out chunks only during churn. - let chunks_only = std::env::var("CHUNKS_ONLY").is_ok(); - - println!( - "Running this test for {test_duration:?}{}...", - if chunks_only { " (Chunks only)" } else { "" } - ); - info!( - "Running this test for {test_duration:?}{}...", - if chunks_only { " (Chunks only)" } else { "" } - ); - - // The testnet will create a `faucet` at last. To avoid mess up with that, - // wait for a while to ensure the spends of that got settled. - sleep(std::time::Duration::from_secs(10)).await; - - info!("Creating a client and paying wallet..."); - let paying_wallet_dir = TempDir::new()?; - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - // Waiting for the paying_wallet funded. - sleep(std::time::Duration::from_secs(10)).await; - - info!( - "Client and paying_wallet created with signing key: {:?}", - client.signer_pk() - ); - - // Shared bucket where we keep track of content created/stored on the network - let content = ContentList::default(); - - // Shared bucket where we keep track of CashNotes created/stored on the network - let cash_notes = CashNoteMap::default(); - - // Spawn a task to create Registers and CashNotes at random locations, - // at a higher frequency than the churning events - if !chunks_only { - info!("Creating transfer wallet taking balance from the payment wallet"); - let transfers_wallet_dir = TempDir::new()?; - let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; - info!("Transfer wallet created"); - - // Waiting for the transfers_wallet funded. - sleep(std::time::Duration::from_secs(10)).await; - - create_registers_task( - client.clone(), - Arc::clone(&content), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - create_cash_note_task( - client.clone(), - transfers_wallet, - Arc::clone(&content), - Arc::clone(&cash_notes), - churn_period, - ); - } - - println!("Uploading some chunks before carry out node churning"); - info!("Uploading some chunks before carry out node churning"); - - // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events - store_chunks_task( - client.clone(), - Arc::clone(&content), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - // Spawn a task to churn nodes - churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period); - - // Shared bucket where we keep track of the content which erred when creating/storing/fetching. - // We remove them from this bucket if we are then able to query/fetch them successfully. 
- // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectivelly as failures. - let content_erred = ContentErredList::default(); - - // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times. - let failures = ContentErredList::default(); - - // Spawn a task to randomly query/fetch the content we create/store - query_content_task( - client.clone(), - Arc::clone(&content), - Arc::clone(&content_erred), - Arc::clone(&cash_notes), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, - // and mark them as failures if they effectivelly cannot be retrieved. - retry_query_content_task( - client.clone(), - Arc::clone(&content_erred), - Arc::clone(&failures), - Arc::clone(&cash_notes), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - info!("All tasks have been spawned. The test is now running..."); - println!("All tasks have been spawned. The test is now running..."); - - let start_time = Instant::now(); - while start_time.elapsed() < test_duration { - let failed = failures.read().await; - info!( - "Current failures after {:?} ({}): {:?}", - start_time.elapsed(), - failed.len(), - failed.values() - ); - sleep(churn_period).await; - } - - println!(); - println!( - ">>>>>> Test stopping after running for {:?}. <<<<<<", - start_time.elapsed() - ); - println!("{:?} churn events happened.", *churn_count.read().await); - println!(); - - // The churning of storing_chunk/querying_chunk are all random, - // which will have a high chance that newly stored chunk got queried BEFORE - // the original holders churned out. - // i.e. the test may pass even without any replication - // Hence, we carry out a final round of query all data to confirm storage. - println!("Final querying confirmation of content"); - info!("Final querying confirmation of content"); - - // take one read lock to avoid holding the lock for the whole loop - // prevent any late content uploads being added to the list - let content = content.read().await; - let uploaded_content_count = content.len(); - let mut handles = Vec::new(); - for net_addr in content.iter() { - let client = client.clone(); - let net_addr = net_addr.clone(); - let cash_notes = Arc::clone(&cash_notes); - - let failures = Arc::clone(&failures); - let wallet_dir = paying_wallet_dir.to_path_buf().clone(); - let handle = tokio::spawn(async move { - final_retry_query_content( - &client, - &net_addr, - cash_notes, - churn_period, - failures, - &wallet_dir, - ) - .await - }); - handles.push(handle); - } - let results: Vec<_> = futures::future::join_all(handles).await; - - let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); - assert_eq!( - content_queried_count, uploaded_content_count, - "Not all content was queried successfully" - ); - - println!("{content_queried_count:?} pieces of content queried"); - - assert_eq!( - content_queried_count, uploaded_content_count, - "Not all content was queried" - ); - - let failed = failures.read().await; - if failed.len() > 0 { - bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); - } - - println!("Test passed after running for {:?}.", start_time.elapsed()); - Ok(()) -} - -// Spawns a task which periodically creates CashNotes at random locations. 
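// Each producer task below follows the same rate-limiting skeleton: derive a
// fixed delay from the churn period and a ratio constant, then loop forever,
// sleeping between rounds so several items land per churn event. A minimal
// sketch of that shared shape (`spawn_periodic_task` is illustrative, not
// part of this crate; the body comment stands in for the real work):
fn spawn_periodic_task(churn_period: Duration, ratio: u32) {
    let _handle = tokio::spawn(async move {
        // e.g. CASHNOTE_CREATION_RATIO_TO_CHURN == 15: fifteen creations
        // per churn period.
        let delay = churn_period / ratio;
        loop {
            sleep(delay).await;
            // ... create one CashNote / Register / Chunk and record it ...
        }
    });
}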
-fn create_cash_note_task( - client: Client, - transfers_wallet: HotWallet, - content: ContentList, - cash_notes: CashNoteMap, - churn_period: Duration, -) { - let _handle = tokio::spawn(async move { - // Create CashNote at a higher frequency than the churning events - let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; - - let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); - - loop { - sleep(delay).await; - - let dest_pk = MainSecretKey::random().main_pubkey(); - let cash_note = wallet_client - .send_cash_note(NanoTokens::from(10), dest_pk, true) - .await - .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); - - let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - let net_addr = NetworkAddress::SpendAddress(cash_note_addr); - println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - content.write().await.push_back(net_addr); - let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); - } - }); -} - -// Spawns a task which periodically creates Registers at random locations. -fn create_registers_task( - client: Client, - content: ContentList, - churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - // Create Registers at a higher frequency than the churning events - let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; - - let paying_wallet = get_wallet(&paying_wallet_dir); - - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - loop { - let meta = XorName(rand::random()); - let owner = client.signer_pk(); - - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?} in {delay:?}"); - debug!("Creating Register at {addr:?} in {delay:?}"); - sleep(delay).await; - - match client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) - .await - { - Ok(_) => content - .write() - .await - .push_back(NetworkAddress::RegisterAddress(addr)), - Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), - } - } - }); -} - -// Spawns a task which periodically stores Chunks at random locations. 
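// Chunks are content-addressed: a chunk's XorName is derived from its bytes,
// so random payloads land at effectively random network addresses. A minimal
// sketch of how the task below names its data, reusing the same `rand` and
// `xor_name` calls as the surrounding code (`random_chunk_name` itself is
// hypothetical):
fn random_chunk_name(rng: &mut OsRng, size: usize) -> XorName {
    let random_bytes: Vec<u8> = std::iter::repeat(())
        .map(|()| rng.gen::<u8>())
        .take(size)
        .collect();
    // Identical bytes would always map back to the identical address.
    XorName::from_content(&random_bytes)
}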
-fn store_chunks_task( - client: Client, - content: ContentList, - churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle: JoinHandle> = tokio::spawn(async move { - let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); - let output_dir = temp_dir.path().join("chunk_path"); - create_dir_all(output_dir.clone()) - .expect("failed to create output dir for encrypted chunks"); - - // Store Chunks at a higher frequency than the churning events - let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; - - let mut rng = OsRng; - - loop { - let random_bytes: Vec = ::std::iter::repeat(()) - .map(|()| rng.gen::()) - .take(CHUNKS_SIZE) - .collect(); - let chunk_size = random_bytes.len(); - - let chunk_name = XorName::from_content(&random_bytes); - - let file_path = temp_dir.path().join(hex::encode(chunk_name)); - let mut chunk_file = - File::create(&file_path).expect("failed to create temp chunk file"); - chunk_file - .write_all(&random_bytes) - .expect("failed to write to temp chunk file"); - - let (addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); - - info!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", - chunks.len(), - chunk_size - ); - sleep(delay).await; - - let chunks_len = chunks.len(); - let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); - - let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.insert_chunk_paths(chunks); - - let cost = match uploader.start_upload().await { - Ok(stats) => stats - .royalty_fees - .checked_add(stats.storage_cost) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?, - Err(err) => { - bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); - } - }; - - println!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" - ); - info!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" - ); - sleep(delay).await; - - for chunk_name in chunks_name { - content - .write() - .await - .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name))); - } - } - }); -} - -// Spawns a task which periodically queries a content by randomly choosing it from the list -// of content created by another task. 
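// The query task below samples a random entry from the shared content list.
// Note the lock discipline: each `content.read().await` guard is dropped at
// the end of its own statement, so no read lock is held across the later
// sleep/query awaits. A minimal sketch of that sampling step (`pick_random`
// is illustrative; it is safe here because the list only ever grows, so a
// previously observed index stays in bounds):
async fn pick_random(content: &ContentList) -> Option<NetworkAddress> {
    let len = content.read().await.len(); // guard dropped at end of statement
    if len == 0 {
        return None;
    }
    let index = rand::thread_rng().gen_range(0..len);
    Some(content.read().await[index].clone()) // brief re-acquire, clone out
}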
-fn query_content_task( - client: Client, - content: ContentList, - content_erred: ContentErredList, - cash_notes: CashNoteMap, - churn_period: Duration, - root_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN; - loop { - let len = content.read().await.len(); - if len == 0 { - println!("No content created/stored just yet, let's try in {delay:?} ..."); - info!("No content created/stored just yet, let's try in {delay:?} ..."); - sleep(delay).await; - continue; - } - - // let's choose a random content to query, picking it from the list of created - let index = rand::thread_rng().gen_range(0..len); - let net_addr = content.read().await[index].clone(); - trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}"); - sleep(delay).await; - - match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await { - Ok(_) => { - let _ = content_erred.write().await.remove(&net_addr); - } - Err(last_err) => { - println!( - "Failed to query content (index: {index}) at {net_addr}: {last_err:?}" - ); - error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}"); - // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times. - let _ = content_erred - .write() - .await - .entry(net_addr.clone()) - .and_modify(|curr| curr.attempts += 1) - .or_insert(ContentError { - net_addr, - attempts: 1, - last_err, - }); - } - } - } - }); -} - -// Spawns a task which periodically picks up a node, and restarts it to cause churn in the network. -fn churn_nodes_task( - churn_count: Arc>, - test_duration: Duration, - churn_period: Duration, -) { - let start = Instant::now(); - let _handle: JoinHandle> = tokio::spawn(async move { - let mut node_restart = NodeRestart::new(true, false)?; - - loop { - sleep(churn_period).await; - - // break out if we've run the duration of churn - if start.elapsed() > test_duration { - debug!("Test duration reached, stopping churn nodes task"); - break; - } - - if let Err(err) = node_restart.restart_next(true, true).await { - println!("Failed to restart node {err}"); - info!("Failed to restart node {err}"); - continue; - } - - *churn_count.write().await += 1; - } - Ok(()) - }); -} - -// Checks (periodically) for any content that an error was reported either at the moment of its creation or -// in a later query attempt. -fn retry_query_content_task( - client: Client, - content_erred: ContentErredList, - failures: ContentErredList, - cash_notes: CashNoteMap, - churn_period: Duration, - wallet_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - let delay = 2 * churn_period; - loop { - sleep(delay).await; - - // let's try to query from the bucket of those that erred upon creation/query - let erred = content_erred.write().await.pop_first(); - - if let Some((net_addr, mut content_error)) = erred { - let attempts = content_error.attempts + 1; - - println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); - info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { - println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, - // otherwise report it effectivelly as failure. 
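// Taken together with query_content_task, each address follows a small state
// machine: it enters `content_erred` with attempts = 1, is retried here every
// 2 * churn_period, and is either removed again on a successful query or
// promoted to `failures` once attempts reaches MAX_NUM_OF_QUERY_ATTEMPTS;
// only a later successful retry or the final sweep removes it from
// `failures` after that.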
- content_error.attempts = attempts; - content_error.last_err = last_err; - - if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { - let _ = failures.write().await.insert(net_addr, content_error); - } else { - let _ = content_erred.write().await.insert(net_addr, content_error); - } - } else { - // remove from fails and errs if we had a success and it was added meanwhile perchance - let _ = failures.write().await.remove(&net_addr); - let _ = content_erred.write().await.remove(&net_addr); - } - } - } - }); -} - -async fn final_retry_query_content( - client: &Client, - net_addr: &NetworkAddress, - cash_notes: CashNoteMap, - churn_period: Duration, - failures: ContentErredList, - wallet_dir: &Path, -) -> Result<()> { - let mut attempts = 1; - let net_addr = net_addr.clone(); - loop { - println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); - debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { - if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { - println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - } else { - attempts += 1; - let delay = 2 * churn_period; - debug!("Delaying last check for {delay:?} ..."); - sleep(delay).await; - continue; - } - } else { - failures.write().await.remove(&net_addr); - // content retrieved fine - return Ok(()); - } - } -} - -async fn query_content( - client: &Client, - wallet_dir: &Path, - net_addr: &NetworkAddress, - cash_notes: CashNoteMap, -) -> Result<(), Error> { - match net_addr { - NetworkAddress::SpendAddress(addr) => { - if let Some(cash_note) = cash_notes.read().await.get(addr) { - match client.verify_cashnote(cash_note).await { - Ok(_) => Ok(()), - Err(err) => Err(Error::CouldNotVerifyTransfer(format!( - "Verification of cash_note {addr:?} failed with error: {err:?}" - ))), - } - } else { - Err(Error::CouldNotVerifyTransfer(format!( - "Do not have the CashNote: {addr:?}" - ))) - } - } - NetworkAddress::RegisterAddress(addr) => { - let _ = client.get_register(*addr).await?; - Ok(()) - } - NetworkAddress::ChunkAddress(addr) => { - let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); - let mut file_download = FilesDownload::new(files_api); - let _ = file_download.download_file(*addr, None).await?; - - Ok(()) - } - _other => Ok(()), // we don't create/store any other type of content in this test yet - } -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
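// The commented-out test below shares all of its state through lock-protected
// buckets cloned into every task. The type aliases appear further down with
// their generic parameters elided; reconstructed from the imports and call
// sites, they are:
//
//   type ContentList = Arc<RwLock<VecDeque<NetworkAddress>>>;
//   type CashNoteMap = Arc<RwLock<BTreeMap<SpendAddress, CashNote>>>;
//   type ContentErredList = Arc<RwLock<BTreeMap<NetworkAddress, ContentError>>>;
//
// Arc gives each spawned task shared ownership, and the tokio RwLock lets
// many readers (the query tasks) proceed concurrently while writers (the
// producer tasks) take brief exclusive locks.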
+ +// mod common; + +// use crate::common::{ +// client::{add_funds_to_wallet, get_client_and_funded_wallet, get_node_count, get_wallet}, +// NodeRestart, +// }; +// use assert_fs::TempDir; +// use eyre::{bail, eyre, Result}; +// use rand::{rngs::OsRng, Rng}; +// // TODO: Update `autonomi` to have relevant types here +// // use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; +// use sn_logging::LogBuilder; +// use sn_protocol::{ +// storage::{ChunkAddress, RegisterAddress, SpendAddress}, +// NetworkAddress, +// }; +// use sn_registers::Permissions; +// use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; +// use std::{ +// collections::{BTreeMap, VecDeque}, +// fmt, +// fs::{create_dir_all, File}, +// io::Write, +// path::{Path, PathBuf}, +// sync::Arc, +// time::{Duration, Instant}, +// }; +// use tempfile::tempdir; +// use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; +// use tracing::{debug, error, info, trace, warn}; +// use xor_name::XorName; + +// const EXTRA_CHURN_COUNT: u32 = 5; +// const CHURN_CYCLES: u32 = 2; +// const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; +// const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; +// const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; + +// const CHUNKS_SIZE: usize = 1024 * 1024; + +// const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; +// const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; + +// // Default total amount of time we run the checks for before reporting the outcome. +// // It can be overriden by setting the 'TEST_DURATION_MINS' env var. +// const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr + +// type ContentList = Arc>>; +// type CashNoteMap = Arc>>; + +// struct ContentError { +// net_addr: NetworkAddress, +// attempts: u8, +// last_err: Error, +// } + +// impl fmt::Debug for ContentError { +// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +// write!( +// f, +// "{:?}, attempts: {}, last error: {:?}", +// self.net_addr, self.attempts, self.last_err +// ) +// } +// } + +// type ContentErredList = Arc>>; + +// #[tokio::test(flavor = "multi_thread")] +// async fn data_availability_during_churn() -> Result<()> { +// let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); + +// let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { +// Duration::from_secs(60 * str.parse::()?) +// } else { +// TEST_DURATION +// }; +// let node_count = get_node_count(); + +// let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { +// println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); +// info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); +// let cycles = str.parse::()?; +// test_duration / cycles +// } else { +// // Ensure at least some nodes got churned twice. +// test_duration +// / std::cmp::max( +// CHURN_CYCLES * node_count as u32, +// node_count as u32 + EXTRA_CHURN_COUNT, +// ) +// }; +// println!("Nodes will churn every {churn_period:?}"); +// info!("Nodes will churn every {churn_period:?}"); + +// // Create a cross thread usize for tracking churned nodes +// let churn_count = Arc::new(RwLock::new(0_usize)); + +// // Allow to disable Registers data creation/checks, storing and querying only Chunks during churn. +// // Default to be not carry out chunks only during churn. 
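// Worked example of the churn-period derivation above: with the default
// one-hour TEST_DURATION and, say, a 25-node local network,
// max(CHURN_CYCLES * 25, 25 + EXTRA_CHURN_COUNT) = max(50, 30) = 50, so
// churn_period = 3600 s / 50 = 72 s and every node is restarted roughly
// twice over the run, which is exactly the CHURN_CYCLES target.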
+// let chunks_only = std::env::var("CHUNKS_ONLY").is_ok(); + +// println!( +// "Running this test for {test_duration:?}{}...", +// if chunks_only { " (Chunks only)" } else { "" } +// ); +// info!( +// "Running this test for {test_duration:?}{}...", +// if chunks_only { " (Chunks only)" } else { "" } +// ); + +// // The testnet will create a `faucet` at last. To avoid mess up with that, +// // wait for a while to ensure the spends of that got settled. +// sleep(std::time::Duration::from_secs(10)).await; + +// info!("Creating a client and paying wallet..."); +// let paying_wallet_dir = TempDir::new()?; +// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// // Waiting for the paying_wallet funded. +// sleep(std::time::Duration::from_secs(10)).await; + +// info!( +// "Client and paying_wallet created with signing key: {:?}", +// client.signer_pk() +// ); + +// // Shared bucket where we keep track of content created/stored on the network +// let content = ContentList::default(); + +// // Shared bucket where we keep track of CashNotes created/stored on the network +// let cash_notes = CashNoteMap::default(); + +// // Spawn a task to create Registers and CashNotes at random locations, +// // at a higher frequency than the churning events +// if !chunks_only { +// info!("Creating transfer wallet taking balance from the payment wallet"); +// let transfers_wallet_dir = TempDir::new()?; +// let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; +// info!("Transfer wallet created"); + +// // Waiting for the transfers_wallet funded. +// sleep(std::time::Duration::from_secs(10)).await; + +// create_registers_task( +// client.clone(), +// Arc::clone(&content), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// create_cash_note_task( +// client.clone(), +// transfers_wallet, +// Arc::clone(&content), +// Arc::clone(&cash_notes), +// churn_period, +// ); +// } + +// println!("Uploading some chunks before carry out node churning"); +// info!("Uploading some chunks before carry out node churning"); + +// // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events +// store_chunks_task( +// client.clone(), +// Arc::clone(&content), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// // Spawn a task to churn nodes +// churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period); + +// // Shared bucket where we keep track of the content which erred when creating/storing/fetching. +// // We remove them from this bucket if we are then able to query/fetch them successfully. +// // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectivelly as failures. +// let content_erred = ContentErredList::default(); + +// // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times. +// let failures = ContentErredList::default(); + +// // Spawn a task to randomly query/fetch the content we create/store +// query_content_task( +// client.clone(), +// Arc::clone(&content), +// Arc::clone(&content_erred), +// Arc::clone(&cash_notes), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, +// // and mark them as failures if they effectivelly cannot be retrieved. 
+// retry_query_content_task( +// client.clone(), +// Arc::clone(&content_erred), +// Arc::clone(&failures), +// Arc::clone(&cash_notes), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// info!("All tasks have been spawned. The test is now running..."); +// println!("All tasks have been spawned. The test is now running..."); + +// let start_time = Instant::now(); +// while start_time.elapsed() < test_duration { +// let failed = failures.read().await; +// info!( +// "Current failures after {:?} ({}): {:?}", +// start_time.elapsed(), +// failed.len(), +// failed.values() +// ); +// sleep(churn_period).await; +// } + +// println!(); +// println!( +// ">>>>>> Test stopping after running for {:?}. <<<<<<", +// start_time.elapsed() +// ); +// println!("{:?} churn events happened.", *churn_count.read().await); +// println!(); + +// // The churning of storing_chunk/querying_chunk are all random, +// // which will have a high chance that newly stored chunk got queried BEFORE +// // the original holders churned out. +// // i.e. the test may pass even without any replication +// // Hence, we carry out a final round of query all data to confirm storage. +// println!("Final querying confirmation of content"); +// info!("Final querying confirmation of content"); + +// // take one read lock to avoid holding the lock for the whole loop +// // prevent any late content uploads being added to the list +// let content = content.read().await; +// let uploaded_content_count = content.len(); +// let mut handles = Vec::new(); +// for net_addr in content.iter() { +// let client = client.clone(); +// let net_addr = net_addr.clone(); +// let cash_notes = Arc::clone(&cash_notes); + +// let failures = Arc::clone(&failures); +// let wallet_dir = paying_wallet_dir.to_path_buf().clone(); +// let handle = tokio::spawn(async move { +// final_retry_query_content( +// &client, +// &net_addr, +// cash_notes, +// churn_period, +// failures, +// &wallet_dir, +// ) +// .await +// }); +// handles.push(handle); +// } +// let results: Vec<_> = futures::future::join_all(handles).await; + +// let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); +// assert_eq!( +// content_queried_count, uploaded_content_count, +// "Not all content was queried successfully" +// ); + +// println!("{content_queried_count:?} pieces of content queried"); + +// assert_eq!( +// content_queried_count, uploaded_content_count, +// "Not all content was queried" +// ); + +// let failed = failures.read().await; +// if failed.len() > 0 { +// bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); +// } + +// println!("Test passed after running for {:?}.", start_time.elapsed()); +// Ok(()) +// } + +// // Spawns a task which periodically creates CashNotes at random locations. 
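// The task below records every CashNote it sends so a later query can call
// client.verify_cashnote against the locally held copy; a spend that cannot
// be re-verified after churn counts as lost data. A sketch of that
// bookkeeping step, reusing calls from the surrounding code
// (`track_cash_note` itself is illustrative):
async fn track_cash_note(
    content: &ContentList,
    cash_notes: &CashNoteMap,
    cash_note: CashNote,
) {
    let addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey());
    content
        .write()
        .await
        .push_back(NetworkAddress::SpendAddress(addr));
    let _ = cash_notes.write().await.insert(addr, cash_note);
}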
+// fn create_cash_note_task( +// client: Client, +// transfers_wallet: HotWallet, +// content: ContentList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// ) { +// let _handle = tokio::spawn(async move { +// // Create CashNote at a higher frequency than the churning events +// let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; + +// let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); + +// loop { +// sleep(delay).await; + +// let dest_pk = MainSecretKey::random().main_pubkey(); +// let cash_note = wallet_client +// .send_cash_note(NanoTokens::from(10), dest_pk, true) +// .await +// .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); + +// let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); +// let net_addr = NetworkAddress::SpendAddress(cash_note_addr); +// println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); +// debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); +// content.write().await.push_back(net_addr); +// let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); +// } +// }); +// } + +// // Spawns a task which periodically creates Registers at random locations. +// fn create_registers_task( +// client: Client, +// content: ContentList, +// churn_period: Duration, +// paying_wallet_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// // Create Registers at a higher frequency than the churning events +// let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; + +// let paying_wallet = get_wallet(&paying_wallet_dir); + +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// loop { +// let meta = XorName(rand::random()); +// let owner = client.signer_pk(); + +// let addr = RegisterAddress::new(meta, owner); +// println!("Creating Register at {addr:?} in {delay:?}"); +// debug!("Creating Register at {addr:?} in {delay:?}"); +// sleep(delay).await; + +// match client +// .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) +// .await +// { +// Ok(_) => content +// .write() +// .await +// .push_back(NetworkAddress::RegisterAddress(addr)), +// Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), +// } +// } +// }); +// } + +// // Spawns a task which periodically stores Chunks at random locations. 
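// The upload accounting below sums royalties and storage fees with checked
// arithmetic, so a token-amount overflow surfaces as an eyre error instead
// of wrapping silently. A minimal sketch of that pattern, assuming the same
// NanoTokens fee fields the surrounding code reads off its upload stats
// (`total_cost` is illustrative):
fn total_cost(royalty_fees: NanoTokens, storage_cost: NanoTokens) -> Result<NanoTokens> {
    royalty_fees
        .checked_add(storage_cost)
        .ok_or(eyre!("Total storage cost exceed possible token amount"))
}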
+// fn store_chunks_task( +// client: Client, +// content: ContentList, +// churn_period: Duration, +// paying_wallet_dir: PathBuf, +// ) { +// let _handle: JoinHandle> = tokio::spawn(async move { +// let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); +// let output_dir = temp_dir.path().join("chunk_path"); +// create_dir_all(output_dir.clone()) +// .expect("failed to create output dir for encrypted chunks"); + +// // Store Chunks at a higher frequency than the churning events +// let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; + +// let mut rng = OsRng; + +// loop { +// let random_bytes: Vec = ::std::iter::repeat(()) +// .map(|()| rng.gen::()) +// .take(CHUNKS_SIZE) +// .collect(); +// let chunk_size = random_bytes.len(); + +// let chunk_name = XorName::from_content(&random_bytes); + +// let file_path = temp_dir.path().join(hex::encode(chunk_name)); +// let mut chunk_file = +// File::create(&file_path).expect("failed to create temp chunk file"); +// chunk_file +// .write_all(&random_bytes) +// .expect("failed to write to temp chunk file"); + +// let (addr, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); + +// info!( +// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", +// chunks.len(), +// chunk_size +// ); +// sleep(delay).await; + +// let chunks_len = chunks.len(); +// let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); + +// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); +// uploader.set_show_holders(true); +// uploader.insert_chunk_paths(chunks); + +// let cost = match uploader.start_upload().await { +// Ok(stats) => stats +// .royalty_fees +// .checked_add(stats.storage_cost) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?, +// Err(err) => { +// bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); +// } +// }; + +// println!( +// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" +// ); +// info!( +// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" +// ); +// sleep(delay).await; + +// for chunk_name in chunks_name { +// content +// .write() +// .await +// .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name))); +// } +// } +// }); +// } + +// // Spawns a task which periodically queries a content by randomly choosing it from the list +// // of content created by another task. 
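// query_content_task below ultimately funnels into query_content, which
// dispatches on the address variant; condensed, the shape (spelled out in
// full further down) is:
//
//   match net_addr {
//       NetworkAddress::SpendAddress(addr)    => { /* verify held CashNote */ }
//       NetworkAddress::RegisterAddress(addr) => { /* client.get_register(*addr) */ }
//       NetworkAddress::ChunkAddress(addr)    => { /* FilesDownload re-fetch */ }
//       _other => { /* nothing else is stored by this test */ }
//   }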
+// fn query_content_task( +// client: Client, +// content: ContentList, +// content_erred: ContentErredList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// root_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN; +// loop { +// let len = content.read().await.len(); +// if len == 0 { +// println!("No content created/stored just yet, let's try in {delay:?} ..."); +// info!("No content created/stored just yet, let's try in {delay:?} ..."); +// sleep(delay).await; +// continue; +// } + +// // let's choose a random content to query, picking it from the list of created +// let index = rand::thread_rng().gen_range(0..len); +// let net_addr = content.read().await[index].clone(); +// trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}"); +// sleep(delay).await; + +// match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await { +// Ok(_) => { +// let _ = content_erred.write().await.remove(&net_addr); +// } +// Err(last_err) => { +// println!( +// "Failed to query content (index: {index}) at {net_addr}: {last_err:?}" +// ); +// error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}"); +// // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times. +// let _ = content_erred +// .write() +// .await +// .entry(net_addr.clone()) +// .and_modify(|curr| curr.attempts += 1) +// .or_insert(ContentError { +// net_addr, +// attempts: 1, +// last_err, +// }); +// } +// } +// } +// }); +// } + +// // Spawns a task which periodically picks up a node, and restarts it to cause churn in the network. +// fn churn_nodes_task( +// churn_count: Arc>, +// test_duration: Duration, +// churn_period: Duration, +// ) { +// let start = Instant::now(); +// let _handle: JoinHandle> = tokio::spawn(async move { +// let mut node_restart = NodeRestart::new(true, false)?; + +// loop { +// sleep(churn_period).await; + +// // break out if we've run the duration of churn +// if start.elapsed() > test_duration { +// debug!("Test duration reached, stopping churn nodes task"); +// break; +// } + +// if let Err(err) = node_restart.restart_next(true, true).await { +// println!("Failed to restart node {err}"); +// info!("Failed to restart node {err}"); +// continue; +// } + +// *churn_count.write().await += 1; +// } +// Ok(()) +// }); +// } + +// // Checks (periodically) for any content that an error was reported either at the moment of its creation or +// // in a later query attempt. 
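// The churn task just above is the only writer of churn_count and the only
// source of restarts; reduced to its control flow it looks like the sketch
// below (`churn_loop` is illustrative, with error handling elided; the two
// restart_next flags are the loop-over and progress-on-error knobs of the
// NodeRestart helper commented out earlier):
async fn churn_loop(
    mut node_restart: NodeRestart,
    start: Instant,
    test_duration: Duration,
    churn_period: Duration,
) {
    loop {
        sleep(churn_period).await;
        if start.elapsed() > test_duration {
            break; // run out the clock; verification continues afterwards
        }
        let _ = node_restart.restart_next(true, true).await;
    }
}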
+// fn retry_query_content_task( +// client: Client, +// content_erred: ContentErredList, +// failures: ContentErredList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// wallet_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// let delay = 2 * churn_period; +// loop { +// sleep(delay).await; + +// // let's try to query from the bucket of those that erred upon creation/query +// let erred = content_erred.write().await.pop_first(); + +// if let Some((net_addr, mut content_error)) = erred { +// let attempts = content_error.attempts + 1; + +// println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); +// info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); +// if let Err(last_err) = +// query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await +// { +// println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, +// // otherwise report it effectivelly as failure. +// content_error.attempts = attempts; +// content_error.last_err = last_err; + +// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { +// let _ = failures.write().await.insert(net_addr, content_error); +// } else { +// let _ = content_erred.write().await.insert(net_addr, content_error); +// } +// } else { +// // remove from fails and errs if we had a success and it was added meanwhile perchance +// let _ = failures.write().await.remove(&net_addr); +// let _ = content_erred.write().await.remove(&net_addr); +// } +// } +// } +// }); +// } + +// async fn final_retry_query_content( +// client: &Client, +// net_addr: &NetworkAddress, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// failures: ContentErredList, +// wallet_dir: &Path, +// ) -> Result<()> { +// let mut attempts = 1; +// let net_addr = net_addr.clone(); +// loop { +// println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); +// debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); +// if let Err(last_err) = +// query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await +// { +// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { +// println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// } else { +// attempts += 1; +// let delay = 2 * churn_period; +// debug!("Delaying last check for {delay:?} ..."); +// sleep(delay).await; +// continue; +// } +// } else { +// failures.write().await.remove(&net_addr); +// // content retrieved fine +// return Ok(()); +// } +// } +// } + +// async fn query_content( +// client: &Client, +// wallet_dir: &Path, +// net_addr: &NetworkAddress, +// cash_notes: CashNoteMap, +// ) -> Result<(), Error> { +// match net_addr { +// NetworkAddress::SpendAddress(addr) => { +// if let Some(cash_note) = cash_notes.read().await.get(addr) { +// match client.verify_cashnote(cash_note).await { +// Ok(_) => Ok(()), +// Err(err) => Err(Error::CouldNotVerifyTransfer(format!( +// "Verification of cash_note {addr:?} failed with error: {err:?}" +// ))), +// } +// } else { +// Err(Error::CouldNotVerifyTransfer(format!( +// "Do not 
have the CashNote: {addr:?}" +// ))) +// } +// } +// NetworkAddress::RegisterAddress(addr) => { +// let _ = client.get_register(*addr).await?; +// Ok(()) +// } +// NetworkAddress::ChunkAddress(addr) => { +// let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); +// let mut file_download = FilesDownload::new(files_api); +// let _ = file_download.download_file(*addr, None).await?; + +// Ok(()) +// } +// _other => Ok(()), // we don't create/store any other type of content in this test yet +// } +// } diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 3abf477b18..d81cc8a8d6 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -1,427 +1,428 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -#![allow(clippy::mutable_key_type)] -mod common; - -use crate::common::{ - client::{get_all_rpc_addresses, get_client_and_funded_wallet}, - get_all_peer_ids, get_safenode_rpc_client, NodeRestart, -}; -use assert_fs::TempDir; -use common::client::get_wallet; -use eyre::{eyre, Result}; -use libp2p::{ - kad::{KBucketKey, RecordKey}, - PeerId, -}; -use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, FilesApi, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_networking::sort_peers_by_key; -use sn_protocol::{ - safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, - NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -}; -use sn_registers::{Permissions, RegisterAddress}; -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - fs::File, - io::Write, - net::SocketAddr, - path::PathBuf, - time::{Duration, Instant}, -}; -use tonic::Request; -use tracing::{debug, error, info}; -use xor_name::XorName; - -const CHUNK_SIZE: usize = 1024; - -// VERIFICATION_DELAY is set based on the dead peer detection interval -// Once a node has been restarted, it takes VERIFICATION_DELAY time -// for the old peer to be removed from the routing table. -// Replication is then kicked off to distribute the data to the new closest -// nodes, hence verification has to be performed after this. -const VERIFICATION_DELAY: Duration = Duration::from_secs(60); - -/// Number of times to retry verification if it fails -const VERIFICATION_ATTEMPTS: usize = 5; - -/// Length of time to wait before re-verifying the data location -const REVERIFICATION_DELAY: Duration = - Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S); - -// Default number of churns that should be performed. After each churn, we -// wait for VERIFICATION_DELAY time before verifying the data location. -// It can be overridden by setting the 'CHURN_COUNT' env var. -const CHURN_COUNT: u8 = 20; - -/// Default number of chunks that should be PUT to the network. -/// It can be overridden by setting the 'CHUNK_COUNT' env var. -const CHUNK_COUNT: usize = 5; -/// Default number of registers that should be PUT to the network. -/// It can be overridden by setting the 'REGISTER_COUNT' env var. 
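// The three knobs below all share the same parse-or-default shape. A generic
// helper capturing it (`env_or_default` is hypothetical, not part of this
// crate; note the test itself propagates parse errors via `?` rather than
// falling back silently):
fn env_or_default<T: std::str::FromStr>(name: &str, default: T) -> T {
    std::env::var(name)
        .ok()
        .and_then(|val| val.parse::<T>().ok())
        .unwrap_or(default)
}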
-const REGISTER_COUNT: usize = 5; - -type NodeIndex = usize; -type RecordHolders = HashMap>; - -#[tokio::test(flavor = "multi_thread")] -async fn verify_data_location() -> Result<()> { - let _log_appender_guard = - LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false); - - let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") { - str.parse::()? - } else { - CHURN_COUNT - }; - let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") { - str.parse::()? - } else { - CHUNK_COUNT - }; - let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") { - str.parse::()? - } else { - REGISTER_COUNT - }; - println!( - "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", - VERIFICATION_DELAY*churn_count as u32 - ); - info!( - "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", - VERIFICATION_DELAY*churn_count as u32 - ); - let node_rpc_address = get_all_rpc_addresses(true)?; - let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; - - // Store chunks - println!("Creating a client and paying wallet..."); - debug!("Creating a client and paying wallet..."); - - let paying_wallet_dir = TempDir::new()?; - - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; - store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; - - // Verify data location initially - verify_location(&all_peers, &node_rpc_address).await?; - - // Churn nodes and verify the location of the data after VERIFICATION_DELAY - let mut current_churn_count = 0; - - let mut node_restart = NodeRestart::new(true, false)?; - let mut node_index = 0; - 'main: loop { - if current_churn_count >= churn_count { - break 'main Ok(()); - } - current_churn_count += 1; - - let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? { - None => { - // we have reached the end. - break 'main Ok(()); - } - Some(safenode_rpc_endpoint) => safenode_rpc_endpoint, - }; - - // wait for the dead peer to be removed from the RT and the replication flow to finish - println!( - "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification" - ); - info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); - tokio::time::sleep(VERIFICATION_DELAY).await; - - // get the new PeerId for the current NodeIndex - let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?; - - let response = rpc_client - .node_info(Request::new(NodeInfoRequest {})) - .await?; - let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; - // The below indexing assumes that, the way we do iteration to retrieve all_peers inside get_all_rpc_addresses - // and get_all_peer_ids is the same as how we do the iteration inside NodeRestart. - // todo: make this more cleaner. 
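// The check below guards against a restart that came back with the same
// identity: this harness expects a fresh PeerId per restart (it errors out
// otherwise), and all_peers is patched in place so later close-group
// calculations only use live identities. One hypothetical way to drop the
// positional assumption flagged in the todo would be to key the peer table
// by RPC address instead of by restart order, e.g. a
// HashMap<SocketAddr, PeerId> updated from each node_info response.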
- if all_peers[node_index] == new_peer_id { - println!("new and old peer id are the same {new_peer_id:?}"); - return Err(eyre!("new and old peer id are the same {new_peer_id:?}")); - } - all_peers[node_index] = new_peer_id; - node_index += 1; - - print_node_close_groups(&all_peers); - - verify_location(&all_peers, &node_rpc_address).await?; - } -} - -fn print_node_close_groups(all_peers: &[PeerId]) { - let all_peers = all_peers.to_vec(); - info!("\nNode close groups:"); - - for (node_index, peer) in all_peers.iter().enumerate() { - let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); - let closest_peers = - sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); - let closest_peers_idx = closest_peers - .iter() - .map(|&&peer| { - all_peers - .iter() - .position(|&p| p == peer) - .expect("peer to be in iterator") - }) - .collect::>(); - info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); - } -} - -async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result { - let mut record_holders = RecordHolders::default(); - - for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() { - let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; - - let records_response = rpc_client - .record_addresses(Request::new(RecordAddressesRequest {})) - .await?; - - for bytes in records_response.get_ref().addresses.iter() { - let key = RecordKey::from(bytes.clone()); - let holders = record_holders.entry(key).or_insert(HashSet::new()); - holders.insert(node_index); - } - } - debug!("Obtained the current set of Record Key holders"); - Ok(record_holders) -} - -// Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey -// It has a retry loop built in. -async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAddr]) -> Result<()> { - let mut failed = HashMap::new(); - - println!("*********************************************"); - println!("Verifying data across all peers {all_peers:?}"); - info!("*********************************************"); - info!("Verifying data across all peers {all_peers:?}"); - - let mut verification_attempts = 0; - while verification_attempts < VERIFICATION_ATTEMPTS { - failed.clear(); - let record_holders = get_records_and_holders(node_rpc_addresses).await?; - for (key, actual_holders_idx) in record_holders.iter() { - println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - let record_key = KBucketKey::from(key.to_vec()); - let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
- .into_iter() - .cloned() - .collect::>(); - - let actual_holders = actual_holders_idx - .iter() - .map(|i| all_peers[*i]) - .collect::>(); - - info!( - "Expected to be held by {:?} nodes: {expected_holders:?}", - expected_holders.len() - ); - info!( - "Actually held by {:?} nodes : {actual_holders:?}", - actual_holders.len() - ); - - if actual_holders != expected_holders { - // print any expect holders that are not in actual holders - let mut missing_peers = Vec::new(); - expected_holders - .iter() - .filter(|expected| !actual_holders.contains(expected)) - .for_each(|expected| missing_peers.push(*expected)); - - if !missing_peers.is_empty() { - error!( - "Record {:?} is not stored by {missing_peers:?}", - PrettyPrintRecordKey::from(key), - ); - println!( - "Record {:?} is not stored by {missing_peers:?}", - PrettyPrintRecordKey::from(key), - ); - } - } - - let mut failed_peers = Vec::new(); - expected_holders - .iter() - .filter(|expected| !actual_holders.contains(expected)) - .for_each(|expected| failed_peers.push(*expected)); - - if !failed_peers.is_empty() { - failed.insert(key.clone(), failed_peers); - } - } - - if !failed.is_empty() { - error!("Verification failed for {:?} entries", failed.len()); - println!("Verification failed for {:?} entries", failed.len()); - - failed.iter().for_each(|(key, failed_peers)| { - let key_addr = NetworkAddress::from_record_key(key); - let pretty_key = PrettyPrintRecordKey::from(key); - failed_peers.iter().for_each(|peer| { - let peer_addr = NetworkAddress::from_peer(*peer); - let ilog2_distance = peer_addr.distance(&key_addr).ilog2(); - println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); - error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); - }); - }); - info!("State of each node:"); - record_holders.iter().for_each(|(key, node_index)| { - info!( - "Record {:?} is currently held by node indices {node_index:?}", - PrettyPrintRecordKey::from(key) - ); - }); - info!("Node index map:"); - all_peers - .iter() - .enumerate() - .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); - verification_attempts += 1; - println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); - info!("Sleeping before retrying verification. 
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); - if verification_attempts < VERIFICATION_ATTEMPTS { - tokio::time::sleep(REVERIFICATION_DELAY).await; - } - } else { - // if successful, break out of the loop - break; - } - } - - if !failed.is_empty() { - println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); - error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); - Err(eyre!("Verification failed for: {failed:?}")) - } else { - println!("All the Records have been verified!"); - info!("All the Records have been verified!"); - Ok(()) - } -} - -// Generate random Chunks and store them to the Network -async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { - let start = Instant::now(); - let mut rng = OsRng; - - let mut uploaded_chunks_count = 0; - loop { - if uploaded_chunks_count >= chunk_count { - break; - } - - let chunks_dir = TempDir::new()?; - - let random_bytes: Vec = ::std::iter::repeat(()) - .map(|()| rng.gen::()) - .take(CHUNK_SIZE) - .collect(); - - let file_path = chunks_dir.join("random_content"); - let mut output_file = File::create(file_path.clone())?; - output_file.write_all(&random_bytes)?; - - let (head_chunk_addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; - - debug!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", - chunks.len(), - random_bytes.len() - ); - - let key = - PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); - - let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.set_verify_store(false); - uploader.insert_chunk_paths(chunks); - let _upload_stats = uploader.start_upload().await?; - - uploaded_chunks_count += 1; - - println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); - info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); - } - - println!( - "{chunk_count:?} Chunks were stored in {:?}", - start.elapsed() - ); - info!( - "{chunk_count:?} Chunks were stored in {:?}", - start.elapsed() - ); - - // to make sure the last chunk was stored - tokio::time::sleep(Duration::from_secs(10)).await; - - Ok(()) -} - -async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { - let start = Instant::now(); - let paying_wallet = get_wallet(&wallet_dir); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let mut uploaded_registers_count = 0; - loop { - if uploaded_registers_count >= register_count { - break; - } - let meta = XorName(rand::random()); - let owner = client.signer_pk(); - - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?}"); - debug!("Creating Register at {addr:?}"); - - let (mut register, ..) = client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) - .await?; - - println!("Editing Register at {addr:?}"); - debug!("Editing Register at {addr:?}"); - register.write_online("entry".as_bytes(), true).await?; - - uploaded_registers_count += 1; - } - println!( - "{register_count:?} Registers were stored in {:?}", - start.elapsed() - ); - info!( - "{register_count:?} Registers were stored in {:?}", - start.elapsed() - ); - - // to make sure the last register was stored - tokio::time::sleep(Duration::from_secs(10)).await; - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. 
+// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// #![allow(clippy::mutable_key_type)] +// mod common; + +// use crate::common::{ +// client::{get_all_rpc_addresses, get_client_and_funded_wallet}, +// get_all_peer_ids, get_safenode_rpc_client, NodeRestart, +// }; +// use assert_fs::TempDir; +// use common::client::get_wallet; +// use eyre::{eyre, Result}; +// use libp2p::{ +// kad::{KBucketKey, RecordKey}, +// PeerId, +// }; +// use rand::{rngs::OsRng, Rng}; +// // TODO: update autonomi API here +// // use sn_client::{Client, FilesApi, Uploader, WalletClient}; +// use sn_logging::LogBuilder; +// use sn_networking::sort_peers_by_key; +// use sn_protocol::{ +// safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, +// NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, +// }; +// use sn_registers::{Permissions, RegisterAddress}; +// use std::{ +// collections::{BTreeSet, HashMap, HashSet}, +// fs::File, +// io::Write, +// net::SocketAddr, +// path::PathBuf, +// time::{Duration, Instant}, +// }; +// use tonic::Request; +// use tracing::{debug, error, info}; +// use xor_name::XorName; + +// const CHUNK_SIZE: usize = 1024; + +// // VERIFICATION_DELAY is set based on the dead peer detection interval +// // Once a node has been restarted, it takes VERIFICATION_DELAY time +// // for the old peer to be removed from the routing table. +// // Replication is then kicked off to distribute the data to the new closest +// // nodes, hence verification has to be performed after this. +// const VERIFICATION_DELAY: Duration = Duration::from_secs(60); + +// /// Number of times to retry verification if it fails +// const VERIFICATION_ATTEMPTS: usize = 5; + +// /// Length of time to wait before re-verifying the data location +// const REVERIFICATION_DELAY: Duration = +// Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S); + +// // Default number of churns that should be performed. After each churn, we +// // wait for VERIFICATION_DELAY time before verifying the data location. +// // It can be overridden by setting the 'CHURN_COUNT' env var. +// const CHURN_COUNT: u8 = 20; + +// /// Default number of chunks that should be PUT to the network. +// /// It can be overridden by setting the 'CHUNK_COUNT' env var. +// const CHUNK_COUNT: usize = 5; +// /// Default number of registers that should be PUT to the network. +// /// It can be overridden by setting the 'REGISTER_COUNT' env var. +// const REGISTER_COUNT: usize = 5; + +// type NodeIndex = usize; +// type RecordHolders = HashMap>; + +// #[tokio::test(flavor = "multi_thread")] +// async fn verify_data_location() -> Result<()> { +// let _log_appender_guard = +// LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false); + +// let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") { +// str.parse::()? +// } else { +// CHURN_COUNT +// }; +// let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") { +// str.parse::()? +// } else { +// CHUNK_COUNT +// }; +// let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") { +// str.parse::()? 
+// } else { +// REGISTER_COUNT +// }; +// println!( +// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", +// VERIFICATION_DELAY*churn_count as u32 +// ); +// info!( +// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", +// VERIFICATION_DELAY*churn_count as u32 +// ); +// let node_rpc_address = get_all_rpc_addresses(true)?; +// let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; + +// // Store chunks +// println!("Creating a client and paying wallet..."); +// debug!("Creating a client and paying wallet..."); + +// let paying_wallet_dir = TempDir::new()?; + +// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; +// store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; + +// // Verify data location initially +// verify_location(&all_peers, &node_rpc_address).await?; + +// // Churn nodes and verify the location of the data after VERIFICATION_DELAY +// let mut current_churn_count = 0; + +// let mut node_restart = NodeRestart::new(true, false)?; +// let mut node_index = 0; +// 'main: loop { +// if current_churn_count >= churn_count { +// break 'main Ok(()); +// } +// current_churn_count += 1; + +// let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? { +// None => { +// // we have reached the end. +// break 'main Ok(()); +// } +// Some(safenode_rpc_endpoint) => safenode_rpc_endpoint, +// }; + +// // wait for the dead peer to be removed from the RT and the replication flow to finish +// println!( +// "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification" +// ); +// info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); +// tokio::time::sleep(VERIFICATION_DELAY).await; + +// // get the new PeerId for the current NodeIndex +// let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?; + +// let response = rpc_client +// .node_info(Request::new(NodeInfoRequest {})) +// .await?; +// let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; +// // The below indexing assumes that, the way we do iteration to retrieve all_peers inside get_all_rpc_addresses +// // and get_all_peer_ids is the same as how we do the iteration inside NodeRestart. +// // todo: make this more cleaner. 
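The CHURN_COUNT, CHUNK_COUNT and REGISTER_COUNT overrides above all repeat the same env-var-or-default dance; a generic helper in this spirit would collapse the repetition (a minimal sketch, not part of the patch: `env_or` is a hypothetical name, and it aborts the test on a malformed value much as the `?` on parse does above):

use std::str::FromStr;

// Read `name` from the environment, falling back to `default` when unset.
fn env_or<T: FromStr>(name: &str, default: T) -> T
where
    T::Err: std::fmt::Debug,
{
    std::env::var(name)
        .map(|s| s.parse::<T>().expect("failed to parse env var"))
        .unwrap_or(default)
}

// e.g. let churn_count = env_or("CHURN_COUNT", CHURN_COUNT);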
+// if all_peers[node_index] == new_peer_id { +// println!("new and old peer id are the same {new_peer_id:?}"); +// return Err(eyre!("new and old peer id are the same {new_peer_id:?}")); +// } +// all_peers[node_index] = new_peer_id; +// node_index += 1; + +// print_node_close_groups(&all_peers); + +// verify_location(&all_peers, &node_rpc_address).await?; +// } +// } + +// fn print_node_close_groups(all_peers: &[PeerId]) { +// let all_peers = all_peers.to_vec(); +// info!("\nNode close groups:"); + +// for (node_index, peer) in all_peers.iter().enumerate() { +// let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); +// let closest_peers = +// sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); +// let closest_peers_idx = closest_peers +// .iter() +// .map(|&&peer| { +// all_peers +// .iter() +// .position(|&p| p == peer) +// .expect("peer to be in iterator") +// }) +// .collect::>(); +// info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); +// } +// } + +// async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result { +// let mut record_holders = RecordHolders::default(); + +// for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() { +// let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; + +// let records_response = rpc_client +// .record_addresses(Request::new(RecordAddressesRequest {})) +// .await?; + +// for bytes in records_response.get_ref().addresses.iter() { +// let key = RecordKey::from(bytes.clone()); +// let holders = record_holders.entry(key).or_insert(HashSet::new()); +// holders.insert(node_index); +// } +// } +// debug!("Obtained the current set of Record Key holders"); +// Ok(record_holders) +// } + +// // Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey +// // It has a retry loop built in. +// async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAddr]) -> Result<()> { +// let mut failed = HashMap::new(); + +// println!("*********************************************"); +// println!("Verifying data across all peers {all_peers:?}"); +// info!("*********************************************"); +// info!("Verifying data across all peers {all_peers:?}"); + +// let mut verification_attempts = 0; +// while verification_attempts < VERIFICATION_ATTEMPTS { +// failed.clear(); +// let record_holders = get_records_and_holders(node_rpc_addresses).await?; +// for (key, actual_holders_idx) in record_holders.iter() { +// println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); +// info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); +// let record_key = KBucketKey::from(key.to_vec()); +// let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
+// .into_iter() +// .cloned() +// .collect::>(); + +// let actual_holders = actual_holders_idx +// .iter() +// .map(|i| all_peers[*i]) +// .collect::>(); + +// info!( +// "Expected to be held by {:?} nodes: {expected_holders:?}", +// expected_holders.len() +// ); +// info!( +// "Actually held by {:?} nodes : {actual_holders:?}", +// actual_holders.len() +// ); + +// if actual_holders != expected_holders { +// // print any expect holders that are not in actual holders +// let mut missing_peers = Vec::new(); +// expected_holders +// .iter() +// .filter(|expected| !actual_holders.contains(expected)) +// .for_each(|expected| missing_peers.push(*expected)); + +// if !missing_peers.is_empty() { +// error!( +// "Record {:?} is not stored by {missing_peers:?}", +// PrettyPrintRecordKey::from(key), +// ); +// println!( +// "Record {:?} is not stored by {missing_peers:?}", +// PrettyPrintRecordKey::from(key), +// ); +// } +// } + +// let mut failed_peers = Vec::new(); +// expected_holders +// .iter() +// .filter(|expected| !actual_holders.contains(expected)) +// .for_each(|expected| failed_peers.push(*expected)); + +// if !failed_peers.is_empty() { +// failed.insert(key.clone(), failed_peers); +// } +// } + +// if !failed.is_empty() { +// error!("Verification failed for {:?} entries", failed.len()); +// println!("Verification failed for {:?} entries", failed.len()); + +// failed.iter().for_each(|(key, failed_peers)| { +// let key_addr = NetworkAddress::from_record_key(key); +// let pretty_key = PrettyPrintRecordKey::from(key); +// failed_peers.iter().for_each(|peer| { +// let peer_addr = NetworkAddress::from_peer(*peer); +// let ilog2_distance = peer_addr.distance(&key_addr).ilog2(); +// println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); +// error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); +// }); +// }); +// info!("State of each node:"); +// record_holders.iter().for_each(|(key, node_index)| { +// info!( +// "Record {:?} is currently held by node indices {node_index:?}", +// PrettyPrintRecordKey::from(key) +// ); +// }); +// info!("Node index map:"); +// all_peers +// .iter() +// .enumerate() +// .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); +// verification_attempts += 1; +// println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); +// info!("Sleeping before retrying verification. 
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); +// if verification_attempts < VERIFICATION_ATTEMPTS { +// tokio::time::sleep(REVERIFICATION_DELAY).await; +// } +// } else { +// // if successful, break out of the loop +// break; +// } +// } + +// if !failed.is_empty() { +// println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); +// error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); +// Err(eyre!("Verification failed for: {failed:?}")) +// } else { +// println!("All the Records have been verified!"); +// info!("All the Records have been verified!"); +// Ok(()) +// } +// } + +// // Generate random Chunks and store them to the Network +// async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { +// let start = Instant::now(); +// let mut rng = OsRng; + +// let mut uploaded_chunks_count = 0; +// loop { +// if uploaded_chunks_count >= chunk_count { +// break; +// } + +// let chunks_dir = TempDir::new()?; + +// let random_bytes: Vec = ::std::iter::repeat(()) +// .map(|()| rng.gen::()) +// .take(CHUNK_SIZE) +// .collect(); + +// let file_path = chunks_dir.join("random_content"); +// let mut output_file = File::create(file_path.clone())?; +// output_file.write_all(&random_bytes)?; + +// let (head_chunk_addr, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; + +// debug!( +// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", +// chunks.len(), +// random_bytes.len() +// ); + +// let key = +// PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); + +// let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); +// uploader.set_show_holders(true); +// uploader.set_verify_store(false); +// uploader.insert_chunk_paths(chunks); +// let _upload_stats = uploader.start_upload().await?; + +// uploaded_chunks_count += 1; + +// println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); +// info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); +// } + +// println!( +// "{chunk_count:?} Chunks were stored in {:?}", +// start.elapsed() +// ); +// info!( +// "{chunk_count:?} Chunks were stored in {:?}", +// start.elapsed() +// ); + +// // to make sure the last chunk was stored +// tokio::time::sleep(Duration::from_secs(10)).await; + +// Ok(()) +// } + +// async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { +// let start = Instant::now(); +// let paying_wallet = get_wallet(&wallet_dir); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let mut uploaded_registers_count = 0; +// loop { +// if uploaded_registers_count >= register_count { +// break; +// } +// let meta = XorName(rand::random()); +// let owner = client.signer_pk(); + +// let addr = RegisterAddress::new(meta, owner); +// println!("Creating Register at {addr:?}"); +// debug!("Creating Register at {addr:?}"); + +// let (mut register, ..) 
= client
+//         .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default())
+//         .await?;
+
+//         println!("Editing Register at {addr:?}");
+//         debug!("Editing Register at {addr:?}");
+//         register.write_online("entry".as_bytes(), true).await?;
+
+//         uploaded_registers_count += 1;
+//     }
+//     println!(
+//         "{register_count:?} Registers were stored in {:?}",
+//         start.elapsed()
+//     );
+//     info!(
+//         "{register_count:?} Registers were stored in {:?}",
+//         start.elapsed()
+//     );
+
+//     // to make sure the last register was stored
+//     tokio::time::sleep(Duration::from_secs(10)).await;
+//     Ok(())
+// }
diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs
index da19270b69..8f01c1a24a 100644
--- a/sn_node/tests/verify_routing_table.rs
+++ b/sn_node/tests/verify_routing_table.rs
@@ -1,114 +1,114 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
+// // Copyright 2024 MaidSafe.net limited.
+// //
+// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// // KIND, either express or implied. Please review the Licences for the specific language governing
+// // permissions and limitations relating to use of the SAFE Network Software.

-#![allow(clippy::mutable_key_type)]
-mod common;
+// #![allow(clippy::mutable_key_type)]
+// mod common;

-use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client};
-use color_eyre::Result;
-use libp2p::{
-    kad::{KBucketKey, K_VALUE},
-    PeerId,
-};
-use sn_logging::LogBuilder;
-use sn_protocol::safenode_proto::KBucketsRequest;
-use std::{
-    collections::{BTreeMap, HashSet},
-    time::Duration,
-};
-use tonic::Request;
-use tracing::{error, info, trace};
+// use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client};
+// use color_eyre::Result;
+// use libp2p::{
+//     kad::{KBucketKey, K_VALUE},
+//     PeerId,
+// };
+// use sn_logging::LogBuilder;
+// use sn_protocol::safenode_proto::KBucketsRequest;
+// use std::{
+//     collections::{BTreeMap, HashSet},
+//     time::Duration,
+// };
+// use tonic::Request;
+// use tracing::{error, info, trace};

-/// Sleep for some time for the nodes to discover each other before verification.
-/// It can also be set through the env variable of the same name.
-const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);
+// /// Sleep for some time for the nodes to discover each other before verification.
+// /// It can also be set through the env variable of the same name.
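The routing-table check that follows leans on one identity: a peer's bucket index is the ilog2 of the XOR distance between the two keys. A standalone sketch of that arithmetic (a hypothetical helper, independent of libp2p's KBucketKey, written for 256-bit keys):

// Bucket index of `peer` relative to `local`: the position of the highest
// set bit of the XOR distance; None when the keys are identical, which
// matches the `None => continue` arm in the test below.
fn bucket_index(local: &[u8; 32], peer: &[u8; 32]) -> Option<u32> {
    for (i, (a, b)) in local.iter().zip(peer.iter()).enumerate() {
        let x = a ^ b;
        if x != 0 {
            return Some(255 - (i as u32 * 8 + x.leading_zeros()));
        }
    }
    None
}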
+// const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);

-#[tokio::test(flavor = "multi_thread")]
-async fn verify_routing_table() -> Result<()> {
-    let _log_appender_guard =
-        LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false);
+// #[tokio::test(flavor = "multi_thread")]
+// async fn verify_routing_table() -> Result<()> {
+//     let _log_appender_guard =
+//         LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false);

-    let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION")
-        .map(|value| {
-            value
-                .parse::<u64>()
-                .expect("Failed to parse sleep value into u64")
-        })
-        .map(Duration::from_secs)
-        .unwrap_or(SLEEP_BEFORE_VERIFICATION);
-    info!("Sleeping for {sleep_duration:?} before verification");
-    tokio::time::sleep(sleep_duration).await;
+//     let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION")
+//         .map(|value| {
+//             value
+//                 .parse::<u64>()
+//                 .expect("Failed to parse sleep value into u64")
+//         })
+//         .map(Duration::from_secs)
+//         .unwrap_or(SLEEP_BEFORE_VERIFICATION);
+//     info!("Sleeping for {sleep_duration:?} before verification");
+//     tokio::time::sleep(sleep_duration).await;

-    let node_rpc_address = get_all_rpc_addresses(false)?;
+//     let node_rpc_address = get_all_rpc_addresses(false)?;

-    let all_peers = get_all_peer_ids(&node_rpc_address).await?;
-    trace!("All peers: {all_peers:?}");
-    let mut all_failed_list = BTreeMap::new();
+//     let all_peers = get_all_peer_ids(&node_rpc_address).await?;
+//     trace!("All peers: {all_peers:?}");
+//     let mut all_failed_list = BTreeMap::new();

-    for (node_index, rpc_address) in node_rpc_address.iter().enumerate() {
-        let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?;
+//     for (node_index, rpc_address) in node_rpc_address.iter().enumerate() {
+//         let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?;

-        let response = rpc_client
-            .k_buckets(Request::new(KBucketsRequest {}))
-            .await?;
+//         let response = rpc_client
+//             .k_buckets(Request::new(KBucketsRequest {}))
+//             .await?;

-        let k_buckets = response.get_ref().kbuckets.clone();
-        let k_buckets = k_buckets
-            .into_iter()
-            .map(|(ilog2, peers)| {
-                let peers = peers
-                    .peers
-                    .into_iter()
-                    .map(|peer_bytes| PeerId::from_bytes(&peer_bytes).unwrap())
-                    .collect::<HashSet<_>>();
-                (ilog2, peers)
-            })
-            .collect::<BTreeMap<_, _>>();
+//         let k_buckets = response.get_ref().kbuckets.clone();
+//         let k_buckets = k_buckets
+//             .into_iter()
+//             .map(|(ilog2, peers)| {
+//                 let peers = peers
+//                     .peers
+//                     .into_iter()
+//                     .map(|peer_bytes| PeerId::from_bytes(&peer_bytes).unwrap())
+//                     .collect::<HashSet<_>>();
+//                 (ilog2, peers)
+//             })
+//             .collect::<BTreeMap<_, _>>();

-        let current_peer = all_peers[node_index];
-        let current_peer_key = KBucketKey::from(current_peer);
-        trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}");
+//         let current_peer = all_peers[node_index];
+//         let current_peer_key = KBucketKey::from(current_peer);
+//         trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}");

-        let mut failed_list = Vec::new();
-        for peer in all_peers.iter() {
-            let ilog2_distance = match KBucketKey::from(*peer).distance(&current_peer_key).ilog2() {
-                Some(distance) => distance,
-                // None if same key
-                None => continue,
-            };
-            match k_buckets.get(&ilog2_distance) {
-                Some(bucket) => {
-                    if bucket.contains(peer) {
-                        println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-                        continue;
-                    } else if bucket.len() == K_VALUE.get() {
-                        println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
-                        info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
-                        continue;
-                    } else {
-                        println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-                        error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-                        failed_list.push(*peer);
-                    }
-                }
-                None => {
-                    info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer.");
-                    failed_list.push(*peer);
-                }
-            }
-        }
-        if !failed_list.is_empty() {
-            all_failed_list.insert(current_peer, failed_list);
-        }
-    }
-    if !all_failed_list.is_empty() {
-        error!("Failed to verify routing table:\n{all_failed_list:?}");
-        panic!("Failed to verify routing table.");
-    }
-    Ok(())
-}
+//     let mut failed_list = Vec::new();
+//     for peer in all_peers.iter() {
+//         let ilog2_distance = match KBucketKey::from(*peer).distance(&current_peer_key).ilog2() {
+//             Some(distance) => distance,
+//             // None if same key
+//             None => continue,
+//         };
+//         match k_buckets.get(&ilog2_distance) {
+//             Some(bucket) => {
+//                 if bucket.contains(peer) {
+//                     println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
+//                     continue;
+//                 } else if bucket.len() == K_VALUE.get() {
+//                     println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
+//                     info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
+//                     continue;
+//                 } else {
+//                     println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
+//                     error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
+//                     failed_list.push(*peer);
+//                 }
+//             }
+//             None => {
+//                 info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer.");
+//                 failed_list.push(*peer);
+//             }
+//         }
+//     }
+//     if !failed_list.is_empty() {
+//         all_failed_list.insert(current_peer, failed_list);
+//     }
+// }
+// if !all_failed_list.is_empty() {
+//     error!("Failed to verify routing table:\n{all_failed_list:?}");
+//     panic!("Failed to verify routing table.");
+// }
+// Ok(())
+// }
diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml
index 055f1913b9..60c9f79134 100644
--- a/sn_node_rpc_client/Cargo.toml
+++ b/sn_node_rpc_client/Cargo.toml
@@ -24,14 +24,16 @@ bls = { package = "blsttc", version = "8.0.1" }
clap = { version = "4.2.1", features = ["derive"] }
color-eyre = "0.6.2"
hex = "~0.4.3"
-libp2p = { version = "0.54.1", features = ["kad"]}
-libp2p-identity = { version="0.2.7", features = ["rand"] }
+libp2p = { version = "0.54.1", features = ["kad"] }
+libp2p-identity = { version = "0.2.7", features = ["rand"] }
sn_build_info = { path = "../sn_build_info", version = "0.1.13" }
-sn_client = { path = "../sn_client", version = "0.110.1" }
+# sn_client = { path = "../sn_client", version = "0.110.1" }
sn_logging = { path = "../sn_logging", version = "0.2.34" }
sn_node = { path = "../sn_node", version = "0.111.2" }
sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.9", features=["rpc"] }
+sn_protocol = { path = "../sn_protocol",
version = "0.17.9", features = [ + "rpc", +] } sn_service_management = { path = "../sn_service_management", version = "0.3.12" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } thiserror = "1.0.23" From 804bce11aef6967e32b740888a7a7f52ecb49fad Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 1 Oct 2024 13:16:03 +0900 Subject: [PATCH 073/255] fix: clippy issues --- sn_node_manager/src/cmd/local.rs | 8 ++++---- sn_node_manager/src/local.rs | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index 8e1ba90c31..b77ed0b36e 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -28,8 +28,8 @@ pub async fn join( build: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, @@ -143,8 +143,8 @@ pub async fn run( clean: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index ed39f67c12..3f31ac899e 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; @@ -22,10 +22,8 @@ use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, - rpc::{RpcActions, RpcClient}, - FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, + rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, From bd589da2c3556722e3442bd5b697cb1e13224923 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 1 Oct 2024 13:49:18 +0900 Subject: [PATCH 074/255] chore(aut): refactor away client traits seems like the Wallet is the interface we might need traits over... 
even then if we're not exposing that to the world we can just use feat-gate imports here over the APIs needed, I think --- autonomi/src/client/data.rs | 230 ++++++++++++++++++++++++--- autonomi/src/client/files.rs | 53 +++++- autonomi/src/client/mod.rs | 22 --- autonomi/src/client/registers.rs | 127 ++++++++++++++- autonomi/src/client/vault.rs | 102 ++++++++++-- autonomi/src/evm/client/data.rs | 10 +- autonomi/src/evm/client/files.rs | 60 ------- autonomi/src/evm/client/mod.rs | 33 ---- autonomi/src/evm/client/registers.rs | 153 ------------------ autonomi/src/evm/client/vault.rs | 107 ------------- autonomi/src/evm/mod.rs | 5 - autonomi/src/lib.rs | 14 +- autonomi/tests/evm/file.rs | 118 +++++++------- autonomi/tests/evm/mod.rs | 4 - autonomi/tests/evm/put.rs | 4 +- sn_networking/src/lib.rs | 4 +- sn_networking/src/record_store.rs | 10 +- sn_node/src/put_validation.rs | 3 +- sn_node_manager/src/local.rs | 6 +- 19 files changed, 550 insertions(+), 515 deletions(-) delete mode 100644 autonomi/src/evm/client/files.rs delete mode 100644 autonomi/src/evm/client/mod.rs delete mode 100644 autonomi/src/evm/client/registers.rs delete mode 100644 autonomi/src/evm/client/vault.rs delete mode 100644 autonomi/src/evm/mod.rs diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 9ee4559c20..167a4bd872 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -1,16 +1,28 @@ -use crate::client::{Client, ClientWrapper}; use crate::self_encryption::DataMapLevel; use bytes::Bytes; use evmlib::wallet; -use libp2p::kad::Quorum; +use libp2p::kad::{Quorum, Record}; + use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; -use sn_networking::{GetRecordCfg, NetworkError}; -use sn_protocol::storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}; -use sn_protocol::NetworkAddress; use std::collections::HashSet; use tokio::task::JoinError; use xor_name::XorName; +use crate::{self_encryption::encrypt, Client}; +use evmlib::common::{QuoteHash, QuotePayment, TxHash}; +use evmlib::wallet::Wallet; +use libp2p::futures; +use sn_evm::ProofOfPayment; +use sn_networking::PutRecordCfg; +use sn_networking::{GetRecordCfg, Network, NetworkError, PayeeQuote}; +use sn_protocol::{ + storage::{ + try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, + }, + NetworkAddress, +}; +use std::collections::{BTreeMap, HashMap}; + /// Errors that can occur during the put operation. #[derive(Debug, thiserror::Error)] pub enum PutError { @@ -22,14 +34,18 @@ pub enum PutError { VaultXorName, #[error("A network error occurred.")] Network(#[from] NetworkError), + #[error("Error occurred during payment.")] + PayError(#[from] PayError), + + // native token #[cfg(feature = "native-payments")] #[error("A wallet error occurred.")] Wallet(#[from] sn_transfers::WalletError), + + // evm token #[cfg(feature = "evm-payments")] #[error("A wallet error occurred.")] - EvmWallet(#[from] sn_evm::EvmError), - #[error("Error occurred during payment.")] - PayError(#[from] PayError), + Wallet(#[from] sn_evm::EvmError), } /// Errors that can occur during the pay operation. @@ -144,24 +160,198 @@ impl Client { }; } } -} -pub trait Data: ClientWrapper { - async fn get(&self, data_map_addr: XorName) -> Result { - self.client().get(data_map_addr).await + /// Upload a piece of data to the network. This data will be self-encrypted, + /// and the data map XOR address will be returned. 
+ pub async fn put(&mut self, data: Bytes, wallet: &Wallet) -> Result { + let now = std::time::Instant::now(); + let (data_map_chunk, chunks) = encrypt(data)?; + + tracing::debug!("Encryption took: {:.2?}", now.elapsed()); + + let map_xor_name = *data_map_chunk.address().xorname(); + let mut xor_names = vec![map_xor_name]; + + for chunk in &chunks { + xor_names.push(*chunk.name()); + } + + // Pay for all chunks + data map chunk + let (payment_proofs, _free_chunks) = self.pay(xor_names.into_iter(), wallet).await?; + + // Upload data map + if let Some(proof) = payment_proofs.get(&map_xor_name) { + self.upload_chunk(data_map_chunk.clone(), proof.clone()) + .await?; + } + + // Upload the rest of the chunks + for chunk in chunks { + if let Some(proof) = payment_proofs.get(chunk.name()) { + self.upload_chunk(chunk, proof.clone()).await?; + } + } + + Ok(map_xor_name) } - async fn fetch_chunk(&self, addr: XorName) -> Result { - self.client().fetch_chunk(addr).await + pub(crate) async fn pay( + &mut self, + content_addrs: impl Iterator, + wallet: &Wallet, + ) -> Result<(HashMap, Vec), PayError> { + let cost_map = self.get_store_quotes(content_addrs).await?; + let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map); + + // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying. + // TODO: retry when it fails? + // Execute chunk payments + let payments = wallet + .pay_for_quotes(quote_payments) + .await + .map_err(|err| PayError::from(err.0))?; + + let proofs = construct_proofs(&cost_map, &payments); + + tracing::trace!( + "Chunk payments of {} chunks completed. {} chunks were free / already paid for", + proofs.len(), + skipped_chunks.len() + ); + + Ok((proofs, skipped_chunks)) } - async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { - self.client().fetch_from_data_map(data_map).await + async fn get_store_quotes( + &mut self, + content_addrs: impl Iterator, + ) -> Result, PayError> { + let futures: Vec<_> = content_addrs + .into_iter() + .map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr)) + .collect(); + + let quotes = futures::future::try_join_all(futures).await?; + + Ok(quotes.into_iter().collect::>()) } - async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result { - self.client() - .fetch_from_data_map_chunk(data_map_bytes) - .await + /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. + async fn upload_chunk( + &self, + chunk: Chunk, + proof_of_payment: ProofOfPayment, + ) -> Result<(), PutError> { + self.store_chunk(chunk, proof_of_payment).await?; + Ok(()) + } + + /// Actually store a chunk to a peer. + async fn store_chunk(&self, chunk: Chunk, payment: ProofOfPayment) -> Result<(), PutError> { + let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID"); + + tracing::debug!("Storing chunk: {chunk:?} to {:?}", storing_node); + + let key = chunk.network_address().to_record_key(); + + let record_kind = RecordKind::ChunkWithPayment; + let record = Record { + key: key.clone(), + value: try_serialize_record(&(payment, chunk.clone()), record_kind) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::One, + retry_strategy: None, + use_put_record_to: Some(vec![storing_node]), + verification: None, + }; + Ok(self.network.put_record(record, &put_cfg).await?) 
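End to end, the inherent methods above compose into a short call sequence; a usage sketch under assumed setup (`peers` holds bootstrap Multiaddrs and `wallet` is a funded evmlib Wallet; error handling elided):

let mut client = Client::connect(&peers).await?;
// put: self-encrypt, pay for the chunks, upload, return the data map address
let addr = client.put(Bytes::from("hello, network"), &wallet).await?;
// get: fetch the data map and chunks, decrypt, return the original bytes
let data = client.get(addr).await?;
assert_eq!(data, Bytes::from("hello, network"));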
} } + +/// Fetch a store quote for a content address with a retry strategy. +async fn fetch_store_quote_with_retries( + network: &Network, + content_addr: XorName, +) -> Result<(XorName, PayeeQuote), PayError> { + let mut retries = 0; + + loop { + match fetch_store_quote(network, content_addr).await { + Ok(quote) => { + break Ok((content_addr, quote)); + } + Err(err) if retries < 2 => { + retries += 1; + tracing::error!("Error while fetching store quote: {err:?}, retry #{retries}"); + } + Err(err) => { + tracing::error!( + "Error while fetching store quote: {err:?}, stopping after {retries} retries" + ); + break Err(PayError::CouldNotGetStoreQuote(content_addr)); + } + } + } +} + +/// Fetch a store quote for a content address. +async fn fetch_store_quote( + network: &Network, + content_addr: XorName, +) -> Result { + network + .get_store_costs_from_network( + NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), + vec![], + ) + .await +} + +/// Form to be executed payments and already executed payments from a cost map. +fn extract_quote_payments( + cost_map: &HashMap, +) -> (Vec, Vec) { + let mut to_be_paid = vec![]; + let mut already_paid = vec![]; + + for (chunk_address, quote) in cost_map.iter() { + if quote.2.cost.is_zero() { + already_paid.push(*chunk_address); + } else { + to_be_paid.push(( + quote.2.hash(), + quote.2.rewards_address, + quote.2.cost.as_atto(), + )); + } + } + + (to_be_paid, already_paid) +} + +/// Construct payment proofs from cost map and payments map. +fn construct_proofs( + cost_map: &HashMap, + payments: &BTreeMap, +) -> HashMap { + cost_map + .iter() + .filter_map(|(xor_name, (_, _, quote))| { + payments.get("e.hash()).map(|tx_hash| { + ( + *xor_name, + ProofOfPayment { + quote: quote.clone(), + tx_hash: *tx_hash, + }, + ) + }) + }) + .collect() +} diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs index 7c01776e47..41cf3a328a 100644 --- a/autonomi/src/client/files.rs +++ b/autonomi/src/client/files.rs @@ -53,14 +53,53 @@ impl Client { let data = self.get(file.data_map).await?; Ok(data) } -} -pub trait Files: ClientWrapper { - async fn fetch_root(&mut self, address: XorName) -> Result { - self.client_mut().fetch_root(address).await - } + /// Upload a directory to the network. The directory is recursively walked. 
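fetch_store_quote_with_retries above hard-codes its two retries; the same shape generalises to any transient fallible future (a sketch, assuming the operation is cheap and safe to re-issue):

use std::future::Future;

// Retry `op` up to `max_retries` extra times, returning the last error.
async fn with_retries<T, E, F, Fut>(mut op: F, max_retries: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    let mut attempt = 0;
    loop {
        match op().await {
            Ok(value) => break Ok(value),
            Err(_) if attempt < max_retries => attempt += 1, // transient; go again
            Err(err) => break Err(err),
        }
    }
}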
+ #[cfg(feature = "fs")] + pub async fn upload_from_dir( + &mut self, + path: PathBuf, + wallet: &Wallet, + ) -> Result<(Root, XorName), UploadError> { + let mut map = HashMap::new(); + + for entry in WalkDir::new(path) { + let entry = entry?; + + if !entry.file_type().is_file() { + continue; + } + + let path = entry.path().to_path_buf(); + tracing::info!("Uploading file: {path:?}"); + let file = upload_from_file(self, path.clone(), wallet).await?; + + map.insert(path, file); + } - async fn fetch_file(&mut self, file: &FilePointer) -> Result { - self.client_mut().fetch_file(file).await + let root = Root { map }; + let root_serialized = rmp_serde::to_vec(&root).expect("TODO"); + + let xor_name = self.put(Bytes::from(root_serialized), wallet).await?; + + Ok((root, xor_name)) } } + +async fn upload_from_file( + client: &mut Client, + path: PathBuf, + wallet: &Wallet, +) -> Result { + let data = tokio::fs::read(path).await?; + let data = Bytes::from(data); + + let addr = client.put(data, wallet).await?; + + // TODO: Set created_at and modified_at + Ok(FilePointer { + data_map: addr, + created_at: 0, + modified_at: 0, + }) +} diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 292bf53275..2900ae12b7 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -169,25 +169,3 @@ async fn handle_event_receiver( // TODO: Handle closing of network events sender } - -pub trait ClientWrapper { - fn from_client(client: Client) -> Self; - - fn client(&self) -> &Client; - - fn client_mut(&mut self) -> &mut Client; - - fn into_client(self) -> Client; - - fn network(&self) -> &Network { - &self.client().network - } - - async fn connect(peers: &[Multiaddr]) -> Result - where - Self: Sized, - { - let client = Client::connect(peers).await?; - Ok(Self::from_client(client)) - } -} diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index fb87071a42..47d4dbdf74 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -143,21 +143,132 @@ impl Client { Ok(()) } -} -pub trait Registers: ClientWrapper { - async fn fetch_register(&self, address: RegisterAddress) -> Result { - self.client().fetch_register(address).await + /// Creates a new Register with an initial value and uploads it to the network. + pub async fn create_register( + &mut self, + value: Bytes, + name: XorName, + owner: SecretKey, + wallet: &Wallet, + ) -> Result { + let pk = owner.public_key(); + + // Owner can write to the register. + let permissions = Permissions::new_with([pk]); + let mut register = ClientRegister::new(pk, name, permissions); + let address = NetworkAddress::from_register_address(*register.address()); + + let entries = register + .read() + .into_iter() + .map(|(entry_hash, _value)| entry_hash) + .collect(); + + // TODO: Handle error. + let _ = register.write(value.into(), &entries, &owner); + let reg_xor = register.address().xorname(); + let (payment_proofs, _) = self.pay(std::iter::once(reg_xor), wallet).await?; + // Should always be there, else it would have failed on the payment step. + let proof = payment_proofs.get(®_xor).expect("Missing proof"); + let payee = proof.to_peer_id_payee().expect("Missing payee Peer ID"); + let signed_register = register.clone().into_signed(&owner).expect("TODO"); + + let record = Record { + key: address.to_record_key(), + value: try_serialize_record( + &(proof, &signed_register), + RecordKind::RegisterWithPayment, + ) + .map_err(|_| RegisterError::Serialization)? 
+ .to_vec(), + publisher: None, + expires: None, + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::All, + retry_strategy: None, + use_put_record_to: Some(vec![payee]), + verification: None, + }; + + self.network().put_record(record, &put_cfg).await?; + + Ok(Register { + inner: signed_register, + }) + } + + /// Fetches a Register from the network. + pub async fn fetch_register( + &self, + address: RegisterAddress, + ) -> Result { + let network_address = NetworkAddress::from_register_address(address); + let key = network_address.to_record_key(); + + let get_cfg = GetRecordCfg { + get_quorum: Quorum::One, + retry_strategy: None, + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; + + let record = self + .network() + .get_record_from_network(key, &get_cfg) + .await?; + + let register: SignedRegister = + try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + + Ok(Register { inner: register }) } - async fn update_register( + /// Updates a Register on the network with a new value. This will overwrite existing value(s). + pub async fn update_register( &self, register: Register, new_value: Bytes, owner: SecretKey, ) -> Result<(), RegisterError> { - self.client() - .update_register(register, new_value, owner) - .await + // Fetch the current register + let mut signed_register = register.inner; + let mut register = signed_register.clone().register().expect("TODO"); + + // Get all current branches + let children: BTreeSet = register.read().into_iter().map(|(e, _)| e).collect(); + + // Write the new value to all branches + let (_, op) = register + .write(new_value.to_vec(), &children, &owner) + .expect("TODO"); + + // Apply the operation to the register + signed_register.add_op(op.clone()).expect("TODO"); + + // Prepare the record for network storage + let record = Record { + key: NetworkAddress::from_register_address(*register.address()).to_record_key(), + value: try_serialize_record(&signed_register, RecordKind::Register) + .map_err(|_| RegisterError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::All, + retry_strategy: None, + use_put_record_to: None, + verification: None, + }; + + // Store the updated register on the network + self.network().put_record(record, &put_cfg).await?; + + Ok(()) } } diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 7cc1d080bb..779aba4b59 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -77,22 +77,96 @@ impl Client { Ok(pad) } -} -pub trait Vault: ClientWrapper { - fn with_vault_entropy(self, bytes: Bytes) -> Result - where - Self: Sized, - { - let client = self.into_client().with_vault_entropy(bytes)?; - Ok(Self::from_client(client)) - } + /// Put data into the client's VaultPacket + /// + /// Returns Ok(None) early if no vault packet is defined. + /// + /// Pays for a new VaultPacket if none yet created for the client. Returns the current version + /// of the data on success. 
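Create, update and fetch pair up in the obvious way; a register round-trip sketch (it assumes a connected client and funded wallet, mirrors the address construction used in the old store_registers test above, and elides error handling):

let owner = bls::SecretKey::random();
let name = XorName(rand::random());
let register = client
    .create_register(Bytes::from("first entry"), name, owner.clone(), &wallet)
    .await?;
client
    .update_register(register, Bytes::from("second entry"), owner.clone())
    .await?;
// The address is derived from the name and the owner's public key.
let addr = RegisterAddress::new(name, owner.public_key());
let _latest = client.fetch_register(addr).await?;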
+ pub async fn write_bytes_to_vault_if_defined( + &mut self, + data: Bytes, + wallet: &mut Wallet, + ) -> Result, PutError> { + // Exit early if no vault packet defined + let Some(client_sk) = self.client().vault_secret_key.as_ref() else { + return Ok(None); + }; - async fn fetch_and_decrypt_vault(&self) -> Result, VaultError> { - self.client().fetch_and_decrypt_vault().await - } + let client_pk = client_sk.public_key(); - async fn get_vault_from_network(&self) -> Result { - self.client().get_vault_from_network().await + let pad_res = self.get_vault_from_network().await; + let mut is_new = true; + + let mut scratch = if let Ok(existing_data) = pad_res { + tracing::info!("Scratchpad already exists, returning existing data"); + + info!( + "scratch already exists, is version {:?}", + existing_data.count() + ); + + is_new = false; + existing_data + } else { + tracing::trace!("new scratchpad creation"); + Scratchpad::new(client_pk) + }; + + let next_count = scratch.update_and_sign(data, client_sk); + let scratch_address = scratch.network_address(); + let scratch_key = scratch_address.to_record_key(); + + let record = if is_new { + self.pay( + [&scratch_address].iter().filter_map(|f| f.as_xorname()), + wallet, + ) + .await?; + + let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; + let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; + // Should always be there, else it would have failed on the payment step. + let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); + + Record { + key: scratch_key, + value: try_serialize_record(&(proof, scratch), RecordKind::ScratchpadWithPayment) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + } else { + Record { + key: scratch_key, + value: try_serialize_record(&scratch, RecordKind::Scratchpad) + .map_err(|_| PutError::Serialization)? + .to_vec(), + publisher: None, + expires: None, + } + }; + + let put_cfg = PutRecordCfg { + put_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::Balanced), + use_put_record_to: None, + verification: Some(( + VerificationKind::Network, + GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: None, + target_record: None, + expected_holders: HashSet::new(), + is_register: false, + }, + )), + }; + + self.network().put_record(record, &put_cfg).await?; + + Ok(Some(next_count)) } } diff --git a/autonomi/src/evm/client/data.rs b/autonomi/src/evm/client/data.rs index 9061b6fb19..1f724fe127 100644 --- a/autonomi/src/evm/client/data.rs +++ b/autonomi/src/evm/client/data.rs @@ -1,9 +1,10 @@ use crate::client::data::{Data, PayError, PutError}; -use crate::client::ClientWrapper; -use crate::evm::client::EvmClient; +use crate::evm::client::Client; use crate::self_encryption::encrypt; use bytes::Bytes; use evmlib::common::{QuoteHash, QuotePayment, TxHash}; + +#[cfg(feature = "evm-payments")] use evmlib::wallet::Wallet; use libp2p::futures; use libp2p::kad::{Quorum, Record}; @@ -17,9 +18,7 @@ use sn_protocol::{ use std::collections::{BTreeMap, HashMap}; use xor_name::XorName; -impl Data for EvmClient {} - -impl EvmClient { +impl Client { /// Upload a piece of data to the network. This data will be self-encrypted, /// and the data map XOR address will be returned. 
pub async fn put(&mut self, data: Bytes, wallet: &Wallet) -> Result<XorName, PutError> {
@@ -57,6 +56,7 @@ impl EvmClient {
     pub(crate) async fn pay(
         &mut self,
         content_addrs: impl Iterator<Item = XorName>,
+        #[cfg(feature = "vault")]
         wallet: &Wallet,
     ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
         let cost_map = self.get_store_quotes(content_addrs).await?;
diff --git a/autonomi/src/evm/client/files.rs b/autonomi/src/evm/client/files.rs
deleted file mode 100644
index be8e02e1cb..0000000000
--- a/autonomi/src/evm/client/files.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use crate::client::files::{FilePointer, Files, Root, UploadError};
-use crate::evm::client::EvmClient;
-use bytes::{BufMut, Bytes};
-use evmlib::wallet::Wallet;
-use std::{collections::HashMap, path::PathBuf};
-use walkdir::WalkDir;
-use xor_name::XorName;
-
-impl Files for EvmClient {}
-
-impl EvmClient {
-    /// Upload a directory to the network. The directory is recursively walked.
-    #[cfg(feature = "fs")]
-    pub async fn upload_from_dir(
-        &mut self,
-        path: PathBuf,
-        wallet: &Wallet,
-    ) -> Result<(Root, XorName), UploadError> {
-        let mut map = HashMap::new();
-
-        for entry in WalkDir::new(path) {
-            let entry = entry?;
-
-            if !entry.file_type().is_file() {
-                continue;
-            }
-
-            let path = entry.path().to_path_buf();
-            tracing::info!("Uploading file: {path:?}");
-            let file = upload_from_file(self, path.clone(), wallet).await?;
-
-            map.insert(path, file);
-        }
-
-        let root = Root { map };
-        let root_serialized = rmp_serde::to_vec(&root).expect("TODO");
-
-        let xor_name = self.put(Bytes::from(root_serialized), wallet).await?;
-
-        Ok((root, xor_name))
-    }
-}
-
-async fn upload_from_file(
-    client: &mut EvmClient,
-    path: PathBuf,
-    wallet: &Wallet,
-) -> Result<FilePointer, UploadError> {
-    let data = tokio::fs::read(path).await?;
-    let data = Bytes::from(data);
-
-    let addr = client.put(data, wallet).await?;
-
-    // TODO: Set created_at and modified_at
-    Ok(FilePointer {
-        data_map: addr,
-        created_at: 0,
-        modified_at: 0,
-    })
-}
diff --git a/autonomi/src/evm/client/mod.rs b/autonomi/src/evm/client/mod.rs
deleted file mode 100644
index 855dc30256..0000000000
--- a/autonomi/src/evm/client/mod.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use crate::client::{Client, ClientWrapper};
-
-#[cfg(feature = "data")]
-pub mod data;
-#[cfg(feature = "files")]
-pub mod files;
-#[cfg(feature = "registers")]
-pub mod registers;
-#[cfg(feature = "vault")]
-pub mod vault;
-
-#[derive(Clone)]
-pub struct EvmClient {
-    client: Client,
-}
-
-impl ClientWrapper for EvmClient {
-    fn from_client(client: Client) -> Self {
-        EvmClient { client }
-    }
-
-    fn client(&self) -> &Client {
-        &self.client
-    }
-
-    fn client_mut(&mut self) -> &mut Client {
-        &mut self.client
-    }
-
-    fn into_client(self) -> Client {
-        self.client
-    }
-}
diff --git a/autonomi/src/evm/client/registers.rs b/autonomi/src/evm/client/registers.rs
deleted file mode 100644
index 60687c478b..0000000000
--- a/autonomi/src/evm/client/registers.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-use std::collections::BTreeSet;
-
-use crate::client::registers::{Register, RegisterError, Registers};
-use crate::client::ClientWrapper;
-use crate::evm::client::EvmClient;
-use bls::SecretKey;
-use bytes::Bytes;
-use evmlib::wallet::Wallet;
-use libp2p::kad::{Quorum, Record};
-use sn_networking::GetRecordCfg;
-use sn_networking::PutRecordCfg;
-use sn_protocol::storage::try_deserialize_record;
-use sn_protocol::storage::try_serialize_record;
-use sn_protocol::storage::RecordKind;
-use sn_protocol::storage::RegisterAddress;
-use sn_protocol::NetworkAddress;
-use sn_registers::EntryHash;
-use
sn_registers::Permissions; -use sn_registers::Register as ClientRegister; -use sn_registers::SignedRegister; -use xor_name::XorName; - -impl Registers for EvmClient {} - -impl EvmClient { - /// Creates a new Register with an initial value and uploads it to the network. - pub async fn create_register( - &mut self, - value: Bytes, - name: XorName, - owner: SecretKey, - wallet: &Wallet, - ) -> Result { - let pk = owner.public_key(); - - // Owner can write to the register. - let permissions = Permissions::new_with([pk]); - let mut register = ClientRegister::new(pk, name, permissions); - let address = NetworkAddress::from_register_address(*register.address()); - - let entries = register - .read() - .into_iter() - .map(|(entry_hash, _value)| entry_hash) - .collect(); - - // TODO: Handle error. - let _ = register.write(value.into(), &entries, &owner); - let reg_xor = register.address().xorname(); - let (payment_proofs, _) = self.pay(std::iter::once(reg_xor), wallet).await?; - // Should always be there, else it would have failed on the payment step. - let proof = payment_proofs.get(®_xor).expect("Missing proof"); - let payee = proof.to_peer_id_payee().expect("Missing payee Peer ID"); - let signed_register = register.clone().into_signed(&owner).expect("TODO"); - - let record = Record { - key: address.to_record_key(), - value: try_serialize_record( - &(proof, &signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - - self.network().put_record(record, &put_cfg).await?; - - Ok(Register { - inner: signed_register, - }) - } - - /// Fetches a Register from the network. - pub async fn fetch_register( - &self, - address: RegisterAddress, - ) -> Result { - let network_address = NetworkAddress::from_register_address(address); - let key = network_address.to_record_key(); - - let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: None, - target_record: None, - expected_holders: Default::default(), - is_register: true, - }; - - let record = self - .network() - .get_record_from_network(key, &get_cfg) - .await?; - - let register: SignedRegister = - try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; - - Ok(Register { inner: register }) - } - - /// Updates a Register on the network with a new value. This will overwrite existing value(s). - pub async fn update_register( - &self, - register: Register, - new_value: Bytes, - owner: SecretKey, - ) -> Result<(), RegisterError> { - // Fetch the current register - let mut signed_register = register.inner; - let mut register = signed_register.clone().register().expect("TODO"); - - // Get all current branches - let children: BTreeSet = register.read().into_iter().map(|(e, _)| e).collect(); - - // Write the new value to all branches - let (_, op) = register - .write(new_value.to_vec(), &children, &owner) - .expect("TODO"); - - // Apply the operation to the register - signed_register.add_op(op.clone()).expect("TODO"); - - // Prepare the record for network storage - let record = Record { - key: NetworkAddress::from_register_address(*register.address()).to_record_key(), - value: try_serialize_record(&signed_register, RecordKind::Register) - .map_err(|_| RegisterError::Serialization)? 
- .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: None, - verification: None, - }; - - // Store the updated register on the network - self.network().put_record(record, &put_cfg).await?; - - Ok(()) - } -} diff --git a/autonomi/src/evm/client/vault.rs b/autonomi/src/evm/client/vault.rs deleted file mode 100644 index c71b9803b7..0000000000 --- a/autonomi/src/evm/client/vault.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::client::data::PutError; -use crate::client::vault::Vault; -use crate::client::ClientWrapper; -use crate::evm::client::EvmClient; -use bytes::Bytes; -use evmlib::wallet::Wallet; -use libp2p::kad::{Quorum, Record}; -use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; -use sn_protocol::storage::{try_serialize_record, RecordKind, RetryStrategy, Scratchpad}; -use std::collections::HashSet; -use tracing::info; - -impl Vault for EvmClient {} - -impl EvmClient { - /// Put data into the client's VaultPacket - /// - /// Returns Ok(None) early if no vault packet is defined. - /// - /// Pays for a new VaultPacket if none yet created for the client. Returns the current version - /// of the data on success. - pub async fn write_bytes_to_vault_if_defined( - &mut self, - data: Bytes, - wallet: &mut Wallet, - ) -> Result, PutError> { - // Exit early if no vault packet defined - let Some(client_sk) = self.client().vault_secret_key.as_ref() else { - return Ok(None); - }; - - let client_pk = client_sk.public_key(); - - let pad_res = self.get_vault_from_network().await; - let mut is_new = true; - - let mut scratch = if let Ok(existing_data) = pad_res { - tracing::info!("Scratchpad already exists, returning existing data"); - - info!( - "scratch already exists, is version {:?}", - existing_data.count() - ); - - is_new = false; - existing_data - } else { - tracing::trace!("new scratchpad creation"); - Scratchpad::new(client_pk) - }; - - let next_count = scratch.update_and_sign(data, client_sk); - let scratch_address = scratch.network_address(); - let scratch_key = scratch_address.to_record_key(); - - let record = if is_new { - self.pay( - [&scratch_address].iter().filter_map(|f| f.as_xorname()), - wallet, - ) - .await?; - - let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?; - let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?; - // Should always be there, else it would have failed on the payment step. - let proof = payment_proofs.get(&scratch_xor).expect("Missing proof"); - - Record { - key: scratch_key, - value: try_serialize_record(&(proof, scratch), RecordKind::ScratchpadWithPayment) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - } else { - Record { - key: scratch_key, - value: try_serialize_record(&scratch, RecordKind::Scratchpad) - .map_err(|_| PutError::Serialization)? 
- .to_vec(), - publisher: None, - expires: None, - } - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: None, - verification: Some(( - VerificationKind::Network, - GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: None, - expected_holders: HashSet::new(), - is_register: false, - }, - )), - }; - - self.network().put_record(record, &put_cfg).await?; - - Ok(Some(next_count)) - } -} diff --git a/autonomi/src/evm/mod.rs b/autonomi/src/evm/mod.rs deleted file mode 100644 index a7c160ea53..0000000000 --- a/autonomi/src/evm/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub use crate::client::Client; - -pub mod client; - -pub type EvmWallet = evmlib::wallet::Wallet; diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 295c3ca576..f156d7aadc 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -21,23 +21,19 @@ // docs.rs generation will enable unstable `doc_cfg` feature #![cfg_attr(docsrs, feature(doc_cfg))] +pub mod client; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use bytes::Bytes; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use libp2p::Multiaddr; -pub mod client; -#[cfg(feature = "evm-payments")] -pub mod evm; +pub use client::Client; + #[cfg(feature = "native-payments")] pub mod native; + +#[cfg(feature = "data")] mod self_encryption; #[cfg(feature = "transfers")] const VERIFY_STORE: bool = true; - -#[cfg(all(feature = "native-payments", not(feature = "evm-payments")))] -pub type Client = native::Client; - -#[cfg(all(feature = "evm-payments", not(feature = "native-payments")))] -pub type Client = evm::Client; diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs index 4b4abfee35..6a85ff3f07 100644 --- a/autonomi/tests/evm/file.rs +++ b/autonomi/tests/evm/file.rs @@ -1,78 +1,82 @@ -use crate::common; -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; -use crate::evm::Client; -use bytes::Bytes; -use eyre::bail; -use std::time::Duration; -use tokio::time::sleep; +#[cfg(feature = "evm-payments")] +mod test { -#[tokio::test] -async fn file() -> Result<(), Box> { - common::enable_logging(); + use crate::common; + use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; + use crate::evm::Client; + use bytes::Bytes; + use eyre::bail; + use std::time::Duration; + use tokio::time::sleep; - let network = evm_network_from_env(); - let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = evm_wallet_from_env_or_default(network); + #[tokio::test] + async fn file() -> Result<(), Box> { + common::enable_logging(); - // let data = common::gen_random_data(1024 * 1024 * 1000); - // let user_key = common::gen_random_data(32); + let network = evm_network_from_env(); + let mut client = Client::connect(&[]).await.unwrap(); + let mut wallet = evm_wallet_from_env_or_default(network); - let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) - .await?; + // let data = common::gen_random_data(1024 * 1024 * 1000); + // let user_key = common::gen_random_data(32); - sleep(Duration::from_secs(10)).await; + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; - let root_fetched = client.fetch_root(addr).await?; + sleep(Duration::from_secs(10)).await; - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); + let root_fetched = client.fetch_root(addr).await?; - 
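With the evm module and the feature-based Client aliases gone, lib.rs leaves a single public entry point and lets Cargo features decide what is compiled in; a consumer-side sketch (a hypothetical downstream crate; it assumes the `data` feature is enabled and that the connect error converts into the caller's error type):

// Downstream: autonomi = { version = "...", features = ["data"] }
use autonomi::{Client, Multiaddr};

async fn connect_only(peers: &[Multiaddr]) -> Result<Client, Box<dyn std::error::Error>> {
    // One entry point regardless of which payment or storage features are enabled.
    Ok(Client::connect(peers).await?)
}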
Ok(()) -} - -#[cfg(feature = "vault")] -#[tokio::test] -async fn file_into_vault() -> eyre::Result<()> { - common::enable_logging(); - - let network = evm_network_from_env(); + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); - let mut client = Client::connect(&[]) - .await? - .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + Ok(()) + } - let mut wallet = evm_wallet_from_env_or_default(network); + #[cfg(feature = "vault")] + #[tokio::test] + async fn file_into_vault() -> eyre::Result<()> { + common::enable_logging(); - let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) - .await?; - sleep(Duration::from_secs(2)).await; + let network = evm_network_from_env(); - let root_fetched = client.fetch_root(addr).await?; + let mut client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); + let mut wallet = evm_wallet_from_env_or_default(network); - // now assert over the stored account packet - let new_client = Client::connect(&[]) - .await? - .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + sleep(Duration::from_secs(2)).await; - if let Some(ap) = new_client.fetch_and_decrypt_vault().await? { - let ap_root_fetched = Client::deserialise_root(ap)?; + let root_fetched = client.fetch_root(addr).await?; assert_eq!( - root.map, ap_root_fetched.map, + root.map, root_fetched.map, "root fetched should match root put" ); - } else { - bail!("No account packet found"); - } - Ok(()) + // now assert over the stored account packet + let new_client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + if let Some(ap) = new_client.fetch_and_decrypt_vault().await? { + let ap_root_fetched = Client::deserialise_root(ap)?; + + assert_eq!( + root.map, ap_root_fetched.map, + "root fetched should match root put" + ); + } else { + bail!("No account packet found"); + } + + Ok(()) + } } diff --git a/autonomi/tests/evm/mod.rs b/autonomi/tests/evm/mod.rs index fa74db16b4..cdddaa504a 100644 --- a/autonomi/tests/evm/mod.rs +++ b/autonomi/tests/evm/mod.rs @@ -1,5 +1,3 @@ -use autonomi; - #[cfg(feature = "files")] mod file; #[cfg(feature = "data")] @@ -7,5 +5,3 @@ mod put; #[cfg(feature = "registers")] mod register; mod wallet; - -pub type Client = autonomi::evm::client::EvmClient; diff --git a/autonomi/tests/evm/put.rs b/autonomi/tests/evm/put.rs index 7502ceef4d..9d6a236f85 100644 --- a/autonomi/tests/evm/put.rs +++ b/autonomi/tests/evm/put.rs @@ -2,9 +2,7 @@ use std::time::Duration; use crate::common; use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; -use crate::evm::Client; -use autonomi::client::data::Data; -use autonomi::client::ClientWrapper; +use autonomi::Client; use tokio::time::sleep; #[tokio::test] diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 8369665c12..7ddad3cdce 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -379,7 +379,9 @@ impl Network { }) => { // Check the quote itself is valid. 
if quote.cost - != AttoTokens::from_u64(calculate_cost_for_records(quote.quoting_metrics.close_records_stored)) + != AttoTokens::from_u64(calculate_cost_for_records( + quote.quoting_metrics.close_records_stored, + )) { warn!("Received invalid quote from {peer_address:?}, {quote:?}"); continue; diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 7ce96c2e41..fb58db61e8 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -964,7 +964,9 @@ mod tests { use quickcheck::*; use sn_evm::utils::dummy_address; use sn_evm::{PaymentQuote, RewardsAddress}; - use sn_protocol::storage::{try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, Scratchpad}; + use sn_protocol::storage::{ + try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, Scratchpad, + }; use std::collections::BTreeMap; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use tokio::runtime::Runtime; @@ -1656,8 +1658,10 @@ mod tests { peer.records_stored.fetch_add(1, Ordering::Relaxed); if peer_index == payee_index { - peer.nanos_earned - .fetch_add(cost.as_atto().try_into().unwrap_or(u64::MAX), Ordering::Relaxed); + peer.nanos_earned.fetch_add( + cost.as_atto().try_into().unwrap_or(u64::MAX), + Ordering::Relaxed, + ); peer.payments_received.fetch_add(1, Ordering::Relaxed); } } diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index f78d0990fa..85a38a6c1c 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -12,7 +12,8 @@ use sn_evm::ProofOfPayment; use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ storage::{ - try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, Scratchpad, SpendAddress + try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, + Scratchpad, SpendAddress, }, NetworkAddress, PrettyPrintRecordKey, }; diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 3f31ac899e..6373ba46d4 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,8 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, - increment_port_option, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; use color_eyre::{eyre::eyre, Result}; @@ -22,7 +21,8 @@ use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, - rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, + rpc::{RpcActions, RpcClient}, + NodeRegistry, NodeServiceData, ServiceStatus, }; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, From e9b7ecf47f33ec913b751cba1015199ce08a96e9 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Tue, 1 Oct 2024 09:00:50 +0200 Subject: [PATCH 075/255] fix(launchpad): typo on error popup --- node-launchpad/src/components/status.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 3283a16de9..7d31f1fc92 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -374,7 +374,7 @@ impl Component for Status { } StatusActions::ErrorScalingUpNodes { raw_error } => { self.error_popup = Some(ErrorPopup::new( - "Erro ".to_string(), + 
"Error".to_string(), "Error adding new nodes".to_string(), raw_error, )); From d5e0ccae23851b2f5838ba0c0040168493a6279d Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 1 Oct 2024 11:01:00 +0200 Subject: [PATCH 076/255] refactor(autonomi): get rid of the native api --- autonomi/Cargo.toml | 7 +- autonomi/src/client/data.rs | 17 +- autonomi/src/client/files.rs | 9 +- autonomi/src/client/registers.rs | 87 +------ autonomi/src/client/vault.rs | 19 +- autonomi/src/evm/client/data.rs | 216 ---------------- autonomi/src/lib.rs | 12 +- autonomi/src/native/client/data.rs | 211 --------------- autonomi/src/native/client/files.rs | 64 ----- autonomi/src/native/client/mod.rs | 35 --- autonomi/src/native/client/registers.rs | 80 ------ autonomi/src/native/client/transfers.rs | 327 ------------------------ autonomi/src/native/client/vault.rs | 107 -------- autonomi/src/native/mod.rs | 5 - autonomi/src/native/wallet/error.rs | 15 -- autonomi/src/native/wallet/mod.rs | 151 ----------- autonomi/tests/common.rs | 78 ++++++ autonomi/tests/common/mod.rs | 189 -------------- autonomi/tests/evm/file.rs | 82 ------ autonomi/tests/evm/mod.rs | 7 - autonomi/tests/{native => }/file.rs | 34 ++- autonomi/tests/integration.rs | 5 - autonomi/tests/native/mod.rs | 10 - autonomi/tests/native/put.rs | 25 -- autonomi/tests/native/register.rs | 48 ---- autonomi/tests/{evm => }/put.rs | 8 +- autonomi/tests/{evm => }/register.rs | 10 +- autonomi/tests/{evm => }/wallet.rs | 2 + 28 files changed, 145 insertions(+), 1715 deletions(-) delete mode 100644 autonomi/src/evm/client/data.rs delete mode 100644 autonomi/src/native/client/data.rs delete mode 100644 autonomi/src/native/client/files.rs delete mode 100644 autonomi/src/native/client/mod.rs delete mode 100644 autonomi/src/native/client/registers.rs delete mode 100644 autonomi/src/native/client/transfers.rs delete mode 100644 autonomi/src/native/client/vault.rs delete mode 100644 autonomi/src/native/mod.rs delete mode 100644 autonomi/src/native/wallet/error.rs delete mode 100644 autonomi/src/native/wallet/mod.rs create mode 100644 autonomi/tests/common.rs delete mode 100644 autonomi/tests/common/mod.rs delete mode 100644 autonomi/tests/evm/file.rs delete mode 100644 autonomi/tests/evm/mod.rs rename autonomi/tests/{native => }/file.rs (66%) delete mode 100644 autonomi/tests/integration.rs delete mode 100644 autonomi/tests/native/mod.rs delete mode 100644 autonomi/tests/native/put.rs delete mode 100644 autonomi/tests/native/register.rs rename autonomi/tests/{evm => }/put.rs (80%) rename autonomi/tests/{evm => }/register.rs (89%) rename autonomi/tests/{evm => }/wallet.rs (98%) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 8e6dc03f72..aeeebe5aaf 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -10,17 +10,14 @@ readme = "README.md" repository = "https://github.com/maidsafe/safe_network" [features] -default = ["evm-payments", "data"] -full = ["data", "registers", "vault", "evm-payments"] +default = ["data"] +full = ["data", "registers", "vault"] data = [] vault = ["data"] files = ["fs", "data"] fs = [] local = ["sn_networking/local-discovery"] registers = [] -transfers = [] -native-payments = [] -evm-payments = [] [dependencies] bip39 = "2.0.0" diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 167a4bd872..fa1114f37e 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -36,14 +36,6 @@ pub enum PutError { Network(#[from] NetworkError), #[error("Error occurred during payment.")] PayError(#[from] 
PayError),
-
-    // native token
-    #[cfg(feature = "native-payments")]
-    #[error("A wallet error occurred.")]
-    Wallet(#[from] sn_transfers::WalletError),
-
-    // evm token
-    #[cfg(feature = "evm-payments")]
     #[error("A wallet error occurred.")]
     Wallet(#[from] sn_evm::EvmError),
 }
@@ -57,15 +49,8 @@ pub enum PayError {
     CouldNotGetStoreCosts(NetworkError),
     #[error("Could not simultaneously fetch store costs: {0:?}")]
     JoinError(JoinError),
-    #[cfg(feature = "native-payments")]
-    #[error("Hot wallet error")]
-    WalletError(#[from] sn_transfers::WalletError),
-    #[cfg(feature = "evm-payments")]
     #[error("Wallet error: {0:?}")]
-    EvmWalletError(#[from] wallet::Error),
-    #[cfg(feature = "native-payments")]
-    #[error("Failed to send spends")]
-    SendSpendsError(#[from] crate::native::client::transfers::SendSpendsError),
+    WalletError(#[from] wallet::Error),
 }
 
 /// Errors that can occur during the get operation.
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index 41cf3a328a..524fc6fb7e 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -1,9 +1,11 @@
 use crate::client::data::{GetError, PutError};
-use crate::client::{Client, ClientWrapper};
+use crate::client::Client;
 use bytes::Bytes;
+use evmlib::wallet::Wallet;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::path::PathBuf;
+use walkdir::WalkDir;
 use xor_name::XorName;
 
 /// Directory-like structure containing file paths and their metadata.
@@ -43,6 +45,11 @@ impl Client {
     /// Fetch a directory from the network.
     pub async fn fetch_root(&mut self, address: XorName) -> Result<Root, GetError> {
         let data = self.get(address).await?;
+
+        Self::deserialize_root(data)
+    }
+
+    pub fn deserialize_root(data: Bytes) -> Result<Root, GetError> {
         let root: Root = rmp_serde::from_slice(&data[..]).expect("TODO");
 
         Ok(root)
diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 47d4dbdf74..43f35c40db 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -1,9 +1,10 @@
 use std::collections::BTreeSet;
 
 use crate::client::data::PayError;
-use crate::client::{Client, ClientWrapper};
+use crate::client::Client;
 use bls::SecretKey;
 use bytes::Bytes;
+use evmlib::wallet::Wallet;
 use libp2p::kad::{Quorum, Record};
 use sn_networking::GetRecordCfg;
 use sn_networking::NetworkError;
@@ -13,8 +14,10 @@
 use sn_protocol::storage::try_serialize_record;
 use sn_protocol::storage::RecordKind;
 use sn_protocol::storage::RegisterAddress;
 use sn_protocol::NetworkAddress;
-use sn_registers::EntryHash;
+use sn_registers::Register as ClientRegister;
 use sn_registers::SignedRegister;
+use sn_registers::{EntryHash, Permissions};
+use xor_name::XorName;
 
 #[derive(Debug, thiserror::Error)]
 pub enum RegisterError {
@@ -26,12 +29,8 @@ pub enum RegisterError {
     FailedVerification,
     #[error("Payment failure occurred during register creation.")]
     Pay(#[from] PayError),
-    #[cfg(feature = "native-payments")]
     #[error("Failed to retrieve wallet payment")]
-    Wallet(#[from] sn_transfers::WalletError),
-    #[cfg(feature = "evm-payments")]
-    #[error("Failed to retrieve wallet payment")]
-    EvmWallet(#[from] evmlib::wallet::Error),
+    Wallet(#[from] evmlib::wallet::Error),
     #[error("Failed to write to low-level register")]
     Write(#[source] sn_registers::Error),
     #[error("Failed to sign register")]
@@ -193,82 +192,10 @@ impl Client {
             verification: None,
         };
 
-        self.network().put_record(record, &put_cfg).await?;
+        self.network.put_record(record, &put_cfg).await?;
 
         Ok(Register {
             inner: signed_register,
         })
     }
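A note on the two helpers removed below: a register is a CRDT, so concurrent writes can leave several branch heads. The deleted update_register collapses them by reading all current heads and passing the whole set as the ancestors of the new entry. A minimal sketch of that merge step, assuming the sn_registers types used in this file:

    // Gather every current branch head of the register...
    let children: BTreeSet<EntryHash> =
        register.read().into_iter().map(|(entry_hash, _)| entry_hash).collect();
    // ...then write with all heads as ancestors, so the register
    // converges back to a single head containing `new_value`.
    let (_, op) = register.write(new_value.to_vec(), &children, &owner)?;
    signed_register.add_op(op)?;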
-
-    /// Fetches a Register from the network.
-    pub async fn fetch_register(
-        &self,
-        address: RegisterAddress,
-    ) -> Result<Register, RegisterError> {
-        let network_address = NetworkAddress::from_register_address(address);
-        let key = network_address.to_record_key();
-
-        let get_cfg = GetRecordCfg {
-            get_quorum: Quorum::One,
-            retry_strategy: None,
-            target_record: None,
-            expected_holders: Default::default(),
-            is_register: true,
-        };
-
-        let record = self
-            .network()
-            .get_record_from_network(key, &get_cfg)
-            .await?;
-
-        let register: SignedRegister =
-            try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?;
-
-        Ok(Register { inner: register })
-    }
-
-    /// Updates a Register on the network with a new value. This will overwrite existing value(s).
-    pub async fn update_register(
-        &self,
-        register: Register,
-        new_value: Bytes,
-        owner: SecretKey,
-    ) -> Result<(), RegisterError> {
-        // Fetch the current register
-        let mut signed_register = register.inner;
-        let mut register = signed_register.clone().register().expect("TODO");
-
-        // Get all current branches
-        let children: BTreeSet<EntryHash> = register.read().into_iter().map(|(e, _)| e).collect();
-
-        // Write the new value to all branches
-        let (_, op) = register
-            .write(new_value.to_vec(), &children, &owner)
-            .expect("TODO");
-
-        // Apply the operation to the register
-        signed_register.add_op(op.clone()).expect("TODO");
-
-        // Prepare the record for network storage
-        let record = Record {
-            key: NetworkAddress::from_register_address(*register.address()).to_record_key(),
-            value: try_serialize_record(&signed_register, RecordKind::Register)
-                .map_err(|_| RegisterError::Serialization)?
-                .to_vec(),
-            publisher: None,
-            expires: None,
-        };
-
-        let put_cfg = PutRecordCfg {
-            put_quorum: Quorum::All,
-            retry_strategy: None,
-            use_put_record_to: None,
-            verification: None,
-        };
-
-        // Store the updated register on the network
-        self.network().put_record(record, &put_cfg).await?;
-
-        Ok(())
-    }
-}
diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs
index 779aba4b59..527ca2b9b7 100644
--- a/autonomi/src/client/vault.rs
+++ b/autonomi/src/client/vault.rs
@@ -1,12 +1,17 @@
 use std::collections::HashSet;
 
-use crate::client::{Client, ClientWrapper};
+use crate::client::data::PutError;
+use crate::client::Client;
 use bls::SecretKey;
 use bytes::Bytes;
+use evmlib::wallet::Wallet;
-use libp2p::kad::Quorum;
-use sn_networking::{GetRecordCfg, NetworkError};
-use sn_protocol::storage::{Scratchpad, ScratchpadAddress};
+use libp2p::kad::{Quorum, Record};
+use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind};
+use sn_protocol::storage::{
+    try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress,
+};
 use sn_protocol::{storage::try_deserialize_record, NetworkAddress};
+use tracing::info;
 
 #[derive(Debug, thiserror::Error)]
 pub enum VaultError {
@@ -90,7 +95,7 @@ impl Client {
         wallet: &mut Wallet,
     ) -> Result<Option<u64>, PutError> {
         // Exit early if no vault packet defined
-        let Some(client_sk) = self.client().vault_secret_key.as_ref() else {
+        let Some(client_sk) = self.vault_secret_key.as_ref() else {
             return Ok(None);
         };
@@ -100,7 +105,7 @@ impl Client {
         let mut is_new = true;
         let mut scratch = if let Ok(existing_data) = pad_res {
-            tracing::info!("Scratchpad already exists, returning existing data");
+            info!("Scratchpad already exists, returning existing data");
 
             info!(
                 "scratch already exists, is version {:?}",
@@ -165,7 +170,7 @@ impl Client {
             )),
         };
 
-        self.network().put_record(record, &put_cfg).await?;
+        self.network.put_record(record, &put_cfg).await?;
 
         Ok(Some(next_count))
     }
 }
diff --git a/autonomi/src/evm/client/data.rs b/autonomi/src/evm/client/data.rs
deleted file mode 100644
index 1f724fe127..0000000000
--- a/autonomi/src/evm/client/data.rs
+++ /dev/null
@@ -1,216 +0,0 @@
-use crate::client::data::{Data, PayError, PutError};
-use crate::evm::client::Client;
-use crate::self_encryption::encrypt;
-use bytes::Bytes;
-use evmlib::common::{QuoteHash, QuotePayment, TxHash};
-
-#[cfg(feature = "evm-payments")]
-use evmlib::wallet::Wallet;
-use libp2p::futures;
-use libp2p::kad::{Quorum, Record};
-use sn_evm::ProofOfPayment;
-use sn_networking::PutRecordCfg;
-use sn_networking::{Network, NetworkError, PayeeQuote};
-use sn_protocol::{
-    storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind},
-    NetworkAddress,
-};
-use std::collections::{BTreeMap, HashMap};
-use xor_name::XorName;
-
-impl Client {
-    /// Upload a piece of data to the network. This data will be self-encrypted,
-    /// and the data map XOR address will be returned.
-    pub async fn put(&mut self, data: Bytes, wallet: &Wallet) -> Result<XorName, PutError> {
-        let now = std::time::Instant::now();
-        let (data_map_chunk, chunks) = encrypt(data)?;
-
-        tracing::debug!("Encryption took: {:.2?}", now.elapsed());
-
-        let map_xor_name = *data_map_chunk.address().xorname();
-        let mut xor_names = vec![map_xor_name];
-
-        for chunk in &chunks {
-            xor_names.push(*chunk.name());
-        }
-
-        // Pay for all chunks + data map chunk
-        let (payment_proofs, _free_chunks) = self.pay(xor_names.into_iter(), wallet).await?;
-
-        // Upload data map
-        if let Some(proof) = payment_proofs.get(&map_xor_name) {
-            self.upload_chunk(data_map_chunk.clone(), proof.clone())
-                .await?;
-        }
-
-        // Upload the rest of the chunks
-        for chunk in chunks {
-            if let Some(proof) = payment_proofs.get(chunk.name()) {
-                self.upload_chunk(chunk, proof.clone()).await?;
-            }
-        }
-
-        Ok(map_xor_name)
-    }
-
-    pub(crate) async fn pay(
-        &mut self,
-        content_addrs: impl Iterator<Item = XorName>,
-        #[cfg(feature = "vault")]
-        wallet: &Wallet,
-    ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
-        let cost_map = self.get_store_quotes(content_addrs).await?;
-        let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
-
-        // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
-        // TODO: retry when it fails?
-        // Execute chunk payments
-        let payments = wallet
-            .pay_for_quotes(quote_payments)
-            .await
-            .map_err(|err| PayError::from(err.0))?;
-
-        let proofs = construct_proofs(&cost_map, &payments);
-
-        tracing::trace!(
-            "Chunk payments of {} chunks completed. {} chunks were free / already paid for",
-            proofs.len(),
-            skipped_chunks.len()
-        );
-
-        Ok((proofs, skipped_chunks))
-    }
-
-    async fn get_store_quotes(
-        &mut self,
-        content_addrs: impl Iterator<Item = XorName>,
-    ) -> Result<HashMap<XorName, PayeeQuote>, PayError> {
-        let futures: Vec<_> = content_addrs
-            .into_iter()
-            .map(|content_addr| fetch_store_quote_with_retries(&self.network(), content_addr))
-            .collect();
-
-        let quotes = futures::future::try_join_all(futures).await?;
-
-        Ok(quotes.into_iter().collect::<HashMap<XorName, PayeeQuote>>())
-    }
-
-    /// Directly writes Chunks to the network in the form of immutable self encrypted chunks.
-    async fn upload_chunk(
-        &self,
-        chunk: Chunk,
-        proof_of_payment: ProofOfPayment,
-    ) -> Result<(), PutError> {
-        self.store_chunk(chunk, proof_of_payment).await?;
-        Ok(())
-    }
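The payment flow deleted here splits into three steps: quote, pay, prove. A condensed sketch of how the helpers in this file fit together (names as used above and below; not a drop-in snippet):

    // 1. Fetch a store quote for every address (with retries, see below).
    let cost_map = self.get_store_quotes(xor_names.into_iter()).await?;
    // 2. Pay only the non-zero quotes; zero-cost addresses are already stored.
    let (quote_payments, skipped) = extract_quote_payments(&cost_map);
    let payments = wallet
        .pay_for_quotes(quote_payments)
        .await
        .map_err(|err| PayError::from(err.0))?;
    // 3. Join quote hashes against tx hashes to build per-chunk proofs.
    let proofs = construct_proofs(&cost_map, &payments);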
-
-    /// Actually store a chunk to a peer.
-    async fn store_chunk(&self, chunk: Chunk, payment: ProofOfPayment) -> Result<(), PutError> {
-        let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID");
-
-        tracing::debug!("Storing chunk: {chunk:?} to {:?}", storing_node);
-
-        let key = chunk.network_address().to_record_key();
-
-        let record_kind = RecordKind::ChunkWithPayment;
-        let record = Record {
-            key: key.clone(),
-            value: try_serialize_record(&(payment, chunk.clone()), record_kind)
-                .map_err(|_| PutError::Serialization)?
-                .to_vec(),
-            publisher: None,
-            expires: None,
-        };
-
-        let put_cfg = PutRecordCfg {
-            put_quorum: Quorum::One,
-            retry_strategy: None,
-            use_put_record_to: Some(vec![storing_node]),
-            verification: None,
-        };
-        Ok(self.network().put_record(record, &put_cfg).await?)
-    }
-}
-
-/// Fetch a store quote for a content address with a retry strategy.
-async fn fetch_store_quote_with_retries(
-    network: &Network,
-    content_addr: XorName,
-) -> Result<(XorName, PayeeQuote), PayError> {
-    let mut retries = 0;
-
-    loop {
-        match fetch_store_quote(network, content_addr).await {
-            Ok(quote) => {
-                break Ok((content_addr, quote));
-            }
-            Err(err) if retries < 2 => {
-                retries += 1;
-                tracing::error!("Error while fetching store quote: {err:?}, retry #{retries}");
-            }
-            Err(err) => {
-                tracing::error!(
-                    "Error while fetching store quote: {err:?}, stopping after {retries} retries"
-                );
-                break Err(PayError::CouldNotGetStoreQuote(content_addr));
-            }
-        }
-    }
-}
-
-/// Fetch a store quote for a content address.
-async fn fetch_store_quote(
-    network: &Network,
-    content_addr: XorName,
-) -> Result<PayeeQuote, NetworkError> {
-    network
-        .get_store_costs_from_network(
-            NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)),
-            vec![],
-        )
-        .await
-}
-
-/// Split a cost map into the payments that still need to be executed and the
-/// addresses that are already paid for (zero-cost quotes).
-fn extract_quote_payments(
-    cost_map: &HashMap<XorName, PayeeQuote>,
-) -> (Vec<QuotePayment>, Vec<XorName>) {
-    let mut to_be_paid = vec![];
-    let mut already_paid = vec![];
-
-    for (chunk_address, quote) in cost_map.iter() {
-        if quote.2.cost.is_zero() {
-            already_paid.push(*chunk_address);
-        } else {
-            to_be_paid.push((
-                quote.2.hash(),
-                quote.2.rewards_address,
-                quote.2.cost.as_atto(),
-            ));
-        }
-    }
-
-    (to_be_paid, already_paid)
-}
-
-/// Construct payment proofs from cost map and payments map.
-fn construct_proofs(
-    cost_map: &HashMap<XorName, PayeeQuote>,
-    payments: &BTreeMap<QuoteHash, TxHash>,
-) -> HashMap<XorName, ProofOfPayment> {
-    cost_map
-        .iter()
-        .filter_map(|(xor_name, (_, _, quote))| {
-            payments.get(&quote.hash()).map(|tx_hash| {
-                (
-                    *xor_name,
-                    ProofOfPayment {
-                        quote: quote.clone(),
-                        tx_hash: *tx_hash,
-                    },
-                )
-            })
-        })
-        .collect()
-}
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index f156d7aadc..0e28f17dcb 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -22,18 +22,12 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 
 pub mod client;
+#[cfg(feature = "data")]
+mod self_encryption;
+
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
 pub use bytes::Bytes;
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
pub use libp2p::Multiaddr; pub use client::Client; - -#[cfg(feature = "native-payments")] -pub mod native; - -#[cfg(feature = "data")] -mod self_encryption; - -#[cfg(feature = "transfers")] -const VERIFY_STORE: bool = true; diff --git a/autonomi/src/native/client/data.rs b/autonomi/src/native/client/data.rs deleted file mode 100644 index 2b0a96bfb7..0000000000 --- a/autonomi/src/native/client/data.rs +++ /dev/null @@ -1,211 +0,0 @@ -use std::collections::BTreeMap; - -use super::transfers::SendSpendsError; -use crate::client::data::{Data, PayError, PutError}; -use crate::client::ClientWrapper; -use crate::native::client::NativeClient; -use crate::self_encryption::encrypt; -use bytes::Bytes; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::PutRecordCfg; -use sn_protocol::{ - storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind}, - NetworkAddress, -}; -use sn_transfers::{HotWallet, MainPubkey, NanoTokens, Payment, PaymentQuote}; -use tokio::task::JoinSet; -use xor_name::XorName; - -impl Data for NativeClient {} - -impl NativeClient { - /// Upload a piece of data to the network. This data will be self-encrypted, - /// and the data map XOR address will be returned. - pub async fn put(&mut self, data: Bytes, wallet: &mut HotWallet) -> Result { - let now = std::time::Instant::now(); - let (map, chunks) = encrypt(data)?; - tracing::debug!("Encryption took: {:.2?}", now.elapsed()); - - let map_xor_name = *map.address().xorname(); - - let mut xor_names = vec![]; - xor_names.push(map_xor_name); - - for chunk in &chunks { - xor_names.push(*chunk.name()); - } - - let (_, skipped_chunks) = self.pay(xor_names.into_iter(), wallet).await?; - - // TODO: Upload in parallel - if !skipped_chunks.contains(map.name()) { - self.upload_chunk(map, wallet).await?; - } - - for chunk in chunks { - if skipped_chunks.contains(chunk.name()) { - continue; - } - self.upload_chunk(chunk, wallet).await?; - } - - Ok(map_xor_name) - } - - pub(crate) async fn pay( - &mut self, - content_addrs: impl Iterator, - wallet: &mut HotWallet, - ) -> Result<(NanoTokens, Vec), PayError> { - let mut tasks = JoinSet::new(); - - for content_addr in content_addrs { - let network = self.network().clone(); - - tasks.spawn(async move { - // TODO: retry, but where? - let cost = network - .get_store_costs_from_network( - NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), - vec![], - ) - .await - .map_err(PayError::CouldNotGetStoreCosts); - - tracing::debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - - (content_addr, cost) - }); - } - - tracing::debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(content_addr); - tracing::debug!("Skipped existing chunk {content_addr:?}"); - } else { - tracing::debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(content_addr, (cost.1, cost.2, cost.0.to_bytes())); - } - } - Ok((content_addr, Err(err))) => { - tracing::warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(PayError::JoinError(e)); - } - } - } - - let storage_cost = if cost_map.is_empty() { - NanoTokens::zero() - } else { - self.pay_for_records(&cost_map, wallet).await? 
- }; - - Ok((storage_cost, skipped_chunks)) - } - - async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - wallet: &mut HotWallet, - ) -> Result { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transactions(wallet).await; - - let total_cost = wallet.local_send_storage_payment(cost_map)?; - - // send to network - tracing::trace!("Sending storage payment transfer to the network"); - - let spend_attempt_result = self - .send_spends(wallet.unconfirmed_spend_requests().iter()) - .await; - - tracing::trace!("send_spends of {} chunks completed", cost_map.len(),); - - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. - if let Err(error) = spend_attempt_result { - tracing::warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later."); - - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let SendSpendsError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - tracing::warn!( - "Removing double spends CashNote from wallet: {cash_note_key:?}" - ); - wallet.mark_notes_as_spent([cash_note_key]); - wallet.clear_specific_spend_request(*cash_note_key); - } - } - - wallet.store_unconfirmed_spend_requests()?; - - return Err(PayError::SendSpendsError(error)); - } else { - tracing::info!("Spend has completed: {:?}", spend_attempt_result); - wallet.clear_confirmed_spend_requests(); - } - - tracing::trace!("clear up spends of {} chunks completed", cost_map.len(),); - - Ok(total_cost.0) - } - - /// Directly writes Chunks to the network in the form of immutable self encrypted chunks. - async fn upload_chunk(&self, chunk: Chunk, wallet: &mut HotWallet) -> Result<(), PutError> { - let xor_name = *chunk.name(); - let (payment, payee) = self.get_recent_payment_for_addr(&xor_name, wallet)?; - - self.store_chunk(chunk, payee, payment).await?; - - wallet.api().remove_payment_transaction(&xor_name); - - Ok(()) - } - - /// Actually store a chunk to a peer. - async fn store_chunk( - &self, - chunk: Chunk, - payee: PeerId, - payment: Payment, - ) -> Result<(), PutError> { - tracing::debug!("Storing chunk: {chunk:?} to {payee:?}"); - - let key = chunk.network_address().to_record_key(); - - let record_kind = RecordKind::ChunkWithPayment; - - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - - Ok(self.network().put_record(record, &put_cfg).await?) - } -} diff --git a/autonomi/src/native/client/files.rs b/autonomi/src/native/client/files.rs deleted file mode 100644 index b73839bf01..0000000000 --- a/autonomi/src/native/client/files.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::{collections::HashMap, path::PathBuf}; - -use crate::client::files::{FilePointer, Files, Root, UploadError}; -use crate::native::client::NativeClient; -use bytes::Bytes; -use sn_transfers::HotWallet; -use walkdir::WalkDir; -use xor_name::XorName; - -impl Files for NativeClient {} - -impl NativeClient { - /// Upload a directory to the network. The directory is recursively walked. 
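The deleted uploader below leaves FilePointer's created_at/modified_at at zero (see its TODO). A hypothetical helper for populating them from filesystem metadata, assuming the fields are Unix-epoch seconds:

    use std::time::UNIX_EPOCH;

    /// Hypothetical: (created_at, modified_at) as Unix-epoch seconds, 0 on failure.
    fn file_times(path: &std::path::Path) -> (u64, u64) {
        let meta = std::fs::metadata(path).ok();
        let secs = |t: Option<std::time::SystemTime>| {
            t.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
                .map(|d| d.as_secs())
                .unwrap_or(0)
        };
        (
            secs(meta.as_ref().and_then(|m| m.created().ok())),
            secs(meta.as_ref().and_then(|m| m.modified().ok())),
        )
    }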
- #[cfg(feature = "fs")] - pub async fn upload_from_dir( - &mut self, - path: PathBuf, - wallet: &mut HotWallet, - ) -> Result<(Root, XorName), UploadError> { - let mut map = HashMap::new(); - - for entry in WalkDir::new(path) { - let entry = entry?; - - if !entry.file_type().is_file() { - continue; - } - - let path = entry.path().to_path_buf(); - tracing::info!("Uploading file: {path:?}"); - let file = upload_from_file(self, path.clone(), wallet).await?; - map.insert(path, file); - } - - let root = Root { map }; - let root_serialized = Bytes::from(rmp_serde::to_vec(&root)?); - - #[cfg(feature = "vault")] - self.write_bytes_to_vault_if_defined(root_serialized.clone(), wallet) - .await?; - - let xor_name = self.put(root_serialized, wallet).await?; - - Ok((root, xor_name)) - } -} - -async fn upload_from_file( - client: &mut NativeClient, - path: PathBuf, - wallet: &mut HotWallet, -) -> Result { - let data = tokio::fs::read(path).await?; - let data = Bytes::from(data); - - let addr = client.put(data, wallet).await?; - - // TODO: Set created_at and modified_at - Ok(FilePointer { - data_map: addr, - created_at: 0, - modified_at: 0, - }) -} diff --git a/autonomi/src/native/client/mod.rs b/autonomi/src/native/client/mod.rs deleted file mode 100644 index 766aa67416..0000000000 --- a/autonomi/src/native/client/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::client::{Client, ClientWrapper}; - -#[cfg(feature = "data")] -pub mod data; -#[cfg(feature = "files")] -pub mod files; -#[cfg(feature = "registers")] -pub mod registers; -#[cfg(feature = "transfers")] -pub mod transfers; -#[cfg(feature = "vault")] -pub mod vault; - -#[derive(Clone)] -pub struct NativeClient { - client: Client, -} - -impl ClientWrapper for NativeClient { - fn from_client(client: Client) -> Self { - NativeClient { client } - } - - fn client(&self) -> &Client { - &self.client - } - - fn client_mut(&mut self) -> &mut Client { - &mut self.client - } - - fn into_client(self) -> Client { - self.client - } -} diff --git a/autonomi/src/native/client/registers.rs b/autonomi/src/native/client/registers.rs deleted file mode 100644 index 6ca80f36d6..0000000000 --- a/autonomi/src/native/client/registers.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::client::registers::{Register, RegisterError, Registers}; -use crate::client::ClientWrapper; -use crate::native::client::NativeClient; -use bls::SecretKey; -use bytes::Bytes; -use libp2p::kad::{Quorum, Record}; -use sn_networking::PutRecordCfg; -use sn_protocol::storage::try_serialize_record; -use sn_protocol::storage::RecordKind; -use sn_protocol::NetworkAddress; -use sn_registers::Permissions; -use sn_registers::Register as ClientRegister; -use sn_transfers::HotWallet; -use xor_name::XorName; - -impl Registers for NativeClient {} - -impl NativeClient { - /// Creates a new Register with an initial value and uploads it to the network. - pub async fn create_register( - &mut self, - value: Bytes, - name: XorName, - owner: SecretKey, - wallet: &mut HotWallet, - ) -> Result { - let pk = owner.public_key(); - - // Owner can write to the register. 
- let permissions = Permissions::new_with([pk]); - let mut register = ClientRegister::new(pk, name, permissions); - let address = NetworkAddress::from_register_address(*register.address()); - - let entries = register - .read() - .into_iter() - .map(|(entry_hash, _value)| entry_hash) - .collect(); - register - .write(value.into(), &entries, &owner) - .map_err(RegisterError::Write)?; - - let _payment_result = self - .pay(std::iter::once(register.address().xorname()), wallet) - .await?; - - let (payment, payee) = - self.get_recent_payment_for_addr(®ister.address().xorname(), wallet)?; - - let signed_register = register - .clone() - .into_signed(&owner) - .map_err(RegisterError::CouldNotSign)?; - - let record = Record { - key: address.to_record_key(), - value: try_serialize_record( - &(payment, &signed_register), - RecordKind::RegisterWithPayment, - ) - .map_err(|_| RegisterError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: None, - use_put_record_to: Some(vec![payee]), - verification: None, - }; - - self.network().put_record(record, &put_cfg).await?; - - Ok(Register { - inner: signed_register, - }) - } -} diff --git a/autonomi/src/native/client/transfers.rs b/autonomi/src/native/client/transfers.rs deleted file mode 100644 index 0117631386..0000000000 --- a/autonomi/src/native/client/transfers.rs +++ /dev/null @@ -1,327 +0,0 @@ -use crate::client::ClientWrapper; -use crate::native::client::NativeClient; -use crate::native::wallet; -use crate::native::wallet::MemWallet; -use crate::VERIFY_STORE; -use libp2p::{ - futures::future::join_all, - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::{ - GetRecordCfg, GetRecordError, Network, NetworkError, PutRecordCfg, VerificationKind, -}; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind, RetryStrategy, SpendAddress}, - NetworkAddress, PrettyPrintRecordKey, -}; -use sn_transfers::CashNote; -use sn_transfers::Payment; -use sn_transfers::UniquePubkey; -use sn_transfers::{HotWallet, SignedSpend}; -use sn_transfers::{MainPubkey, NanoTokens}; -use sn_transfers::{SpendReason, Transfer}; -use std::collections::BTreeSet; -use std::collections::HashSet; -use xor_name::XorName; - -#[derive(Debug, thiserror::Error)] -pub enum SendSpendsError { - /// The cashnotes that were attempted to be spent have already been spent to another address - #[error("Double spend attempted with cashnotes: {0:?}")] - DoubleSpendAttemptedForCashNotes(BTreeSet), - /// A general error when a transfer fails - #[error("Failed to send tokens due to {0}")] - CouldNotSendMoney(String), -} - -#[derive(Debug, thiserror::Error)] -pub enum TransferError { - #[error("Failed to send tokens due to {0}")] - CouldNotSendMoney(String), - #[error("Wallet error: {0:?}")] - WalletError(#[from] wallet::error::WalletError), - #[error("Network error: {0:?}")] - NetworkError(#[from] NetworkError), -} - -#[derive(Debug, thiserror::Error)] -pub enum CashNoteError { - #[error("CashNote was already spent.")] - AlreadySpent, - #[error("Failed to get spend: {0:?}")] - FailedToGetSpend(String), -} - -#[derive(Debug, thiserror::Error)] -pub enum SendError { - #[error("CashNote amount unexpected: {0}")] - CashNoteAmountUnexpected(String), - #[error("CashNote has no parent spends.")] - CashNoteHasNoParentSpends, - #[error("Wallet error occurred during sending of transfer.")] - WalletError(#[from] wallet::error::WalletError), - #[error("Encountered transfer error during sending.")] - 
TransferError(#[from] sn_transfers::TransferError), - #[error("Spends error: {0:?}")] - SpendsError(#[from] SendSpendsError), -} - -#[derive(Debug, thiserror::Error)] -pub enum ReceiveError { - #[error("Could not deserialize `Transfer`.")] - TransferDeserializationFailed, - #[error("Transfer error occurred during receiving.")] - TransferError(#[from] TransferError), -} - -// Hide these from the docs. -#[doc(hidden)] -impl NativeClient { - /// Send spend requests to the network. - pub async fn send_spends( - &self, - spend_requests: impl Iterator, - ) -> Result<(), SendSpendsError> { - let mut tasks = Vec::new(); - - // send spends to the network in parralel - for spend_request in spend_requests { - tracing::debug!( - "sending spend request to the network: {:?}: {spend_request:#?}", - spend_request.unique_pubkey() - ); - - let the_task = async move { - let cash_note_key = spend_request.unique_pubkey(); - let result = store_spend(self.network().clone(), spend_request.clone()).await; - - (cash_note_key, result) - }; - tasks.push(the_task); - } - - // wait for all the tasks to complete and gather the errors - let mut errors = Vec::new(); - let mut double_spent_keys = BTreeSet::new(); - for (spend_key, spend_attempt_result) in join_all(tasks).await { - match spend_attempt_result { - Err(sn_networking::NetworkError::GetRecordError( - GetRecordError::RecordDoesNotMatch(_), - )) - | Err(sn_networking::NetworkError::GetRecordError(GetRecordError::SplitRecord { - .. - })) => { - tracing::warn!( - "Double spend detected while trying to spend: {:?}", - spend_key - ); - double_spent_keys.insert(*spend_key); - } - Err(e) => { - tracing::warn!( - "Spend request errored out when sent to the network {spend_key:?}: {e}" - ); - errors.push((spend_key, e)); - } - Ok(()) => { - tracing::trace!( - "Spend request was successfully sent to the network: {spend_key:?}" - ); - } - } - } - - // report errors accordingly - // double spend errors in priority as they should be dealt with by the wallet - if !double_spent_keys.is_empty() { - return Err(SendSpendsError::DoubleSpendAttemptedForCashNotes( - double_spent_keys, - )); - } - if !errors.is_empty() { - let mut err_report = "Failed to send spend requests to the network:".to_string(); - for (spend_key, e) in &errors { - tracing::warn!("Failed to send spend request to the network: {spend_key:?}: {e}"); - err_report.push_str(&format!("{spend_key:?}: {e}")); - } - return Err(SendSpendsError::CouldNotSendMoney(err_report)); - } - - Ok(()) - } - - /// Resend failed transactions. This can optionally verify the store has been successful. - /// This will attempt to GET the cash_note from the network. - pub(super) async fn resend_pending_transactions(&mut self, wallet: &mut HotWallet) { - if wallet.unconfirmed_spend_requests().is_empty() { - return; - } - - if self - .send_spends(wallet.unconfirmed_spend_requests().iter()) - .await - .is_ok() - { - wallet.clear_confirmed_spend_requests(); - } - } - - /// Deposits all valid `CashNotes` from a transfer into a wallet. 
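The receive path deleted below only deposits CashNotes it can prove are still unspent. Its condensed shape, using the names from this file (a sketch, not the literal code):

    // Unwrap the transfer with our key, verify the redemptions on the network,
    // then deposit each note only if its spend address is still vacant
    // (get_spend returning RecordNotFound means "not yet spent").
    let redemptions = wallet.unwrap_transfer(&transfer)?;
    let cash_notes = network
        .verify_cash_notes_redemptions(wallet.address(), &redemptions)
        .await?;
    for cash_note in cash_notes {
        if client.verify_if_cash_note_is_valid(&cash_note).await.is_ok() {
            wallet.deposit_cash_note(cash_note)?;
        }
    }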
- pub(super) async fn receive_transfer( - &self, - transfer: Transfer, - wallet: &mut MemWallet, - ) -> Result<(), TransferError> { - let cash_note_redemptions = wallet - .unwrap_transfer(&transfer) - .map_err(TransferError::WalletError)?; - - let cash_notes = self - .network() - .verify_cash_notes_redemptions(wallet.address(), &cash_note_redemptions) - .await?; - - for cash_note in cash_notes { - match self.verify_if_cash_note_is_valid(&cash_note).await { - Ok(_) => wallet.deposit_cash_note(cash_note)?, - Err(e) => { - tracing::warn!("Error verifying CashNote: {}", e); - } - } - } - - Ok(()) - } - - /// Verify if a `CashNote` is unspent. - pub(super) async fn verify_if_cash_note_is_valid( - &self, - cash_note: &CashNote, - ) -> Result<(), CashNoteError> { - let pk = cash_note.unique_pubkey(); - let addr = SpendAddress::from_unique_pubkey(&pk); - - match self.network().get_spend(addr).await { - // if we get a RecordNotFound, it means the CashNote is not spent, which is good - Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)) => Ok(()), - // if we get a spend, it means the CashNote is already spent - Ok(_) => Err(CashNoteError::AlreadySpent), - // report all other errors - Err(e) => Err(CashNoteError::FailedToGetSpend(format!("{e}"))), - } - } - - /// Returns the most recent cached Payment for a provided NetworkAddress. This function does not check if the - /// quote has expired or not. Use get_non_expired_payment_for_addr if you want to get a non expired one. - /// - /// If multiple payments have been made to the same address, then we pick the last one as it is the most recent. - pub fn get_recent_payment_for_addr( - &self, - xor_name: &XorName, - wallet: &mut HotWallet, - ) -> Result<(Payment, PeerId), sn_transfers::WalletError> { - let payment_detail = wallet.api().get_recent_payment(xor_name)?; - - let payment = payment_detail.to_payment(); - let peer_id = PeerId::from_bytes(&payment_detail.peer_id_bytes) - .expect("payment detail should have a valid peer id"); - - Ok((payment, peer_id)) - } - - /// Creates a `Transfer` that can be received by the receiver. - /// Once received, it will be turned into a `CashNote` that the receiver can spend. - pub async fn send( - &mut self, - to: MainPubkey, - amount_in_nano: NanoTokens, - reason: Option, - wallet: &mut MemWallet, - ) -> Result { - let signed_transaction = - wallet.create_signed_transaction(vec![(amount_in_nano, to)], reason)?; - - // return the first CashNote (assuming there is only one because we only sent to one recipient) - let cash_note_for_recipient = match &signed_transaction.output_cashnotes[..] { - [cash_note] => Ok(cash_note), - [_multiple, ..] => Err(SendError::CashNoteAmountUnexpected( - "Got multiple, expected 1.".into(), - )), - [] => Err(SendError::CashNoteAmountUnexpected( - "Got 0, expected 1.".into(), - )), - }?; - - let transfer = Transfer::transfer_from_cash_note(cash_note_for_recipient) - .map_err(SendError::TransferError)?; - - self.send_spends(signed_transaction.spends.iter()).await?; - - wallet.process_signed_transaction(signed_transaction.clone()); - - for spend in &signed_transaction.spends { - wallet.add_pending_spend(spend.clone()); - } - - Ok(transfer) - } - - /// Receive a `CashNoteRedemption` through a transfer message. 
- pub async fn receive( - &self, - transfer_hex: &str, - wallet: &mut MemWallet, - ) -> Result<(), ReceiveError> { - let transfer = Transfer::from_hex(transfer_hex) - .map_err(|_| ReceiveError::TransferDeserializationFailed)?; - self.receive_transfer(transfer, wallet).await?; - Ok(()) - } -} - -/// Send a `SpendCashNote` request to the network. -async fn store_spend(network: Network, spend: SignedSpend) -> Result<(), NetworkError> { - let unique_pubkey = *spend.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let key = network_address.to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&key); - tracing::trace!("Sending spend {unique_pubkey:?} to the network via put_record, with addr of {cash_note_addr:?} - {pretty_key:?}"); - let record_kind = RecordKind::Spend; - let record = Record { - key, - value: try_serialize_record(&[spend], record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let (record_to_verify, expected_holders) = if VERIFY_STORE { - let expected_holders: HashSet<_> = network - .get_closest_peers(&network_address, true) - .await? - .iter() - .cloned() - .collect(); - (Some(record.clone()), expected_holders) - } else { - (None, Default::default()) - }; - - // When there is retry on Put side, no need to have a retry on Get - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: record_to_verify, - expected_holders, - is_register: false, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Persistent), - use_put_record_to: None, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - network.put_record(record, &put_cfg).await -} diff --git a/autonomi/src/native/client/vault.rs b/autonomi/src/native/client/vault.rs deleted file mode 100644 index bfa5f6d31d..0000000000 --- a/autonomi/src/native/client/vault.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::client::data::PutError; -use crate::client::vault::Vault; -use crate::client::ClientWrapper; -use crate::native::client::NativeClient; -use bytes::Bytes; -use libp2p::kad::{Quorum, Record}; -use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; -use sn_protocol::storage::{try_serialize_record, RecordKind, RetryStrategy, Scratchpad}; -use sn_transfers::HotWallet; -use std::collections::HashSet; -use tracing::info; - -impl Vault for NativeClient {} - -impl NativeClient { - /// Put data into the client's VaultPacket - /// - /// Returns Ok(None) early if no vault packet is defined. - /// - /// Pays for a new VaultPacket if none yet created for the client. Returns the current version - /// of the data on success. 
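store_spend above and the vault write below share one put-then-verify pattern: write with a majority quorum, then re-read with a paired GetRecordCfg to confirm the record landed. The pairing, pulled out as a sketch (types as used throughout this patch):

    let verification_cfg = GetRecordCfg {
        get_quorum: Quorum::Majority,
        retry_strategy: None,
        target_record: None, // Some(record) would pin the exact bytes expected
        expected_holders: Default::default(),
        is_register: false,
    };
    let put_cfg = PutRecordCfg {
        put_quorum: Quorum::Majority,
        retry_strategy: Some(RetryStrategy::Persistent),
        use_put_record_to: None,
        verification: Some((VerificationKind::Network, verification_cfg)),
    };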
- pub async fn write_bytes_to_vault_if_defined( - &mut self, - data: Bytes, - wallet: &mut HotWallet, - ) -> Result, PutError> { - // Exit early if no vault packet defined - let Some(client_sk) = self.client().vault_secret_key.as_ref() else { - return Ok(None); - }; - - let client_pk = client_sk.public_key(); - - let pad_res = self.get_vault_from_network().await; - - let mut is_new = true; - let mut scratch = if let Ok(existing_data) = pad_res { - tracing::info!("Scratchpad already exists, returning existing data"); - - info!( - "scratch already exists, is version {:?}", - existing_data.count() - ); - - is_new = false; - existing_data - } else { - tracing::trace!("new scratchpad creation"); - Scratchpad::new(client_pk) - }; - - let next_count = scratch.update_and_sign(data, client_sk); - let scratch_address = scratch.network_address(); - let scratch_key = scratch_address.to_record_key(); - - let record = if is_new { - self.pay( - [&scratch_address].iter().filter_map(|f| f.as_xorname()), - wallet, - ) - .await?; - - let (payment, _payee) = self.get_recent_payment_for_addr( - &scratch_address.as_xorname().ok_or(PutError::VaultXorName)?, - wallet, - )?; - - Record { - key: scratch_key, - value: try_serialize_record(&(payment, scratch), RecordKind::ScratchpadWithPayment) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - } else { - Record { - key: scratch_key, - value: try_serialize_record(&scratch, RecordKind::Scratchpad) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - } - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::Majority, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: None, - verification: Some(( - VerificationKind::Network, - GetRecordCfg { - get_quorum: Quorum::Majority, - retry_strategy: None, - target_record: None, - expected_holders: HashSet::new(), - is_register: false, - }, - )), - }; - - self.network().put_record(record, &put_cfg).await?; - - Ok(Some(next_count)) - } -} diff --git a/autonomi/src/native/mod.rs b/autonomi/src/native/mod.rs deleted file mode 100644 index 099e38490e..0000000000 --- a/autonomi/src/native/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub use crate::client::Client; - -pub mod client; -#[cfg(feature = "transfers")] -pub mod wallet; diff --git a/autonomi/src/native/wallet/error.rs b/autonomi/src/native/wallet/error.rs deleted file mode 100644 index 5708f21501..0000000000 --- a/autonomi/src/native/wallet/error.rs +++ /dev/null @@ -1,15 +0,0 @@ -#[derive(Debug, thiserror::Error)] -pub enum WalletError { - /// Happens when a wallet is trying to decrypt a transfer that was meant for another wallet. 
- #[error("Failed to decrypt transfer with our key, maybe it was not for us")] - FailedToDecryptTransfer, - /// Error when attempting to transfer 0 tokens - #[error("The transfer amount must be more than 0")] - TransferAmountZero, - #[error("Could not get value of `CashNote` as no outputs are found.")] - CashNoteOutputNotFound, - #[error("This `CashNote` is not destined for this wallet.")] - CashNoteNotOwned, - #[error(transparent)] - TransferError(#[from] sn_transfers::TransferError), -} diff --git a/autonomi/src/native/wallet/mod.rs b/autonomi/src/native/wallet/mod.rs deleted file mode 100644 index 941b7abf90..0000000000 --- a/autonomi/src/native/wallet/mod.rs +++ /dev/null @@ -1,151 +0,0 @@ -pub mod error; - -use crate::native::wallet::error::WalletError; -use sn_transfers::{ - CashNote, CashNoteRedemption, DerivationIndex, MainPubkey, NanoTokens, SignedSpend, - SignedTransaction, SpendReason, Transfer, UniquePubkey, UnsignedTransaction, -}; -use sn_transfers::{HotWallet, MainSecretKey}; -use std::collections::{BTreeMap, HashSet}; -use std::path::PathBuf; - -pub struct MemWallet { - hot_wallet: HotWallet, - available_cash_notes: BTreeMap, -} - -impl MemWallet { - /// Create an empty wallet from a main secret key. - #[allow(dead_code)] - fn from_main_secret_key(main_secret_key: MainSecretKey) -> Self { - Self { - hot_wallet: HotWallet::new(main_secret_key, PathBuf::default()), - available_cash_notes: Default::default(), - } - } - - // TODO: as WASM can not save a wallet state to disk or load from disk -- we need to provide a wallet state manually. - /// Initialise a wallet from wallet state bytes containing all payments, (un)confirmed spends, cash notes and the secret key. - #[allow(dead_code)] - fn from_state_bytes>(_data: T) -> Self { - todo!() - } - - /// Returns the entire wallet state as bytes. That includes all payments (un)confirmed spends, cash notes and the secret key. - /// A wallet can be fully initialised again from these state bytes. - #[allow(dead_code)] - fn to_state_bytes(&self) -> Vec { - todo!() - } - - /// Returns the wallet address (main public key). - pub fn address(&self) -> MainPubkey { - self.hot_wallet.address() - } - - /// Returns the balance of a wallet in Nanos. - pub fn balance(&self) -> NanoTokens { - self.hot_wallet.balance() - } - - pub(super) fn unwrap_transfer( - &self, - transfer: &Transfer, - ) -> Result, WalletError> { - self.hot_wallet - .unwrap_transfer(transfer) - .map_err(|_| WalletError::FailedToDecryptTransfer) - } - - /// Returns all available `CashNotes` together with their secret key to spend them. 
- pub(super) fn cash_notes_with_secret_keys(&mut self) -> Vec { - self.available_cash_notes.values().cloned().collect() - } - - pub(super) fn create_signed_transaction( - &mut self, - outputs: Vec<(NanoTokens, MainPubkey)>, - reason: Option, - ) -> Result { - for output in &outputs { - if output.0.is_zero() { - return Err(WalletError::TransferAmountZero); - } - } - - let mut rng = &mut rand::rngs::OsRng; - - // create a unique key for each output - let to_unique_keys: Vec<_> = outputs - .into_iter() - .map(|(amount, address)| (amount, address, DerivationIndex::random(&mut rng), false)) - .collect(); - - let cash_notes_with_keys = self.cash_notes_with_secret_keys(); - let reason = reason.unwrap_or_default(); - - let unsigned_transaction = - UnsignedTransaction::new(cash_notes_with_keys, to_unique_keys, self.address(), reason)?; - let signed_transaction = unsigned_transaction.sign(self.hot_wallet.key())?; - - Ok(signed_transaction) - } - - fn mark_cash_notes_as_spent<'a, T: IntoIterator>( - &mut self, - unique_pubkeys: T, - ) { - let unique_pubkeys: Vec<&'a UniquePubkey> = unique_pubkeys.into_iter().collect(); - - for unique_pubkey in &unique_pubkeys { - let _ = self.available_cash_notes.remove(unique_pubkey); - } - - self.hot_wallet - .wo_wallet_mut() - .mark_notes_as_spent(unique_pubkeys); - } - - pub(super) fn deposit_cash_note(&mut self, cash_note: CashNote) -> Result<(), WalletError> { - if cash_note - .derived_pubkey(&self.hot_wallet.key().main_pubkey()) - .is_err() - { - return Err(WalletError::CashNoteNotOwned); - } - - self.available_cash_notes - .insert(cash_note.unique_pubkey(), cash_note.clone()); - - // DevNote: the deposit fn already does the checks above, - // but I have added them here just in case we get rid - // of the composited hotwallet and its deposit checks - self.hot_wallet - .wo_wallet_mut() - .deposit(&[cash_note]) - .map_err(|_| WalletError::CashNoteOutputNotFound)?; - - Ok(()) - } - - pub(super) fn add_pending_spend(&mut self, spend: SignedSpend) { - self.hot_wallet - .unconfirmed_spend_requests_mut() - .insert(spend); - } - - // TODO: should we verify if the transfer is valid and destined for this wallet? - pub(super) fn process_signed_transaction(&mut self, transfer: SignedTransaction) { - let spent_unique_pubkeys: HashSet<_> = transfer - .spends - .iter() - .map(|spend| spend.unique_pubkey()) - .collect(); - - self.mark_cash_notes_as_spent(spent_unique_pubkeys); - - if let Some(cash_note) = transfer.change_cashnote { - let _ = self.deposit_cash_note(cash_note); - } - } -} diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs new file mode 100644 index 0000000000..15ab0cc7a6 --- /dev/null +++ b/autonomi/tests/common.rs @@ -0,0 +1,78 @@ +use bytes::Bytes; +use const_hex::ToHexExt; +use evmlib::CustomNetwork; +use libp2p::Multiaddr; +use rand::Rng; +use sn_peers_acquisition::parse_peer_addr; +use std::env; + +fn get_var_or_panic(var: &str) -> String { + env::var(var).unwrap_or_else(|_| panic!("{var} environment variable needs to be set")) +} + +#[allow(dead_code)] +pub fn gen_random_data(len: usize) -> Bytes { + let mut data = vec![0u8; len]; + rand::thread_rng().fill(&mut data[..]); + Bytes::from(data) +} + +#[allow(dead_code)] +/// Enable logging for tests. E.g. use `RUST_LOG=autonomi` to see logs. +pub fn enable_logging() { + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); +} + +#[allow(dead_code)] +/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. 
+///
+/// An empty `Vec` will be returned if the env var is not set.
+pub fn peers_from_env() -> Result<Vec<Multiaddr>, libp2p::multiaddr::Error> {
+    let Ok(peers_str) = env::var("SAFE_PEERS") else {
+        return Ok(vec![]);
+    };
+
+    peers_str.split(',').map(parse_peer_addr).collect()
+}
+
+pub fn evm_network_from_env() -> evmlib::Network {
+    let evm_network = env::var("EVM_NETWORK").ok();
+    let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one");
+
+    let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag {
+        (
+            evmlib::Network::ArbitrumOne.rpc_url().to_string(),
+            evmlib::Network::ArbitrumOne
+                .payment_token_address()
+                .encode_hex_with_prefix(),
+            evmlib::Network::ArbitrumOne
+                .chunk_payments_address()
+                .encode_hex_with_prefix(),
+        )
+    } else {
+        (
+            get_var_or_panic("RPC_URL"),
+            get_var_or_panic("PAYMENT_TOKEN_ADDRESS"),
+            get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"),
+        )
+    };
+
+    evmlib::Network::Custom(CustomNetwork::new(
+        &rpc_url,
+        &payment_token_address,
+        &chunk_payments_address,
+    ))
+}
+
+pub fn evm_wallet_from_env_or_default(network: evmlib::Network) -> evmlib::wallet::Wallet {
+    // Default deployer wallet of the testnet.
+    const DEFAULT_WALLET_PRIVATE_KEY: &str =
+        "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
+
+    let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string());
+
+    evmlib::wallet::Wallet::new_from_private_key(network, &private_key)
+        .expect("Invalid private key")
+}
diff --git a/autonomi/tests/common/mod.rs b/autonomi/tests/common/mod.rs
deleted file mode 100644
index 21dc3b8a98..0000000000
--- a/autonomi/tests/common/mod.rs
+++ /dev/null
@@ -1,189 +0,0 @@
-#![allow(dead_code)]
-
-use std::path::Path;
-
-use bip39::Mnemonic;
-use bls::SecretKey;
-use bytes::Bytes;
-use const_hex::ToHexExt;
-use curv::elliptic::curves::ECScalar as _;
-use evmlib::CustomNetwork;
-use libp2p::Multiaddr;
-use rand::{Rng, RngCore};
-use sn_peers_acquisition::parse_peer_addr;
-use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey};
-use std::env;
-
-const MNEMONIC_FILENAME: &str = "account_secret";
-const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0";
-const ACCOUNT_WALLET_DERIVATION: &str = "m/2/0";
-const DEFAULT_WALLET_DERIVIATION_PASSPHRASE: &str = "default";
-
-fn get_var_or_panic(var: &str) -> String {
-    env::var(var).expect(&format!("{var} environment variable needs to be set"))
-}
-
-/// Load an account from disk, with wallet, or create a new one using the mnemonic system
-fn load_account_wallet_or_create_with_mnemonic(
-    root_dir: &Path,
-    derivation_passphrase: Option<&str>,
-) -> Result<HotWallet, Box<dyn std::error::Error>> {
-    let wallet = HotWallet::load_from(root_dir);
-
-    match wallet {
-        Ok(wallet) => Ok(wallet),
-        Err(error) => {
-            tracing::warn!("Issue loading wallet, creating a new one: {error}");
-
-            let mnemonic = load_or_create_mnemonic(root_dir)?;
-            let wallet =
-                secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?;
-
-            Ok(HotWallet::create_from_key(root_dir, wallet, None)?)
-        }
-    }
-}
-
-/// When launching a testnet locally, we can use the faucet wallet.
-pub fn load_hot_wallet_from_faucet() -> HotWallet {
-    let root_dir = get_faucet_data_dir();
-    load_account_wallet_or_create_with_mnemonic(&root_dir, None)
-        .expect("faucet wallet should be available for tests")
-}
-
-pub fn gen_random_data(len: usize) -> Bytes {
-    let mut data = vec![0u8; len];
-    rand::thread_rng().fill(&mut data[..]);
-    Bytes::from(data)
-}
-
-/// Enable logging for tests. E.g.
use `RUST_LOG=autonomi` to see logs. -pub fn enable_logging() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); -} - -/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. -/// -/// An empty `Vec` will be returned if the env var is not set. -pub fn peers_from_env() -> Result, libp2p::multiaddr::Error> { - let Ok(peers_str) = env::var("SAFE_PEERS") else { - return Ok(vec![]); - }; - - peers_str.split(',').map(parse_peer_addr).collect() -} - -fn load_or_create_mnemonic(root_dir: &Path) -> Result> { - match read_mnemonic_from_disk(root_dir) { - Ok(mnemonic) => { - tracing::info!("Using existing mnemonic from {root_dir:?}"); - Ok(mnemonic) - } - Err(error) => { - tracing::warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}"); - let mnemonic = random_eip2333_mnemonic()?; - write_mnemonic_to_disk(root_dir, &mnemonic)?; - Ok(mnemonic) - } - } -} - -fn secret_key_from_mnemonic( - mnemonic: Mnemonic, - derivation_passphrase: Option, -) -> Result> { - let passphrase = - derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned()); - account_wallet_secret_key(mnemonic, &passphrase) -} - -fn create_faucet_account_and_wallet() -> HotWallet { - let root_dir = get_faucet_data_dir(); - - println!("Loading faucet wallet... {root_dir:#?}"); - load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .expect("Faucet wallet shall be created successfully.") -} - -pub fn write_mnemonic_to_disk( - files_dir: &Path, - mnemonic: &Mnemonic, -) -> Result<(), Box> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = mnemonic.to_string(); - std::fs::write(filename, content)?; - Ok(()) -} - -pub(super) fn read_mnemonic_from_disk( - files_dir: &Path, -) -> Result> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = std::fs::read_to_string(filename)?; - let mnemonic = Mnemonic::parse_normalized(&content)?; - Ok(mnemonic) -} - -fn random_eip2333_mnemonic() -> Result> { - let mut entropy = [1u8; 32]; - let rng = &mut rand::rngs::OsRng; - rng.fill_bytes(&mut entropy); - let mnemonic = Mnemonic::from_entropy(&entropy)?; - Ok(mnemonic) -} - -/// Derive a wallet secret key from the mnemonic for the account. 
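The mnemonic helpers in this deleted module compose into a single pipeline, from seed phrase on disk to a spendable wallet. A condensed sketch (names from this file; error handling elided):

    // account_secret file -> BIP-39 mnemonic -> EIP-2333 key at m/2/0 -> HotWallet
    let mnemonic = load_or_create_mnemonic(root_dir)?;
    let main_sk = secret_key_from_mnemonic(mnemonic, None)?; // None => "default" passphrase
    let wallet = HotWallet::create_from_key(root_dir, main_sk, None)?;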
-fn account_wallet_secret_key( - mnemonic: bip39::Mnemonic, - passphrase: &str, -) -> Result> { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = eip2333::derive_master_sk(&seed)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_WALLET_DERIVATION); - let key_bytes = derived_key.serialize(); - let sk = SecretKey::from_bytes(key_bytes.into())?; - Ok(MainSecretKey::new(sk)) -} - -pub fn evm_network_from_env() -> evmlib::Network { - let evm_network = env::var("EVM_NETWORK").ok(); - let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one"); - - let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag { - ( - evmlib::Network::ArbitrumOne.rpc_url().to_string(), - evmlib::Network::ArbitrumOne - .payment_token_address() - .encode_hex_with_prefix(), - evmlib::Network::ArbitrumOne - .chunk_payments_address() - .encode_hex_with_prefix(), - ) - } else { - ( - get_var_or_panic("RPC_URL"), - get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), - get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), - ) - }; - - evmlib::Network::Custom(CustomNetwork::new( - &rpc_url, - &payment_token_address, - &chunk_payments_address, - )) -} - -pub fn evm_wallet_from_env_or_default(network: evmlib::Network) -> evmlib::wallet::Wallet { - // Default deployer wallet of the testnet. - const DEFAULT_WALLET_PRIVATE_KEY: &str = - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - - let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); - - evmlib::wallet::Wallet::new_from_private_key(network, &private_key) - .expect("Invalid private key") -} diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs deleted file mode 100644 index 6a85ff3f07..0000000000 --- a/autonomi/tests/evm/file.rs +++ /dev/null @@ -1,82 +0,0 @@ -#[cfg(feature = "evm-payments")] -mod test { - - use crate::common; - use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; - use crate::evm::Client; - use bytes::Bytes; - use eyre::bail; - use std::time::Duration; - use tokio::time::sleep; - - #[tokio::test] - async fn file() -> Result<(), Box> { - common::enable_logging(); - - let network = evm_network_from_env(); - let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = evm_wallet_from_env_or_default(network); - - // let data = common::gen_random_data(1024 * 1024 * 1000); - // let user_key = common::gen_random_data(32); - - let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) - .await?; - - sleep(Duration::from_secs(10)).await; - - let root_fetched = client.fetch_root(addr).await?; - - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); - - Ok(()) - } - - #[cfg(feature = "vault")] - #[tokio::test] - async fn file_into_vault() -> eyre::Result<()> { - common::enable_logging(); - - let network = evm_network_from_env(); - - let mut client = Client::connect(&[]) - .await? - .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - - let mut wallet = evm_wallet_from_env_or_default(network); - - let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) - .await?; - sleep(Duration::from_secs(2)).await; - - let root_fetched = client.fetch_root(addr).await?; - - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); - - // now assert over the stored account packet - let new_client = Client::connect(&[]) - .await? 
- .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - - if let Some(ap) = new_client.fetch_and_decrypt_vault().await? { - let ap_root_fetched = Client::deserialise_root(ap)?; - - assert_eq!( - root.map, ap_root_fetched.map, - "root fetched should match root put" - ); - } else { - bail!("No account packet found"); - } - - Ok(()) - } -} diff --git a/autonomi/tests/evm/mod.rs b/autonomi/tests/evm/mod.rs deleted file mode 100644 index cdddaa504a..0000000000 --- a/autonomi/tests/evm/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(feature = "files")] -mod file; -#[cfg(feature = "data")] -mod put; -#[cfg(feature = "registers")] -mod register; -mod wallet; diff --git a/autonomi/tests/native/file.rs b/autonomi/tests/file.rs similarity index 66% rename from autonomi/tests/native/file.rs rename to autonomi/tests/file.rs index f6b5c30f35..952f859631 100644 --- a/autonomi/tests/native/file.rs +++ b/autonomi/tests/file.rs @@ -1,24 +1,30 @@ -use std::time::Duration; +mod common; -use crate::common; -use crate::native::Client; +use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +use autonomi::Client; +#[cfg(feature = "vault")] use bytes::Bytes; -use eyre::{bail, Result}; +#[cfg(feature = "vault")] +use eyre::bail; +use std::time::Duration; use tokio::time::sleep; +#[cfg(feature = "files")] #[tokio::test] async fn file() -> Result<(), Box> { common::enable_logging(); - let mut client = Client::connect(&common::peers_from_env()?).await?; - let mut wallet = common::load_hot_wallet_from_faucet(); + let network = evm_network_from_env(); + let mut client = Client::connect(&[]).await.unwrap(); + let wallet = evm_wallet_from_env_or_default(network); // let data = common::gen_random_data(1024 * 1024 * 1000); // let user_key = common::gen_random_data(32); let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .upload_from_dir("tests/file/test_dir".into(), &wallet) .await?; + sleep(Duration::from_secs(10)).await; let root_fetched = client.fetch_root(addr).await?; @@ -31,19 +37,21 @@ async fn file() -> Result<(), Box> { Ok(()) } -// files and vault feats -#[cfg(feature = "vault")] +#[cfg(all(feature = "vault", feature = "files"))] #[tokio::test] -async fn file_into_vault() -> Result<()> { +async fn file_into_vault() -> eyre::Result<()> { common::enable_logging(); + let network = evm_network_from_env(); + let mut client = Client::connect(&[]) .await? .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - let mut wallet = common::load_hot_wallet_from_faucet(); + + let wallet = evm_wallet_from_env_or_default(network); let (root, addr) = client - .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .upload_from_dir("tests/file/test_dir".into(), &wallet) .await?; sleep(Duration::from_secs(2)).await; @@ -60,7 +68,7 @@ async fn file_into_vault() -> Result<()> { .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; if let Some(ap) = new_client.fetch_and_decrypt_vault().await? 
{ - let ap_root_fetched = Client::deserialise_root(ap)?; + let ap_root_fetched = Client::deserialize_root(ap)?; assert_eq!( root.map, ap_root_fetched.map, diff --git a/autonomi/tests/integration.rs b/autonomi/tests/integration.rs deleted file mode 100644 index 386c36ec91..0000000000 --- a/autonomi/tests/integration.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) mod common; -#[cfg(feature = "evm-payments")] -mod evm; -#[cfg(feature = "native-payments")] -mod native; diff --git a/autonomi/tests/native/mod.rs b/autonomi/tests/native/mod.rs deleted file mode 100644 index 620b16f77f..0000000000 --- a/autonomi/tests/native/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -use autonomi; - -#[cfg(feature = "files")] -mod file; -#[cfg(feature = "data")] -mod put; -#[cfg(feature = "registers")] -mod register; - -pub type Client = autonomi::native::client::NativeClient; diff --git a/autonomi/tests/native/put.rs b/autonomi/tests/native/put.rs deleted file mode 100644 index f9c1bb2dde..0000000000 --- a/autonomi/tests/native/put.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::time::Duration; - -use crate::common; -use crate::native::Client; -use tokio::time::sleep; - -#[tokio::test] -async fn put() { - common::enable_logging(); - - let mut client = Client::connect(&common::peers_from_env().unwrap()) - .await - .unwrap(); - let mut wallet = common::load_hot_wallet_from_faucet(); - let data = common::gen_random_data(1024 * 1024 * 10); - - // let quote = client.quote(data.clone()).await.unwrap(); - // let payment = client.pay(quote, &mut wallet).await.unwrap(); - let addr = client.put(data.clone(), &mut wallet).await.unwrap(); - - sleep(Duration::from_secs(2)).await; - - let data_fetched = client.get(addr).await.unwrap(); - assert_eq!(data, data_fetched, "data fetched should match data put"); -} diff --git a/autonomi/tests/native/register.rs b/autonomi/tests/native/register.rs deleted file mode 100644 index 97355149a9..0000000000 --- a/autonomi/tests/native/register.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::time::Duration; - -use crate::common; -use crate::native::Client; -use bytes::Bytes; -use tokio::time::sleep; -use xor_name::XorName; - -#[tokio::test] -async fn register() { - common::enable_logging(); - - let mut client = Client::connect(&common::peers_from_env().unwrap()) - .await - .unwrap(); - let mut wallet = common::load_hot_wallet_from_faucet(); - - // Owner key of the register. 
- let key = bls::SecretKey::random(); - - // Create a register with the value [1, 2, 3, 4] - let register = client - .create_register( - vec![1, 2, 3, 4].into(), - XorName::random(&mut rand::thread_rng()), - key.clone(), - &mut wallet, - ) - .await - .unwrap(); - - sleep(Duration::from_secs(2)).await; - - // Fetch the register again - let register = client.fetch_register(*register.address()).await.unwrap(); - - // Update the register with the value [5, 6, 7, 8] - client - .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) - .await - .unwrap(); - - sleep(Duration::from_secs(2)).await; - - // Fetch and verify the register contains the updated value - let register = client.fetch_register(*register.address()).await.unwrap(); - assert_eq!(register.values(), vec![Bytes::from(vec![5, 6, 7, 8])]); -} diff --git a/autonomi/tests/evm/put.rs b/autonomi/tests/put.rs similarity index 80% rename from autonomi/tests/evm/put.rs rename to autonomi/tests/put.rs index 9d6a236f85..ad6e95e076 100644 --- a/autonomi/tests/evm/put.rs +++ b/autonomi/tests/put.rs @@ -1,23 +1,25 @@ +mod common; + use std::time::Duration; -use crate::common; use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; use tokio::time::sleep; +#[cfg(feature = "data")] #[tokio::test] async fn put() { common::enable_logging(); let network = evm_network_from_env(); let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = evm_wallet_from_env_or_default(network); + let wallet = evm_wallet_from_env_or_default(network); let data = common::gen_random_data(1024 * 1024 * 10); // let quote = client.quote(data.clone()).await.unwrap(); // let payment = client.pay(quote, &mut wallet).await.unwrap(); - let addr = client.put(data.clone(), &mut wallet).await.unwrap(); + let addr = client.put(data.clone(), &wallet).await.unwrap(); sleep(Duration::from_secs(10)).await; diff --git a/autonomi/tests/evm/register.rs b/autonomi/tests/register.rs similarity index 89% rename from autonomi/tests/evm/register.rs rename to autonomi/tests/register.rs index 71aada72f8..8d39428c7d 100644 --- a/autonomi/tests/evm/register.rs +++ b/autonomi/tests/register.rs @@ -1,19 +1,21 @@ +mod common; + use std::time::Duration; -use crate::common; use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; -use crate::evm::Client; +use autonomi::Client; use bytes::Bytes; use tokio::time::sleep; use xor_name::XorName; +#[cfg(feature = "registers")] #[tokio::test] async fn register() { common::enable_logging(); let network = evm_network_from_env(); let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = evm_wallet_from_env_or_default(network); + let wallet = evm_wallet_from_env_or_default(network); // Owner key of the register. 
let key = bls::SecretKey::random(); @@ -24,7 +26,7 @@ async fn register() { vec![1, 2, 3, 4].into(), XorName::random(&mut rand::thread_rng()), key.clone(), - &mut wallet, + &wallet, ) .await .unwrap(); diff --git a/autonomi/tests/evm/wallet.rs b/autonomi/tests/wallet.rs similarity index 98% rename from autonomi/tests/evm/wallet.rs rename to autonomi/tests/wallet.rs index 3bb4972c03..5fb852921e 100644 --- a/autonomi/tests/evm/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -1,3 +1,5 @@ +mod common; + use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use const_hex::traits::FromHex; use evmlib::common::{Address, Amount}; From b0c2ae31a38ea6640ae0cfd1fca62c4f8291320a Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 27 Sep 2024 11:42:54 +0200 Subject: [PATCH 077/255] feat(autonomi): retry and verify put --- autonomi/src/client/data.rs | 34 +++++++++++++++++++++++++++++++--- autonomi/tests/put.rs | 1 - 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index fa1114f37e..4b4b9260b5 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -12,16 +12,20 @@ use crate::{self_encryption::encrypt, Client}; use evmlib::common::{QuoteHash, QuotePayment, TxHash}; use evmlib::wallet::Wallet; use libp2p::futures; +use rand::{thread_rng, Rng}; use sn_evm::ProofOfPayment; use sn_networking::PutRecordCfg; -use sn_networking::{GetRecordCfg, Network, NetworkError, PayeeQuote}; +use sn_networking::{GetRecordCfg, Network, NetworkError, PayeeQuote, VerificationKind}; use sn_protocol::{ + messages::ChunkProof, storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind, + try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, + RecordKind, RetryStrategy, }, NetworkAddress, }; use std::collections::{BTreeMap, HashMap}; +use std::num::NonZero; /// Errors that can occur during the put operation. #[derive(Debug, thiserror::Error)] @@ -249,11 +253,35 @@ impl Client { expires: None, }; + let verification = { + let verification_cfg = GetRecordCfg { + get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")), + retry_strategy: Some(RetryStrategy::Quick), + target_record: None, + expected_holders: Default::default(), + is_register: false, + }; + + let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk) + .expect("TODO") + .to_vec(); + let random_nonce = thread_rng().gen::(); + let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); + + Some(( + VerificationKind::ChunkProof { + expected_proof, + nonce: random_nonce, + }, + verification_cfg, + )) + }; + let put_cfg = PutRecordCfg { put_quorum: Quorum::One, retry_strategy: None, use_put_record_to: Some(vec![storing_node]), - verification: None, + verification, }; Ok(self.network.put_record(record, &put_cfg).await?) 
} diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index ad6e95e076..c3ad7fc0a7 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -14,7 +14,6 @@ async fn put() { let network = evm_network_from_env(); let mut client = Client::connect(&[]).await.unwrap(); let wallet = evm_wallet_from_env_or_default(network); - let data = common::gen_random_data(1024 * 1024 * 10); // let quote = client.quote(data.clone()).await.unwrap(); From 41d2b2a91ddd9aae36472e6a5299e290fb4c5fb6 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 30 Sep 2024 08:43:06 +0200 Subject: [PATCH 078/255] refactor(autonomi): proper error on serialization --- autonomi/src/client/data.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 4b4b9260b5..c1cd0ecdd7 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -263,7 +263,7 @@ impl Client { }; let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk) - .expect("TODO") + .map_err(|_| PutError::Serialization)? .to_vec(); let random_nonce = thread_rng().gen::(); let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); @@ -279,7 +279,7 @@ impl Client { let put_cfg = PutRecordCfg { put_quorum: Quorum::One, - retry_strategy: None, + retry_strategy: Some(RetryStrategy::Balanced), use_put_record_to: Some(vec![storing_node]), verification, }; From df57b99255b8a75fd8ea7955c749dfbadf800f1a Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 11:58:57 +0200 Subject: [PATCH 079/255] test(autonomi): few tweaks to tests --- autonomi/tests/file.rs | 8 +++----- autonomi/tests/put.rs | 5 ++--- autonomi/tests/register.rs | 3 ++- evmlib/src/wallet.rs | 1 - 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 952f859631..d3b2ed862e 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "files")] + mod common; use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; @@ -9,7 +11,6 @@ use eyre::bail; use std::time::Duration; use tokio::time::sleep; -#[cfg(feature = "files")] #[tokio::test] async fn file() -> Result<(), Box> { common::enable_logging(); @@ -18,9 +19,6 @@ async fn file() -> Result<(), Box> { let mut client = Client::connect(&[]).await.unwrap(); let wallet = evm_wallet_from_env_or_default(network); - // let data = common::gen_random_data(1024 * 1024 * 1000); - // let user_key = common::gen_random_data(32); - let (root, addr) = client .upload_from_dir("tests/file/test_dir".into(), &wallet) .await?; @@ -37,7 +35,7 @@ async fn file() -> Result<(), Box> { Ok(()) } -#[cfg(all(feature = "vault", feature = "files"))] +#[cfg(feature = "vault")] #[tokio::test] async fn file_into_vault() -> eyre::Result<()> { common::enable_logging(); diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index ad6e95e076..1247a4a7c3 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "data")] + mod common; use std::time::Duration; @@ -6,7 +8,6 @@ use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; use tokio::time::sleep; -#[cfg(feature = "data")] #[tokio::test] async fn put() { common::enable_logging(); @@ -17,8 +18,6 @@ async fn put() { let data = common::gen_random_data(1024 * 1024 * 10); - // let quote = client.quote(data.clone()).await.unwrap(); - // let payment = client.pay(quote, &mut wallet).await.unwrap(); let addr = 
client.put(data.clone(), &wallet).await.unwrap(); sleep(Duration::from_secs(10)).await; diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 8d39428c7d..5b49394aea 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "registers")] + mod common; use std::time::Duration; @@ -8,7 +10,6 @@ use bytes::Bytes; use tokio::time::sleep; use xor_name::XorName; -#[cfg(feature = "registers")] #[tokio::test] async fn register() { common::enable_logging(); diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 69c9644240..498eb3afc2 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -266,7 +266,6 @@ pub async fn pay_for_quotes>( mod tests { use crate::common::Amount; use crate::testnet::Testnet; - use crate::utils::dummy_address; use crate::wallet::{from_private_key, Wallet}; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet}; use alloy::primitives::address; From 786133948c187c36cd13f97cf36b2331c0dd8e9f Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 1 Oct 2024 14:18:35 +0900 Subject: [PATCH 080/255] feat: autonomi cli --- Cargo.lock | 16 +++ Cargo.toml | 1 + autonomi_cli/Cargo.toml | 33 +++++++ autonomi_cli/README.md | 27 ++++++ autonomi_cli/src/commands.rs | 135 ++++++++++++++++++++++++++ autonomi_cli/src/commands/file.rs | 33 +++++++ autonomi_cli/src/commands/register.rs | 48 +++++++++ autonomi_cli/src/commands/vault.rs | 25 +++++ autonomi_cli/src/log_metrics.rs | 39 ++++++++ autonomi_cli/src/main.rs | 34 +++++++ autonomi_cli/src/opt.rs | 61 ++++++++++++ autonomi_cli/src/utils.rs | 85 ++++++++++++++++ 12 files changed, 537 insertions(+) create mode 100644 autonomi_cli/Cargo.toml create mode 100644 autonomi_cli/README.md create mode 100644 autonomi_cli/src/commands.rs create mode 100644 autonomi_cli/src/commands/file.rs create mode 100644 autonomi_cli/src/commands/register.rs create mode 100644 autonomi_cli/src/commands/vault.rs create mode 100644 autonomi_cli/src/log_metrics.rs create mode 100644 autonomi_cli/src/main.rs create mode 100644 autonomi_cli/src/opt.rs create mode 100644 autonomi_cli/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index d0b97edc11..0af11a7cf3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1151,6 +1151,22 @@ dependencies = [ "xor_name", ] +[[package]] +name = "autonomi_cli" +version = "0.1.0" +dependencies = [ + "autonomi", + "clap", + "color-eyre", + "dirs-next", + "indicatif", + "sn_build_info", + "sn_logging", + "sn_peers_acquisition", + "tokio", + "tracing", +] + [[package]] name = "axum" version = "0.6.20" diff --git a/Cargo.toml b/Cargo.toml index 79cc1a5945..c34946d706 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ resolver = "2" members = [ "autonomi", + "autonomi_cli", "evmlib", "evm_testnet", # "sn_auditor", diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml new file mode 100644 index 0000000000..247ec69124 --- /dev/null +++ b/autonomi_cli/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "autonomi_cli" +version = "0.1.0" +edition = "2021" + +[features] +default = ["metrics"] +local-discovery = ["sn_peers_acquisition/local-discovery"] +metrics = ["sn_logging/process-metrics"] +network-contacts = ["sn_peers_acquisition/network-contacts"] + +[dependencies] +autonomi = { path = "../autonomi", version = "0.1.0" } +clap = { version = "4.2.1", features = ["derive"] } +color-eyre = "~0.6" +dirs-next = "~2.0.0" +indicatif = { version = "0.17.5", features = ["tokio"] } +tokio = { version = "1.32.0", features = [ + "io-util", + "macros", + 
"parking_lot", + "rt", + "sync", + "time", + "fs", +] } +tracing = { version = "~0.1.26" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.0" } +sn_build_info = { path = "../sn_build_info", version = "0.1.11" } +sn_logging = { path = "../sn_logging", version = "0.2.33" } + +[lints] +workspace = true diff --git a/autonomi_cli/README.md b/autonomi_cli/README.md new file mode 100644 index 0000000000..b10d2128fb --- /dev/null +++ b/autonomi_cli/README.md @@ -0,0 +1,27 @@ +# A CLI for the Autonomi Network + +``` +Usage: autonomi_cli [OPTIONS] + +Commands: + file Operations related to file handling + register Operations related to register management + vault Operations related to vault management + help Print this message or the help of the given subcommand(s) + +Options: + --log-output-dest + Specify the logging output destination. [default: data-dir] + --log-format + Specify the logging format. + --peer + Peer(s) to use for bootstrap, in a 'multiaddr' format containing the peer ID [env: SAFE_PEERS=] + --timeout + The maximum duration to wait for a connection to the network before timing out + -x, --no-verify + Prevent verification of data storage on the network + -h, --help + Print help (see more with '--help') + -V, --version + Print version +``` \ No newline at end of file diff --git a/autonomi_cli/src/commands.rs b/autonomi_cli/src/commands.rs new file mode 100644 index 0000000000..f9f6e03123 --- /dev/null +++ b/autonomi_cli/src/commands.rs @@ -0,0 +1,135 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +mod file; +mod register; +mod vault; + +use clap::Subcommand; +use color_eyre::Result; + +use crate::opt::Opt; + +#[derive(Subcommand, Debug)] +pub enum SubCmd { + /// Operations related to file handling. + File { + #[command(subcommand)] + command: FileCmd, + }, + + /// Operations related to register management. + Register { + #[command(subcommand)] + command: RegisterCmd, + }, + + /// Operations related to vault management. + Vault { + #[command(subcommand)] + command: VaultCmd, + }, +} + +#[derive(Subcommand, Debug)] +pub enum FileCmd { + /// Estimate cost to upload a file. + Cost { + /// The file to estimate cost for. + file: String, + }, + + /// Upload a file and pay for it. + Upload { + /// The file to upload. + file: String, + }, + + /// Download a file from the given address. + Download { + /// The address of the file to download. + addr: String, + /// The destination file path. + dest_file: String, + }, + + /// List previous uploads + List, +} + +#[derive(Subcommand, Debug)] +pub enum RegisterCmd { + /// Estimate cost to register a name. + Cost { + /// The name to register. + name: String, + }, + + /// Create a new register with the given name and value. + Create { + /// The name of the register. + name: String, + /// The value to store in the register. + value: String, + }, + + /// Edit an existing register. + Edit { + /// The name of the register. + name: String, + /// The new value to store in the register. + value: String, + }, + + /// Get the value of a register. 
+    Get {
+        /// The name of the register.
+        name: String,
+    },
+
+    /// List previous registers
+    List,
+}
+
+#[derive(Subcommand, Debug)]
+pub enum VaultCmd {
+    /// Estimate cost to create a vault.
+    Cost,
+
+    /// Create a vault at a deterministic address based on your `SECRET_KEY`.
+    Create,
+
+    /// Sync vault with the network, including registers and files.
+    Sync,
+}
+
+pub fn handle_subcommand(opt: Opt) -> Result<()> {
+    let peers = crate::utils::get_peers(opt.peers)?;
+    let cmd = opt.command;
+
+    match cmd {
+        SubCmd::File { command } => match command {
+            FileCmd::Cost { file } => file::cost(&file, peers),
+            FileCmd::Upload { file } => file::upload(&file, peers),
+            FileCmd::Download { addr, dest_file } => file::download(&addr, &dest_file, peers),
+            FileCmd::List => file::list(peers),
+        },
+        SubCmd::Register { command } => match command {
+            RegisterCmd::Cost { name } => register::cost(&name, peers),
+            RegisterCmd::Create { name, value } => register::create(&name, &value, peers),
+            RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers),
+            RegisterCmd::Get { name } => register::get(&name, peers),
+            RegisterCmd::List => register::list(peers),
+        },
+        SubCmd::Vault { command } => match command {
+            VaultCmd::Cost => vault::cost(peers),
+            VaultCmd::Create => vault::create(peers),
+            VaultCmd::Sync => vault::sync(peers),
+        },
+    }
+}
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
new file mode 100644
index 0000000000..f0c3002dc6
--- /dev/null
+++ b/autonomi_cli/src/commands/file.rs
@@ -0,0 +1,33 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use autonomi::Multiaddr;
+use color_eyre::eyre::Context;
+use color_eyre::eyre::Result;
+
+pub fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    println!("Estimate cost to upload file: {file}");
+    Ok(())
+}
+
+pub fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let secret_key = crate::utils::get_secret_key()
+        .wrap_err("The secret key is required to perform this action")?;
+    println!("Uploading file: {file} with secret key: {secret_key}");
+    Ok(())
+}
+
+pub fn download(addr: &str, dest_file: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    println!("Downloading file from {addr} to {dest_file}");
+    Ok(())
+}
+
+pub fn list(peers: Vec<Multiaddr>) -> Result<()> {
+    println!("Listing previous uploads...");
+    Ok(())
+}
diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs
new file mode 100644
index 0000000000..be36490a2c
--- /dev/null
+++ b/autonomi_cli/src/commands/register.rs
@@ -0,0 +1,48 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
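+
+// Placeholder register commands for now: each one only resolves the required keys and
+// prints what the real implementation will eventually do.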
+
+use autonomi::Multiaddr;
+use color_eyre::eyre::Context;
+use color_eyre::eyre::Result;
+
+pub fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::utils::get_register_signing_key()
+        .wrap_err("The register key is required to perform this action")?;
+    println!("Estimate cost to register name: {name} with register key: {register_key}");
+    Ok(())
+}
+
+pub fn create(name: &str, value: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let secret_key = crate::utils::get_secret_key()
+        .wrap_err("The secret key is required to perform this action")?;
+    let register_key = crate::utils::get_register_signing_key()
+        .wrap_err("The register key is required to perform this action")?;
+    println!(
+        "Creating register: {name} with value: {value} using secret key: {secret_key} and register key: {register_key}"
+    );
+    Ok(())
+}
+
+pub fn edit(name: &str, value: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::utils::get_register_signing_key()
+        .wrap_err("The register key is required to perform this action")?;
+    println!("Editing register: {name} with value: {value} using register key: {register_key}");
+    Ok(())
+}
+
+pub fn get(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::utils::get_register_signing_key()
+        .wrap_err("The register key is required to perform this action")?;
+    println!("Getting value of register: {name} with register key: {register_key}");
+    Ok(())
+}
+
+pub fn list(peers: Vec<Multiaddr>) -> Result<()> {
+    println!("Listing previous registers...");
+    Ok(())
+}
diff --git a/autonomi_cli/src/commands/vault.rs b/autonomi_cli/src/commands/vault.rs
new file mode 100644
index 0000000000..9a8d708824
--- /dev/null
+++ b/autonomi_cli/src/commands/vault.rs
@@ -0,0 +1,25 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use autonomi::Multiaddr;
+use color_eyre::eyre::Result;
+
+pub fn cost(_peers: Vec<Multiaddr>) -> Result<()> {
+    println!("The vault feature is coming soon!");
+    Ok(())
+}
+
+pub fn create(_peers: Vec<Multiaddr>) -> Result<()> {
+    println!("The vault feature is coming soon!");
+    Ok(())
+}
+
+pub fn sync(_peers: Vec<Multiaddr>) -> Result<()> {
+    println!("The vault feature is coming soon!");
+    Ok(())
+}
diff --git a/autonomi_cli/src/log_metrics.rs b/autonomi_cli/src/log_metrics.rs
new file mode 100644
index 0000000000..cc109f603f
--- /dev/null
+++ b/autonomi_cli/src/log_metrics.rs
@@ -0,0 +1,39 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
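+
+// Tracing setup for the CLI; with the `metrics` feature enabled it also spawns a
+// background thread that samples process metrics.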
+
+use color_eyre::Result;
+#[cfg(feature = "metrics")]
+use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat};
+
+use crate::opt::Opt;
+
+pub fn init_logging_and_metrics(opt: &Opt) -> Result<()> {
+    let logging_targets = vec![
+        ("sn_networking".to_string(), Level::INFO),
+        ("sn_build_info".to_string(), Level::TRACE),
+        ("autonomi".to_string(), Level::TRACE),
+        ("sn_logging".to_string(), Level::TRACE),
+        ("sn_peers_acquisition".to_string(), Level::TRACE),
+        ("sn_protocol".to_string(), Level::TRACE),
+        ("sn_registers".to_string(), Level::TRACE),
+        ("sn_evm".to_string(), Level::TRACE),
+    ];
+    let mut log_builder = LogBuilder::new(logging_targets);
+    log_builder.output_dest(opt.log_output_dest.clone());
+    log_builder.format(opt.log_format.unwrap_or(LogFormat::Default));
+    let _log_handles = log_builder.initialize()?;
+
+    #[cfg(feature = "metrics")]
+    std::thread::spawn(|| {
+        let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime to spawn metrics thread");
+        // Block this background thread on the metrics task; `rt.spawn` alone would drop
+        // the runtime (and cancel the task) as soon as the thread returned.
+        rt.block_on(async {
+            init_metrics(std::process::id()).await;
+        });
+    });
+    Ok(())
+}
diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs
new file mode 100644
index 0000000000..1ceef45b66
--- /dev/null
+++ b/autonomi_cli/src/main.rs
@@ -0,0 +1,34 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#[macro_use]
+extern crate tracing;
+
+mod commands;
+mod log_metrics;
+mod opt;
+mod utils;
+
+use clap::Parser;
+use color_eyre::Result;
+
+use opt::Opt;
+
+fn main() -> Result<()> {
+    color_eyre::install().expect("Failed to initialise error handler");
+    let opt = Opt::parse();
+    log_metrics::init_logging_and_metrics(&opt).expect("Failed to initialise logging and metrics");
+
+    // Log the full command that was run and the git version
+    info!("\"{}\"", std::env::args().collect::<Vec<String>>().join(" "));
+    let version = sn_build_info::git_info();
+    info!("autonomi client built with git version: {version}");
+    println!("autonomi client built with git version: {version}");
+
+    commands::handle_subcommand(opt)
+}
diff --git a/autonomi_cli/src/opt.rs b/autonomi_cli/src/opt.rs
new file mode 100644
index 0000000000..8f3fb20967
--- /dev/null
+++ b/autonomi_cli/src/opt.rs
@@ -0,0 +1,61 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::time::Duration;
+
+use clap::Parser;
+use color_eyre::Result;
+use sn_logging::{LogFormat, LogOutputDest};
+use sn_peers_acquisition::PeersArgs;
+
+use crate::commands::SubCmd;
+
+// Please do not remove the blank lines in these doc comments.
+// They are used for inserting line breaks when the help menu is rendered in the UI.
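+//
+// An illustrative invocation (the multiaddr is a placeholder, not a real peer):
+// `autonomi_cli --peer /ip4/127.0.0.1/udp/12000/quic-v1/p2p/<PEER_ID> file list`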
+
+#[derive(Parser)]
+#[command(author, version, about, long_about = None)]
+pub(crate) struct Opt {
+    /// Specify the logging output destination.
+    ///
+    /// Valid values are "stdout", "data-dir", or a custom path.
+    ///
+    /// `data-dir` is the default value.
+    ///
+    /// The data directory location is platform specific:
+    /// - Linux: $HOME/.local/share/safe/client/logs
+    /// - macOS: $HOME/Library/Application Support/safe/client/logs
+    /// - Windows: C:\Users\<username>\AppData\Roaming\safe\client\logs
+    #[allow(rustdoc::invalid_html_tags)]
+    #[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")]
+    pub log_output_dest: LogOutputDest,
+
+    /// Specify the logging format.
+    ///
+    /// Valid values are "default" or "json".
+    ///
+    /// If the argument is not used, the default format will be applied.
+    #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)]
+    pub log_format: Option<LogFormat>,
+
+    #[command(flatten)]
+    pub(crate) peers: PeersArgs,
+
+    /// Available sub commands.
+    #[clap(subcommand)]
+    pub command: SubCmd,
+
+    /// The maximum duration to wait for a connection to the network before timing out.
+    #[clap(long = "timeout", global = true, value_parser = |t: &str| -> Result<Duration> { Ok(t.parse().map(Duration::from_secs)?) })]
+    pub connection_timeout: Option<Duration>,
+
+    /// Prevent verification of data storage on the network.
+    ///
+    /// This may increase operation speed, but offers no guarantees that operations were successful.
+    #[clap(global = true, long = "no-verify", short = 'x')]
+    pub no_verify: bool,
+}
diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs
new file mode 100644
index 0000000000..1fc7b219e8
--- /dev/null
+++ b/autonomi_cli/src/utils.rs
@@ -0,0 +1,85 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
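+
+// Key and peer resolution helpers: each getter tries an environment variable first,
+// then falls back to a key file in the client data directory.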
+
+use autonomi::Multiaddr;
+use color_eyre::eyre::eyre;
+use color_eyre::eyre::Context;
+use color_eyre::Result;
+use color_eyre::Section;
+use sn_peers_acquisition::PeersArgs;
+use std::env;
+use std::fs;
+use std::path::PathBuf;
+
+use sn_peers_acquisition::SAFE_PEERS_ENV;
+
+// NB TODO: use those as return values for the functions below
+// use autonomi::register::RegisterKey;
+// use autonomi::wallet::WalletKey;
+
+const SECRET_KEY: &str = "SECRET_KEY";
+const REGISTER_SIGNING_KEY: &str = "REGISTER_SIGNING_KEY";
+
+const SECRET_KEY_FILE: &str = "secret_key";
+const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key";
+
+pub fn get_secret_key() -> Result<String> {
+    // try env var first
+    let why_env_failed = match env::var(SECRET_KEY) {
+        Ok(key) => return Ok(key),
+        Err(e) => e,
+    };
+
+    // try from data dir
+    let dir = get_client_data_dir_path()
+        .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir"))
+        .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY} env var"))?;
+
+    // load the key from file
+    let key_path = dir.join(SECRET_KEY_FILE);
+    fs::read_to_string(&key_path)
+        .wrap_err("Failed to read secret key from file".to_string())
+        .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY} env var or have the key in a file at {key_path:?}"))
+}
+
+pub fn get_register_signing_key() -> Result<String> {
+    // try env var first
+    let why_env_failed = match env::var(REGISTER_SIGNING_KEY) {
+        Ok(key) => return Ok(key),
+        Err(e) => e,
+    };
+
+    // try from data dir
+    let dir = get_client_data_dir_path()
+        .wrap_err(format!("Failed to obtain register signing key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir"))
+        .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY} env var"))?;
+
+    // load the key from file
+    let key_path = dir.join(REGISTER_SIGNING_KEY_FILE);
+    fs::read_to_string(&key_path)
+        .wrap_err("Failed to read secret key from file".to_string())
+        .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY} env var or have the key in a file at {key_path:?}"))
+}
+
+pub fn get_client_data_dir_path() -> Result<PathBuf> {
+    let mut home_dirs = dirs_next::data_dir()
+        .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?;
+    home_dirs.push("safe");
+    home_dirs.push("client");
+    std::fs::create_dir_all(home_dirs.as_path())
+        .wrap_err("Failed to create data dir".to_string())?;
+    Ok(home_dirs)
+}
+
+pub fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> {
+    let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime to spawn peers acquisition thread");
+    rt.block_on(peers.get_peers())
+        .wrap_err(format!("Please provide valid Network peers to connect to"))
+        .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var"))
+        .with_suggestion(|| format!("a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere"))
+}
From 1dbdb89df51303b443b55d168b469cc757c53c6d Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 1 Oct 2024 17:55:23 +0900
Subject: [PATCH 081/255] feat: cli with files upload download in place but
 not working
---
 Cargo.lock                            |  1 +
 autonomi/Cargo.toml                   |  1 +
 autonomi/src/client/address.rs        | 42 ++++++++++++++++
 autonomi/src/client/data.rs           | 27 +++++++++-
 autonomi/src/client/files.rs          | 55 +++++++++++++++++++--
autonomi/src/client/mod.rs | 2 + autonomi/src/lib.rs | 3 ++ autonomi_cli/src/actions/connect.rs | 35 +++++++++++++ autonomi_cli/src/actions/mod.rs | 11 +++++ autonomi_cli/src/commands.rs | 18 +++---- autonomi_cli/src/commands/file.rs | 71 ++++++++++++++++++++++++--- autonomi_cli/src/commands/register.rs | 8 +-- autonomi_cli/src/main.rs | 4 +- autonomi_cli/src/utils.rs | 12 +++-- 14 files changed, 261 insertions(+), 29 deletions(-) create mode 100644 autonomi/src/client/address.rs create mode 100644 autonomi_cli/src/actions/connect.rs create mode 100644 autonomi_cli/src/actions/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 0af11a7cf3..f2c610889d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1130,6 +1130,7 @@ dependencies = [ "const-hex", "evmlib", "eyre", + "hex 0.4.3", "libp2p 0.54.1", "rand 0.8.5", "rmp-serde", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index aeeebe5aaf..876936e617 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -27,6 +27,7 @@ curv = { version = "0.10.1", package = "sn_curv", default-features = false, feat eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } const-hex = "1.12.0" evmlib = { path = "../evmlib", version = "0.1" } +hex = "~0.4.3" libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" diff --git a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs new file mode 100644 index 0000000000..e390c62d0a --- /dev/null +++ b/autonomi/src/client/address.rs @@ -0,0 +1,42 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
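+
+// Hex <-> XorName helpers used by the CLI to print and parse content addresses.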
+
+use xor_name::XorName;
+
+#[derive(Debug, thiserror::Error)]
+pub enum DataError {
+    #[error("Invalid XorName")]
+    InvalidXorName,
+    #[error("Input address is not a hex string")]
+    InvalidHexString,
+}
+
+pub fn str_to_xorname(addr: &str) -> Result<XorName, DataError> {
+    let bytes = hex::decode(addr).map_err(|_| DataError::InvalidHexString)?;
+    let xor = XorName(bytes.try_into().map_err(|_| DataError::InvalidXorName)?);
+    Ok(xor)
+}
+
+pub fn xorname_to_str(addr: XorName) -> String {
+    hex::encode(addr)
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use xor_name::XorName;
+
+    #[test]
+    fn test_xorname_to_str() {
+        let rng = &mut rand::thread_rng();
+        let xorname = XorName::random(rng);
+        let str = xorname_to_str(xorname);
+        let xorname2 = str_to_xorname(&str).expect("Failed to convert back to xorname");
+        assert_eq!(xorname, xorname2);
+    }
+}
\ No newline at end of file
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index c1cd0ecdd7..8681e6a47f 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -13,7 +13,7 @@ use evmlib::common::{QuoteHash, QuotePayment, TxHash};
 use evmlib::wallet::Wallet;
 use libp2p::futures;
 use rand::{thread_rng, Rng};
-use sn_evm::ProofOfPayment;
+use sn_evm::{Amount, AttoTokens, ProofOfPayment};
 use sn_networking::PutRecordCfg;
 use sn_networking::{GetRecordCfg, Network, NetworkError, PayeeQuote, VerificationKind};
 use sn_protocol::{
@@ -54,7 +54,9 @@ pub enum PayError {
     #[error("Could not simultaneously fetch store costs: {0:?}")]
     JoinError(JoinError),
     #[error("Wallet error: {0:?}")]
-    WalletError(#[from] wallet::Error),
+    EvmWalletError(#[from] wallet::Error),
+    #[error("Failed to self-encrypt data.")]
+    SelfEncryption(#[from] crate::self_encryption::Error),
 }
 
 /// Errors that can occur during the get operation.
@@ -184,6 +186,27 @@ impl Client {
         Ok(map_xor_name)
     }
 
+    pub(crate) async fn cost(
+        &mut self,
+        data: Bytes,
+    ) -> Result<AttoTokens, PayError> {
+        let now = std::time::Instant::now();
+        let (data_map_chunk, chunks) = encrypt(data)?;
+
+        tracing::debug!("Encryption took: {:.2?}", now.elapsed());
+
+        let map_xor_name = *data_map_chunk.address().xorname();
+        let mut content_addrs = vec![map_xor_name];
+
+        for chunk in &chunks {
+            content_addrs.push(*chunk.name());
+        }
+
+        let cost_map = self.get_store_quotes(content_addrs.into_iter()).await?;
+        let total_cost = AttoTokens::from_atto(cost_map.iter().map(|(_, quote)| quote.2.cost.as_atto()).sum::<Amount>());
+        Ok(total_cost)
+    }
+
     pub(crate) async fn pay(
         &mut self,
         content_addrs: impl Iterator<Item = XorName>,
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index 524fc6fb7e..84735408e4 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -1,8 +1,9 @@
 use crate::client::data::{GetError, PutError};
 use crate::client::Client;
+use crate::self_encryption::encrypt;
 use bytes::Bytes;
-use evmlib::wallet::Wallet;
 use serde::{Deserialize, Serialize};
+use sn_evm::{Amount, AttoTokens};
 use std::collections::HashMap;
 use std::path::PathBuf;
 use walkdir::WalkDir;
@@ -61,12 +62,59 @@ impl Client {
         Ok(data)
     }
 
+    /// Get the cost to upload a file/dir to the network.
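+    /// Walks the directory tree, self-encrypts each file to derive its chunk addresses,
+    /// sums the store-cost quotes for those chunks, then adds the cost of storing the
+    /// serialised `Root` map itself.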
+    /// quick and dirty implementation, please refactor once files are cleanly implemented
+    pub async fn file_cost(
+        &mut self,
+        path: &PathBuf,
+    ) -> Result<AttoTokens, UploadError> {
+        let mut map = HashMap::new();
+        let mut total_cost = Amount::ZERO;
+
+        for entry in WalkDir::new(path) {
+            let entry = entry?;
+
+            if !entry.file_type().is_file() {
+                continue;
+            }
+
+            let path = entry.path().to_path_buf();
+            tracing::info!("Cost for file: {path:?}");
+
+            let data = tokio::fs::read(&path).await?;
+            let file_bytes = Bytes::from(data);
+            let file_cost = self.cost(file_bytes.clone()).await.expect("TODO");
+
+            total_cost += file_cost.as_atto();
+
+            // re-do encryption to get the correct map xorname here
+            // this code needs refactor
+            let now = std::time::Instant::now();
+            let (data_map_chunk, _) = encrypt(file_bytes).expect("TODO");
+            tracing::debug!("Encryption took: {:.2?}", now.elapsed());
+            let map_xor_name = *data_map_chunk.address().xorname();
+            let data_map_xorname = FilePointer {
+                data_map: map_xor_name,
+                created_at: 0,
+                modified_at: 0,
+            };
+
+            map.insert(path, data_map_xorname);
+        }
+
+        let root = Root { map };
+        let root_serialized = rmp_serde::to_vec(&root).expect("TODO");
+
+        let cost = self.cost(Bytes::from(root_serialized)).await.expect("TODO");
+        Ok(cost)
+    }
+
     /// Upload a directory to the network. The directory is recursively walked.
     #[cfg(feature = "fs")]
     pub async fn upload_from_dir(
         &mut self,
         path: PathBuf,
-        wallet: &Wallet,
+        wallet: &sn_evm::EvmWallet,
     ) -> Result<(Root, XorName), UploadError> {
         let mut map = HashMap::new();
 
@@ -79,6 +127,7 @@ impl Client {
 
             let path = entry.path().to_path_buf();
             tracing::info!("Uploading file: {path:?}");
+            println!("Uploading file: {path:?}");
             let file = upload_from_file(self, path.clone(), wallet).await?;
 
             map.insert(path, file);
@@ -96,7 +145,7 @@ impl Client {
 async fn upload_from_file(
     client: &mut Client,
     path: PathBuf,
-    wallet: &Wallet,
+    wallet: &sn_evm::EvmWallet,
 ) -> Result<FilePointer, UploadError> {
     let data = tokio::fs::read(path).await?;
     let data = Bytes::from(data);
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index 2900ae12b7..bef8e7db34 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -1,3 +1,5 @@
+pub mod address;
+
 #[cfg(feature = "data")]
 pub mod data;
 #[cfg(feature = "files")]
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index 0e28f17dcb..ca78b2d20a 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -25,6 +25,9 @@ pub mod client;
 #[cfg(feature = "data")]
 mod self_encryption;
 
+pub use sn_evm::EvmWallet as Wallet;
+pub use sn_evm::EvmNetwork as Network;
+
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
 pub use bytes::Bytes;
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
diff --git a/autonomi_cli/src/actions/connect.rs b/autonomi_cli/src/actions/connect.rs
new file mode 100644
index 0000000000..ee54c01586
--- /dev/null
+++ b/autonomi_cli/src/actions/connect.rs
@@ -0,0 +1,35 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
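+
+// Wraps `Client::connect` in an indicatif spinner so the user sees progress while the
+// client bootstraps against the supplied peers.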
+
+use autonomi::Client;
+use autonomi::Multiaddr;
+use color_eyre::eyre::bail;
+use indicatif::ProgressBar;
+use color_eyre::eyre::Result;
+use std::time::Duration;
+
+pub async fn connect_to_network(peers: Vec<Multiaddr>) -> Result<Client> {
+    let progress_bar = ProgressBar::new_spinner();
+    progress_bar.enable_steady_tick(Duration::from_millis(120));
+    progress_bar.set_message("Connecting to The Autonomi Network...");
+    let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗");
+    progress_bar.set_style(new_style);
+
+    progress_bar.set_message("Connecting to The Autonomi Network...");
+
+    match Client::connect(&peers).await {
+        Ok(client) => {
+            progress_bar.finish_with_message("Connected to the Network");
+            Ok(client)
+        }
+        Err(e) => {
+            progress_bar.finish_with_message("Failed to connect to the network");
+            bail!("Failed to connect to the network: {e}")
+        }
+    }
+}
diff --git a/autonomi_cli/src/actions/mod.rs b/autonomi_cli/src/actions/mod.rs
new file mode 100644
index 0000000000..eba05b284f
--- /dev/null
+++ b/autonomi_cli/src/actions/mod.rs
@@ -0,0 +1,11 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+mod connect;
+
+pub use connect::connect_to_network;
diff --git a/autonomi_cli/src/commands.rs b/autonomi_cli/src/commands.rs
index f9f6e03123..37fbebb36b 100644
--- a/autonomi_cli/src/commands.rs
+++ b/autonomi_cli/src/commands.rs
@@ -108,22 +108,22 @@ pub enum VaultCmd {
     Sync,
 }
 
-pub fn handle_subcommand(opt: Opt) -> Result<()> {
-    let peers = crate::utils::get_peers(opt.peers)?;
+pub async fn handle_subcommand(opt: Opt) -> Result<()> {
+    let peers = crate::utils::get_peers(opt.peers).await?;
     let cmd = opt.command;
 
     match cmd {
         SubCmd::File { command } => match command {
-            FileCmd::Cost { file } => file::cost(&file, peers),
-            FileCmd::Upload { file } => file::upload(&file, peers),
-            FileCmd::Download { addr, dest_file } => file::download(&addr, &dest_file, peers),
+            FileCmd::Cost { file } => file::cost(&file, peers).await,
+            FileCmd::Upload { file } => file::upload(&file, peers).await,
+            FileCmd::Download { addr, dest_file } => file::download(&addr, &dest_file, peers).await,
             FileCmd::List => file::list(peers),
         },
         SubCmd::Register { command } => match command {
-            RegisterCmd::Cost { name } => register::cost(&name, peers),
-            RegisterCmd::Create { name, value } => register::create(&name, &value, peers),
-            RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers),
-            RegisterCmd::Get { name } => register::get(&name, peers),
+            RegisterCmd::Cost { name } => register::cost(&name, peers).await,
+            RegisterCmd::Create { name, value } => register::create(&name, &value, peers).await,
+            RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers).await,
+            RegisterCmd::Get { name } => register::get(&name, peers).await,
             RegisterCmd::List => register::list(peers),
         },
         SubCmd::Vault { command } => match command {
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
index f0c3002dc6..60c397beb9 100644
--- a/autonomi_cli/src/commands/file.rs
+++ b/autonomi_cli/src/commands/file.rs
@@ -6,25 +6,82 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use autonomi::client::address::{str_to_xorname, xorname_to_str};
+use autonomi::Wallet;
 use autonomi::Multiaddr;
-use color_eyre::eyre::Context;
+use color_eyre::eyre::{eyre, Context};
 use color_eyre::eyre::Result;
+use std::path::PathBuf;
+
+pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let mut client = crate::actions::connect_to_network(peers).await?;
+
+    println!("Getting upload cost...");
+    let cost = client.file_cost(&PathBuf::from(file)).await
+        .wrap_err("Failed to calculate cost for file")?;
 
-pub fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     println!("Estimate cost to upload file: {file}");
+    println!("Total cost: {cost}");
     Ok(())
 }
 
-pub fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
+pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let secret_key = crate::utils::get_secret_key()
         .wrap_err("The secret key is required to perform this action")?;
-    println!("Uploading file: {file} with secret key: {secret_key}");
+    let network = crate::utils::get_evm_network()
+        .wrap_err("Failed to get evm network")?;
+    let wallet = Wallet::new_from_private_key(network, &secret_key)
+        .wrap_err("Failed to load wallet")?;
+
+    let mut client = crate::actions::connect_to_network(peers).await?;
+
+    println!("Uploading data to network...");
+    let (_, xor_name) = client.upload_from_dir(PathBuf::from(file), &wallet).await
+        .wrap_err("Failed to upload file")?;
+    let addr = xorname_to_str(xor_name);
+
+    println!("Successfully uploaded: {file}");
+    println!("At address: {addr}");
     Ok(())
 }
 
-pub fn download(addr: &str, dest_file: &str, peers: Vec<Multiaddr>) -> Result<()> {
-    println!("Downloading file from {addr} to {dest_file}");
-    Ok(())
+pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let mut client = crate::actions::connect_to_network(peers).await?;
+
+    println!("Downloading data from {addr} to {dest_path}");
+    let address = str_to_xorname(addr)
+        .wrap_err("Failed to parse data address")?;
+    let root = client.fetch_root(address).await
+        .wrap_err("Failed to fetch root")?;
+
+    let mut all_errs = vec![];
+    for (path, file) in root.map {
+        println!("Fetching file: {path:?}");
+        let bytes = match client.fetch_file(&file).await {
+            Ok(bytes) => bytes,
+            Err(e) => {
+                let err = format!("Failed to fetch file {path:?}: {e}");
+                all_errs.push(err);
+                continue;
+            }
+        };
+
+        let path = PathBuf::from(dest_path).join(path);
+        let here = PathBuf::from(".");
+        let parent = path.parent().unwrap_or_else(|| &here);
+        std::fs::create_dir_all(parent)?;
+        std::fs::write(path, bytes)?;
+    }
+
+    if all_errs.is_empty() {
+        println!("Successfully downloaded data at: {addr}");
+        Ok(())
+    } else {
+        let err_no = all_errs.len();
+        eprintln!("{err_no} errors while downloading data at: {addr}");
+        eprintln!("{all_errs:#?}");
+        Err(eyre!("Errors while downloading data"))
+    }
 }
 
 pub fn list(peers: Vec<Multiaddr>) -> Result<()> {
diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs
index be36490a2c..657d0d9bb3 100644
--- a/autonomi_cli/src/commands/register.rs
+++ b/autonomi_cli/src/commands/register.rs
@@ -10,14 +10,14 @@
 use autonomi::Multiaddr;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
 
-pub async fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
+pub async fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let register_key = crate::utils::get_register_signing_key()
.wrap_err("The register key is required to perform this action")?; println!("Estimate cost to register name: {name} with register key: {register_key}"); Ok(()) } -pub fn create(name: &str, value: &str, peers: Vec) -> Result<()> { +pub async fn create(name: &str, value: &str, peers: Vec) -> Result<()> { let secret_key = crate::utils::get_secret_key() .wrap_err("The secret key is required to perform this action")?; let register_key = crate::utils::get_register_signing_key() @@ -28,14 +28,14 @@ pub fn create(name: &str, value: &str, peers: Vec) -> Result<()> { Ok(()) } -pub fn edit(name: &str, value: &str, peers: Vec) -> Result<()> { +pub async fn edit(name: &str, value: &str, peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; println!("Editing register: {name} with value: {value} using register key: {register_key}"); Ok(()) } -pub fn get(name: &str, peers: Vec) -> Result<()> { +pub async fn get(name: &str, peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; println!("Getting value of register: {name} with register key: {register_key}"); diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs index 1ceef45b66..f07aaf4cc4 100644 --- a/autonomi_cli/src/main.rs +++ b/autonomi_cli/src/main.rs @@ -13,6 +13,7 @@ mod commands; mod log_metrics; mod opt; mod utils; +mod actions; use clap::Parser; use color_eyre::Result; @@ -30,5 +31,6 @@ fn main() -> Result<()> { info!("autonomi client built with git version: {version}"); println!("autonomi client built with git version: {version}"); - commands::handle_subcommand(opt) + let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime"); + rt.block_on(commands::handle_subcommand(opt)) } diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs index 1fc7b219e8..71c8b779a7 100644 --- a/autonomi_cli/src/utils.rs +++ b/autonomi_cli/src/utils.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
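A note on the async conversion in this patch: the command handlers and `handle_subcommand` become `async fn`, while `main` stays synchronous and drives the whole command through one explicitly built Tokio runtime, replacing the per-call runtime that `get_peers` created internally (removed in the hunk that follows). A minimal sketch of that pattern, assuming tokio with the `rt-multi-thread` feature enabled (`handle_subcommand` here is a stand-in for the real dispatcher):

```rust
use color_eyre::Result;

// Stand-in for the CLI's real async dispatcher.
async fn handle_subcommand() -> Result<()> {
    // ... resolve peers, connect, run the chosen subcommand ...
    Ok(())
}

fn main() -> Result<()> {
    // One runtime for the whole process; block the sync main on the async
    // entry point instead of spinning up a runtime per helper call.
    let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime");
    rt.block_on(handle_subcommand())
}
```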
use autonomi::Multiaddr; +use autonomi::Network; use color_eyre::eyre::eyre; use color_eyre::eyre::Context; use color_eyre::Result; @@ -76,10 +77,15 @@ pub fn get_client_data_dir_path() -> Result { Ok(home_dirs) } -pub fn get_peers(peers: PeersArgs) -> Result> { - let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime to spawn peers acquisition thread"); - rt.block_on(peers.get_peers()) +pub async fn get_peers(peers: PeersArgs) -> Result> { + peers.get_peers().await .wrap_err(format!("Please provide valid Network peers to connect to")) .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var")) .with_suggestion(|| format!("a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere")) } + +pub(crate) fn get_evm_network() -> Result { + // NB TODO load custom network from config file/env/cmd line + let network = Network::ArbitrumOne; + Ok(network) +} From 7831d52391256472e41793f3d6ef2192e55582f5 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 1 Oct 2024 19:20:09 +0900 Subject: [PATCH 082/255] feat: progress bars and factoring --- autonomi/tests/evm/file.rs | 82 ++++++++++++++++++++++++ autonomi_cli/src/actions/download.rs | 51 +++++++++++++++ autonomi_cli/src/actions/mod.rs | 5 ++ autonomi_cli/src/actions/progress_bar.rs | 22 +++++++ autonomi_cli/src/commands/file.rs | 36 +---------- 5 files changed, 161 insertions(+), 35 deletions(-) create mode 100644 autonomi/tests/evm/file.rs create mode 100644 autonomi_cli/src/actions/download.rs create mode 100644 autonomi_cli/src/actions/progress_bar.rs diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs new file mode 100644 index 0000000000..746cd1cea3 --- /dev/null +++ b/autonomi/tests/evm/file.rs @@ -0,0 +1,82 @@ +#[cfg(feature = "evm-payments")] +mod test { + + use crate::common; + use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; + use autonomi::Client; + use bytes::Bytes; + use eyre::bail; + use std::time::Duration; + use tokio::time::sleep; + + #[tokio::test] + async fn file() -> Result<(), Box> { + common::enable_logging(); + + let network = evm_network_from_env(); + let mut client = Client::connect(&[]).await.unwrap(); + let mut wallet = evm_wallet_from_env_or_default(network); + + // let data = common::gen_random_data(1024 * 1024 * 1000); + // let user_key = common::gen_random_data(32); + + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + + sleep(Duration::from_secs(10)).await; + + let root_fetched = client.fetch_root(addr).await?; + + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); + + Ok(()) + } + + #[cfg(feature = "vault")] + #[tokio::test] + async fn file_into_vault() -> eyre::Result<()> { + common::enable_logging(); + + let network = evm_network_from_env(); + + let mut client = Client::connect(&[]) + .await? + .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + let mut wallet = evm_wallet_from_env_or_default(network); + + let (root, addr) = client + .upload_from_dir("tests/file/test_dir".into(), &mut wallet) + .await?; + sleep(Duration::from_secs(2)).await; + + let root_fetched = client.fetch_root(addr).await?; + + assert_eq!( + root.map, root_fetched.map, + "root fetched should match root put" + ); + + // now assert over the stored account packet + let new_client = Client::connect(&[]) + .await? 
+ .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + + if let Some(ap) = new_client.fetch_and_decrypt_vault().await? { + let ap_root_fetched = Client::deserialise_root(ap)?; + + assert_eq!( + root.map, ap_root_fetched.map, + "root fetched should match root put" + ); + } else { + bail!("No account packet found"); + } + + Ok(()) + } +} diff --git a/autonomi_cli/src/actions/download.rs b/autonomi_cli/src/actions/download.rs new file mode 100644 index 0000000000..f27c3f3e37 --- /dev/null +++ b/autonomi_cli/src/actions/download.rs @@ -0,0 +1,51 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use autonomi::{client::address::str_to_xorname, Client}; +use color_eyre::eyre::{eyre, Context, Result}; +use std::path::PathBuf; +use super::get_progress_bar; + +pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> { + let address = str_to_xorname(addr) + .wrap_err("Failed to parse data address")?; + let root = client.fetch_root(address).await + .wrap_err("Failed to fetch data from address")?; + + let progress_bar = get_progress_bar(root.map.len() as u64)?; + let mut all_errs = vec![]; + for (path, file) in root.map { + progress_bar.println(format!("Fetching file: {path:?}...")); + let bytes = match client.fetch_file(&file).await { + Ok(bytes) => bytes, + Err(e) => { + let err = format!("Failed to fetch file {path:?}: {e}"); + all_errs.push(err); + continue; + } + }; + + let path = PathBuf::from(dest_path).join(path); + let here = PathBuf::from("."); + let parent = path.parent().unwrap_or_else(|| &here); + std::fs::create_dir_all(parent)?; + std::fs::write(path, bytes)?; + progress_bar.clone().inc(1); + } + progress_bar.finish_and_clear(); + + if all_errs.is_empty() { + println!("Successfully downloaded data at: {addr}"); + Ok(()) + } else { + let err_no = all_errs.len(); + eprintln!("{err_no} errors while downloading data at: {addr}"); + eprintln!("{all_errs:#?}"); + Err(eyre!("Errors while downloading data")) + } +} \ No newline at end of file diff --git a/autonomi_cli/src/actions/mod.rs b/autonomi_cli/src/actions/mod.rs index eba05b284f..8b4662c3d9 100644 --- a/autonomi_cli/src/actions/mod.rs +++ b/autonomi_cli/src/actions/mod.rs @@ -7,5 +7,10 @@ // permissions and limitations relating to use of the SAFE Network Software. mod connect; +mod download; +mod progress_bar; pub use connect::connect_to_network; +pub use download::download; + +pub use progress_bar::get_progress_bar; diff --git a/autonomi_cli/src/actions/progress_bar.rs b/autonomi_cli/src/actions/progress_bar.rs new file mode 100644 index 0000000000..4c6bbdf7bf --- /dev/null +++ b/autonomi_cli/src/actions/progress_bar.rs @@ -0,0 +1,22 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
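A note on `actions/download.rs` above: a failed `fetch_file` no longer aborts the whole download. Each error is recorded and the loop moves on, the progress bar (built by the `get_progress_bar` helper in the new `progress_bar` module below) ticks once per file, and the command fails a single time at the end with the full error list. A reduced sketch of that accumulate-then-report shape, where `fetch` is a hypothetical stand-in for `client.fetch_file`:

```rust
use color_eyre::eyre::{eyre, Result};

// Hypothetical stand-in for a fallible per-file network fetch.
fn fetch(name: &str) -> Result<Vec<u8>> {
    Ok(name.as_bytes().to_vec())
}

fn fetch_all(names: &[&str]) -> Result<()> {
    let mut all_errs = Vec::new();
    for name in names {
        match fetch(name) {
            // A real implementation would write the bytes to disk here.
            Ok(_bytes) => {}
            Err(e) => {
                // Record the failure and keep going with the remaining files.
                all_errs.push(format!("Failed to fetch {name}: {e}"));
                continue;
            }
        }
    }
    if all_errs.is_empty() {
        Ok(())
    } else {
        // Surface every failure at once rather than only the first.
        eprintln!("{} errors while downloading: {all_errs:#?}", all_errs.len());
        Err(eyre!("Errors while downloading data"))
    }
}
```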
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use indicatif::{ProgressBar, ProgressStyle}; +use std::time::Duration; +use color_eyre::eyre::Result; + +pub fn get_progress_bar(length: u64) -> Result { + let progress_bar = ProgressBar::new(length); + progress_bar.set_style( + ProgressStyle::default_bar() + .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")? + .progress_chars("#>-"), + ); + progress_bar.enable_steady_tick(Duration::from_millis(100)); + Ok(progress_bar) +} diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs index 60c397beb9..17245b9eba 100644 --- a/autonomi_cli/src/commands/file.rs +++ b/autonomi_cli/src/commands/file.rs @@ -47,41 +47,7 @@ pub async fn upload(file: &str, peers: Vec) -> Result<()> { pub async fn download(addr: &str, dest_path: &str, peers: Vec) -> Result<()> { let mut client = crate::actions::connect_to_network(peers).await?; - - println!("Downloading data from {addr} to {dest_path}"); - let address = str_to_xorname(addr) - .wrap_err("Failed to parse data address")?; - let root = client.fetch_root(address).await - .wrap_err("Failed to fetch root")?; - - let mut all_errs = vec![]; - for (path, file) in root.map { - println!("Fetching file: {path:?}"); - let bytes = match client.fetch_file(&file).await { - Ok(bytes) => bytes, - Err(e) => { - let err = format!("Failed to fetch file {path:?}: {e}"); - all_errs.push(err); - continue; - } - }; - - let path = PathBuf::from(dest_path).join(path); - let here = PathBuf::from("."); - let parent = path.parent().unwrap_or_else(|| &here); - std::fs::create_dir_all(parent)?; - std::fs::write(path, bytes)?; - } - - if all_errs.is_empty() { - println!("Successfully downloaded data at: {addr}"); - Ok(()) - } else { - let err_no = all_errs.len(); - eprintln!("{err_no} errors while downloading data at: {addr}"); - eprintln!("{all_errs:#?}"); - Err(eyre!("Errors while downloading data")) - } + crate::actions::download(addr, dest_path, &mut client).await } pub fn list(peers: Vec) -> Result<()> { From e3d72fd4183199f1bf327b93fc3fd897d1c415e8 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 1 Oct 2024 19:26:49 +0900 Subject: [PATCH 083/255] fix: feat flags --- autonomi_cli/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml index 247ec69124..a1c6bd5705 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi_cli/Cargo.toml @@ -10,7 +10,7 @@ metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] [dependencies] -autonomi = { path = "../autonomi", version = "0.1.0" } +autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files"] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" dirs-next = "~2.0.0" From 05da821a1c8e5eacd35bbe95c6e7b733177015b1 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 1 Oct 2024 19:48:48 +0900 Subject: [PATCH 084/255] feat: working upload and download --- autonomi_cli/src/commands/file.rs | 6 +++--- autonomi_cli/src/commands/register.rs | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/autonomi_cli/src/commands/file.rs 
b/autonomi_cli/src/commands/file.rs index 17245b9eba..acfbfc94f8 100644 --- a/autonomi_cli/src/commands/file.rs +++ b/autonomi_cli/src/commands/file.rs @@ -6,10 +6,10 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use autonomi::client::address::{str_to_xorname, xorname_to_str}; +use autonomi::client::address::xorname_to_str; use autonomi::Wallet; use autonomi::Multiaddr; -use color_eyre::eyre::{eyre, Context}; +use color_eyre::eyre::Context; use color_eyre::eyre::Result; use std::path::PathBuf; @@ -50,7 +50,7 @@ pub async fn download(addr: &str, dest_path: &str, peers: Vec) -> Res crate::actions::download(addr, dest_path, &mut client).await } -pub fn list(peers: Vec) -> Result<()> { +pub fn list(_peers: Vec) -> Result<()> { println!("Listing previous uploads..."); Ok(()) } diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index 657d0d9bb3..fd57d678e0 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -10,14 +10,14 @@ use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; -pub async fn cost(name: &str, peers: Vec) -> Result<()> { +pub async fn cost(name: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; println!("Estimate cost to register name: {name} with register key: {register_key}"); Ok(()) } -pub async fn create(name: &str, value: &str, peers: Vec) -> Result<()> { +pub async fn create(name: &str, value: &str, _peers: Vec) -> Result<()> { let secret_key = crate::utils::get_secret_key() .wrap_err("The secret key is required to perform this action")?; let register_key = crate::utils::get_register_signing_key() @@ -28,21 +28,21 @@ pub async fn create(name: &str, value: &str, peers: Vec) -> Result<() Ok(()) } -pub async fn edit(name: &str, value: &str, peers: Vec) -> Result<()> { +pub async fn edit(name: &str, value: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; println!("Editing register: {name} with value: {value} using register key: {register_key}"); Ok(()) } -pub async fn get(name: &str, peers: Vec) -> Result<()> { +pub async fn get(name: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; println!("Getting value of register: {name} with register key: {register_key}"); Ok(()) } -pub fn list(peers: Vec) -> Result<()> { +pub fn list(_peers: Vec) -> Result<()> { println!("Listing previous registers..."); Ok(()) } From d48f65e388dde809d2f18e0bdb647ad603d01acd Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 12:51:44 +0200 Subject: [PATCH 085/255] refactor(autonomi): pass vault key as parameter --- autonomi/src/client/files.rs | 29 ++++++++++++------ autonomi/src/client/mod.rs | 10 +----- autonomi/src/client/vault.rs | 59 +++++++++++------------------------- autonomi/tests/file.rs | 28 ++++++----------- 4 files changed, 49 insertions(+), 77 deletions(-) diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs index 524fc6fb7e..e53ab37d32 100644 --- a/autonomi/src/client/files.rs +++ b/autonomi/src/client/files.rs @@ -14,6 +14,23 @@ pub struct Root { pub map: HashMap, } +impl Root 
{ + /// Deserialize from bytes. + pub fn from_bytes(data: Bytes) -> Result { + let root: Root = rmp_serde::from_slice(&data[..])?; + + Ok(root) + } + + /// Serialize to bytes. + pub fn into_bytes(&self) -> Result { + let root_serialized = rmp_serde::to_vec(&self)?; + let root_serialized = Bytes::from(root_serialized); + + Ok(root_serialized) + } +} + /// Structure that describes a file on the network. The actual data is stored in /// chunks, to be constructed with the address pointing to the data map. /// @@ -46,13 +63,7 @@ impl Client { pub async fn fetch_root(&mut self, address: XorName) -> Result { let data = self.get(address).await?; - Self::deserialize_root(data) - } - - pub fn deserialize_root(data: Bytes) -> Result { - let root: Root = rmp_serde::from_slice(&data[..]).expect("TODO"); - - Ok(root) + Ok(Root::from_bytes(data)?) } /// Fetch the file pointed to by the given pointer. @@ -85,9 +96,9 @@ impl Client { } let root = Root { map }; - let root_serialized = rmp_serde::to_vec(&root).expect("TODO"); + let root_serialized = root.into_bytes()?; - let xor_name = self.put(Bytes::from(root_serialized), wallet).await?; + let xor_name = self.put(root_serialized, wallet).await?; Ok((root, xor_name)) } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 2900ae12b7..e5eb967f03 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -9,8 +9,6 @@ pub mod vault; use std::{collections::HashSet, time::Duration}; -#[cfg(feature = "vault")] -use bls::SecretKey; use libp2p::{identity::Keypair, Multiaddr}; use sn_networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; @@ -37,8 +35,6 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; #[derive(Clone)] pub struct Client { pub(crate) network: Network, - #[cfg(feature = "vault")] - pub(crate) vault_secret_key: Option, } /// Error returned by [`Client::connect`]. @@ -88,11 +84,7 @@ impl Client { receiver.await.expect("sender should not close")?; - Ok(Self { - network, - #[cfg(feature = "vault")] - vault_secret_key: None, - }) + Ok(Self { network }) } } diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 527ca2b9b7..afe38c0825 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -17,8 +17,6 @@ use tracing::info; pub enum VaultError { #[error("Could not generate Vault secret key from entropy: {0:?}")] Bls(#[from] bls::Error), - #[error("No Vault has been defined. Use `client.with_vault_entropy` to define one.")] - NoVaultPacketDefined, #[error("Scratchpad found at {0:?} was not a valid record.")] CouldNotDeserializeVaultScratchPad(ScratchpadAddress), #[error("Protocol: {0}")] @@ -28,37 +26,22 @@ pub enum VaultError { } impl Client { - /// Add a vault secret key to the client - /// - /// The secret key is derived from the supplied entropy bytes. - pub fn with_vault_entropy(mut self, bytes: Bytes) -> Result { - // simple hash as XORNAME_LEN == SK_LENs - let xorname = xor_name::XorName::from_content(&bytes); - // before generating the sk from these bytes. - self.vault_secret_key = Some(SecretKey::from_bytes(xorname.0)?); - - Ok(self) - } - /// Retrieves and returns a decrypted vault if one exists. 
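An aside on the `Root` changes above: `from_bytes`/`into_bytes` wrap the bare `rmp_serde` calls that previously panicked via `expect("TODO")`, so (de)serialisation failures now propagate as errors. The vault API below makes the same move from implicit to explicit: the secret key becomes a parameter rather than state stored on the client. A minimal sketch of the MessagePack round-trip, assuming serde's `derive` feature and a reduced stand-in `Root`:

```rust
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

// Reduced stand-in for the real Root type.
#[derive(Serialize, Deserialize)]
struct Root {
    map: HashMap<String, String>,
}

// Serialise to MessagePack, propagating encode errors instead of panicking.
fn into_bytes(root: &Root) -> Result<Bytes, rmp_serde::encode::Error> {
    Ok(Bytes::from(rmp_serde::to_vec(root)?))
}

// Deserialise from MessagePack, propagating decode errors.
fn from_bytes(data: &Bytes) -> Result<Root, rmp_serde::decode::Error> {
    rmp_serde::from_slice(&data[..])
}
```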
- pub async fn fetch_and_decrypt_vault(&self) -> Result, VaultError> { - let Some(vault_secret_key) = self.vault_secret_key.as_ref() else { - return Err(VaultError::NoVaultPacketDefined); - }; - - let pad = self.get_vault_from_network().await?; + pub async fn fetch_and_decrypt_vault( + &self, + secret_key: &SecretKey, + ) -> Result, VaultError> { + let pad = self.get_vault_from_network(secret_key).await?; - Ok(pad.decrypt_data(vault_secret_key)?) + Ok(pad.decrypt_data(secret_key)?) } /// Gets the vault Scratchpad from a provided client public key - async fn get_vault_from_network(&self) -> Result { - // let vault = self.vault.as_ref()?; - let Some(vault_secret_key) = self.vault_secret_key.as_ref() else { - return Err(VaultError::NoVaultPacketDefined); - }; - - let client_pk = vault_secret_key.public_key(); + async fn get_vault_from_network( + &self, + secret_key: &SecretKey, + ) -> Result { + let client_pk = secret_key.public_key(); let scratch_address = ScratchpadAddress::new(client_pk); let network_address = NetworkAddress::from_scratchpad_address(scratch_address); @@ -85,23 +68,17 @@ impl Client { /// Put data into the client's VaultPacket /// - /// Returns Ok(None) early if no vault packet is defined. - /// /// Pays for a new VaultPacket if none yet created for the client. Returns the current version /// of the data on success. - pub async fn write_bytes_to_vault_if_defined( + pub async fn write_bytes_to_vault( &mut self, data: Bytes, wallet: &mut Wallet, - ) -> Result, PutError> { - // Exit early if no vault packet defined - let Some(client_sk) = self.vault_secret_key.as_ref() else { - return Ok(None); - }; - - let client_pk = client_sk.public_key(); + secret_key: &SecretKey, + ) -> Result { + let client_pk = secret_key.public_key(); - let pad_res = self.get_vault_from_network().await; + let pad_res = self.get_vault_from_network(secret_key).await; let mut is_new = true; let mut scratch = if let Ok(existing_data) = pad_res { @@ -119,7 +96,7 @@ impl Client { Scratchpad::new(client_pk) }; - let next_count = scratch.update_and_sign(data, client_sk); + let next_count = scratch.update_and_sign(data, secret_key); let scratch_address = scratch.network_address(); let scratch_key = scratch_address.to_record_key(); @@ -172,6 +149,6 @@ impl Client { self.network.put_record(record, &put_cfg).await?; - Ok(Some(next_count)) + Ok(next_count) } } diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 952f859631..0ecb19d292 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -2,10 +2,6 @@ mod common; use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; -#[cfg(feature = "vault")] -use bytes::Bytes; -#[cfg(feature = "vault")] -use eyre::bail; use std::time::Duration; use tokio::time::sleep; @@ -18,9 +14,6 @@ async fn file() -> Result<(), Box> { let mut client = Client::connect(&[]).await.unwrap(); let wallet = evm_wallet_from_env_or_default(network); - // let data = common::gen_random_data(1024 * 1024 * 1000); - // let user_key = common::gen_random_data(32); - let (root, addr) = client .upload_from_dir("tests/file/test_dir".into(), &wallet) .await?; @@ -44,11 +37,9 @@ async fn file_into_vault() -> eyre::Result<()> { let network = evm_network_from_env(); - let mut client = Client::connect(&[]) - .await? 
- .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - - let wallet = evm_wallet_from_env_or_default(network); + let mut client = Client::connect(&[]).await?; + let mut wallet = evm_wallet_from_env_or_default(network); + let client_sk = bls::SecretKey::random(); let (root, addr) = client .upload_from_dir("tests/file/test_dir".into(), &wallet) @@ -56,6 +47,9 @@ async fn file_into_vault() -> eyre::Result<()> { sleep(Duration::from_secs(2)).await; let root_fetched = client.fetch_root(addr).await?; + client + .write_bytes_to_vault(root.into_bytes()?, &mut wallet, &client_sk) + .await?; assert_eq!( root.map, root_fetched.map, @@ -63,19 +57,17 @@ async fn file_into_vault() -> eyre::Result<()> { ); // now assert over the stored account packet - let new_client = Client::connect(&[]) - .await? - .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; + let new_client = Client::connect(&[]).await?; - if let Some(ap) = new_client.fetch_and_decrypt_vault().await? { - let ap_root_fetched = Client::deserialize_root(ap)?; + if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? { + let ap_root_fetched = autonomi::client::files::Root::from_bytes(ap)?; assert_eq!( root.map, ap_root_fetched.map, "root fetched should match root put" ); } else { - bail!("No account packet found"); + eyre::bail!("No account packet found"); } Ok(()) From d95138014d377613b09c3b30b2ca0dceb0772239 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 1 Oct 2024 01:10:13 +0530 Subject: [PATCH 086/255] chore(ci): run network tests that are restricted by feature flags --- .github/workflows/merge.yml | 2 +- .github/workflows/nightly.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index b95a0a3488..df109d4d29 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -127,7 +127,7 @@ jobs: - name: Run network tests timeout-minutes: 25 - run: cargo test --release --package sn_networking + run: cargo test --release --package sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 7165866f79..de69269638 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -154,7 +154,7 @@ jobs: - name: Run network tests timeout-minutes: 25 - run: cargo test --release -p sn_networking + run: cargo test --release -p sn_networking --features="open-metrics" - name: Run protocol tests timeout-minutes: 25 From 309b4bee801d9be17ea4ed10528d27668f699d8e Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 14:34:16 +0200 Subject: [PATCH 087/255] ci(global): disable tests relying on `safe` --- .github/workflows/merge.yml | 1624 +++++++++++++++++------------------ 1 file changed, 812 insertions(+), 812 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index b95a0a3488..545010b2ba 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -92,362 +92,90 @@ jobs: echo "All packages built successfully. Cleaning up..." 
cargo clean - unit: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Unit Tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 - - - name: Check we're on the right commit - run: git log -1 --oneline - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Run CLI tests - timeout-minutes: 25 - run: cargo test --release --package sn_cli -- --skip test_acc_packet_ - - # We do not run client `--tests` here as they can require a network - - name: Run client tests - timeout-minutes: 25 - run: | - cargo test --release --package sn_client --lib - cargo test --release --package sn_client --doc - - - name: Run node tests - timeout-minutes: 25 - run: cargo test --release --package sn_node --lib - - - name: Run network tests - timeout-minutes: 25 - run: cargo test --release --package sn_networking - - - name: Run protocol tests - timeout-minutes: 25 - run: cargo test --release --package sn_protocol - - - name: Run transfers tests - timeout-minutes: 25 - run: cargo test --release --package sn_transfers - - - name: Run logging tests - timeout-minutes: 25 - run: cargo test --release --package sn_logging - - - name: Run register tests - timeout-minutes: 25 - run: cargo test --release --package sn_registers - env: - # this will speed up PR merge flows, while giving us a modicum - # of proptesting - # we do many more runs on the nightly run - PROPTEST_CASES: 50 - - e2e: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: E2E tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - safe_path: /home/runner/.local/share/safe - - os: windows-latest - safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - - os: macos-latest - safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 - - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - # only these unit tests require a network, the rest are run above - - name: Run sn_client --tests - run: cargo test --package sn_client --release --tests - env: - SN_LOG: "all" - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 15 - - - name: Create and fund a wallet to pay for files storage - run: | - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 5 - - - name: Start a client to upload cost estimate - run: ./target/release/safe --log-output-dest=data-dir files estimate "./resources" - env: - SN_LOG: "all" - timeout-minutes: 15 - - - name: Start a client to upload files - run: ./target/release/safe --log-output-dest=data-dir files upload "./resources" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 15 - - - name: Start a client to download files - run: ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 2 - - # Client FoldersApi tests against local network - - name: Client FoldersApi tests against local network - run: cargo test --release --package sn_client --test folders_api - env: - SN_LOG: "all" - timeout-minutes: 10 - - # CLI Acc-Packet files and folders tests against local network - - name: CLI Acc-Packet files and folders tests - run: cargo test --release -p sn_cli test_acc_packet -- --nocapture - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Start a client to create a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register create -n baobao - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Start a client to get a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register get -n baobao - env: - SN_LOG: "all" - timeout-minutes: 2 - - - name: Start a client to edit a register writable by the owner only - run: ./target/release/safe --log-output-dest=data-dir register edit -n baobao wood - env: - SN_LOG: "all" - timeout-minutes: 10 - # - # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) - # - - name: Start a client to create a register writable by anyone - id: register-address - if: matrix.os != 'windows-latest' - run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $GITHUB_OUTPUT - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Start a client to create a register writable by anyone - id: register-address-windows - if: matrix.os == 'windows-latest' - run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $ENV:GITHUB_OUTPUT - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Start a client to get a register writable by anyone (current client is the owner) - run: ./target/release/safe --log-output-dest=data-dir register get -n trycatch - env: - SN_LOG: "all" - timeout-minutes: 2 - - - name: Start a client to edit a register writable by anyone (current client is the owner) - run: ./target/release/safe --log-output-dest=data-dir register edit -n trycatch wood - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Delete client subdir to generate new client - shell: bash - run: rm -rf ${{ matrix.safe_path }}/client - # - # Next four steps are same with a slight difference in the which output step they read from - # - - name: Start a client to get a 
register writable by anyone (new client is not the owner) - if: matrix.os != 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address.outputs.REGISTER_ADDRESS }} - env: - SN_LOG: "all" - timeout-minutes: 2 - - - name: Start a client to edit a register writable by anyone (new client is not the owner) - if: matrix.os != 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address.outputs.REGISTER_ADDRESS }} water - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Start a client to get a register writable by anyone (new client is not the owner) - if: matrix.os == 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} - env: - SN_LOG: "all" - timeout-minutes: 2 - - - name: Start a client to edit a register writable by anyone (new client is not the owner) - if: matrix.os == 'windows-latest' - run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} water - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_e2e - platform: ${{ matrix.os }} - - spend_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: spend tests against network - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 + # unit: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Unit Tests + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest, windows-latest, macos-latest] + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable + # - name: Check we're on the right commit + # run: git log -1 --oneline - - uses: Swatinem/rust-cache@v2 + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable - - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode - timeout-minutes: 30 + # - uses: Swatinem/rust-cache@v2 - - name: Build faucet binary - run: cargo build --release --bin faucet --features="local-discovery,gifting" - timeout-minutes: 30 + # - name: Run CLI tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_cli -- --skip test_acc_packet_ - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 + # # We do not run client `--tests` here as they can require a network + # - name: Run client tests + # timeout-minutes: 25 + # run: | + # cargo test --release --package sn_client --lib + # cargo test --release --package sn_client --doc - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true + # - name: Run node tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_node --lib - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Run network tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_networking - - name: execute the sequential transfers tests - run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 + # - name: Run protocol tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_protocol - - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 + # - name: Run transfers tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_transfers - - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 25 + # - name: Run logging tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_logging - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend - platform: ${{ matrix.os }} + # - name: Run register tests + # timeout-minutes: 25 + # run: cargo test --release --package sn_registers + # env: + # # this will speed up PR merge flows, while giving us a modicum + # # of proptesting + # # we do many more runs on the nightly run + # PROPTEST_CASES: 50 - # # runs with increased node count - # spend_simulation: + # e2e: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: spend simulation + # name: E2E tests # runs-on: ${{ matrix.os }} # strategy: # matrix: - # os: [ ubuntu-latest, windows-latest, macos-latest ] + # include: + # - os: ubuntu-latest + # safe_path: /home/runner/.local/share/safe + # - os: windows-latest + # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # - os: macos-latest + # safe_path: /Users/runner/Library/Application Support/safe # steps: # - uses: actions/checkout@v4 # - name: Install Rust # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features=local-discovery --bin safenode + # run: cargo build --release --bin safenode --bin safe # timeout-minutes: 30 # - name: Build faucet binary - # run: cargo build --release --bin faucet --features="local-discovery,gifting" - # timeout-minutes: 30 - - # - name: Build testing executable - # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run - # env: - # # only set the target dir for windows to bypass the linker issue. - # # happens if we build the node manager via testnet action - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # run: cargo build --release --bin faucet --features gifting # timeout-minutes: 30 # - name: Start a local network @@ -455,7 +183,6 @@ jobs: # with: # action: start # interval: 2000 - # node-count: 50 # node-path: target/release/safenode # faucet-path: target/release/faucet # platform: ${{ matrix.os }} @@ -471,21 +198,294 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: execute the spend simulation - # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # # only these unit tests require a network, the rest are run above + # - name: Run sn_client --tests + # run: cargo test --package sn_client --release --tests # env: + # SN_LOG: "all" + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - # timeout-minutes: 25 + # timeout-minutes: 15 - # - name: Stop the local network and upload logs - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_spend_simulation - # platform: ${{ matrix.os }} + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - token_distribution_test: + # - name: Start a client to upload cost estimate + # run: ./target/release/safe --log-output-dest=data-dir files estimate "./resources" + # env: + # SN_LOG: "all" + # timeout-minutes: 15 + + # - name: Start a client to upload files + # run: ./target/release/safe --log-output-dest=data-dir files upload "./resources" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 15 + + # - name: Start a client to download files + # run: ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 2 + + # # Client FoldersApi tests against local network + # - name: Client FoldersApi tests against local network + # run: cargo test --release --package sn_client --test folders_api + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # # CLI Acc-Packet files and folders tests against local network + # - name: CLI Acc-Packet files and folders tests + # run: cargo test --release -p sn_cli test_acc_packet -- --nocapture + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Start a client to create a register writable by the owner only + # run: ./target/release/safe --log-output-dest=data-dir register create -n baobao + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Start a client to get a register writable by the owner only + # run: ./target/release/safe --log-output-dest=data-dir register get -n baobao + # env: + # SN_LOG: "all" + # timeout-minutes: 2 + + # - name: Start a client to edit a register writable by the owner only + # run: ./target/release/safe --log-output-dest=data-dir register edit -n baobao wood + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + # # + # # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) + # # + # - name: Start a client to create a register writable by anyone + # id: register-address + # if: matrix.os != 'windows-latest' + # run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $GITHUB_OUTPUT + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Start a client to create a register writable by anyone + # id: register-address-windows + # if: matrix.os == 'windows-latest' + # run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $ENV:GITHUB_OUTPUT + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Start a client to get a register writable by anyone (current client is the owner) + # run: ./target/release/safe --log-output-dest=data-dir register get -n trycatch + # env: + # SN_LOG: "all" + # timeout-minutes: 2 + + # - name: Start a client to edit a register writable by 
anyone (current client is the owner) + # run: ./target/release/safe --log-output-dest=data-dir register edit -n trycatch wood + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Delete client subdir to generate new client + # shell: bash + # run: rm -rf ${{ matrix.safe_path }}/client + # # + # # Next four steps are same with a slight difference in the which output step they read from + # # + # - name: Start a client to get a register writable by anyone (new client is not the owner) + # if: matrix.os != 'windows-latest' + # run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address.outputs.REGISTER_ADDRESS }} + # env: + # SN_LOG: "all" + # timeout-minutes: 2 + + # - name: Start a client to edit a register writable by anyone (new client is not the owner) + # if: matrix.os != 'windows-latest' + # run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address.outputs.REGISTER_ADDRESS }} water + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Start a client to get a register writable by anyone (new client is not the owner) + # if: matrix.os == 'windows-latest' + # run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} + # env: + # SN_LOG: "all" + # timeout-minutes: 2 + + # - name: Start a client to edit a register writable by anyone (new client is not the owner) + # if: matrix.os == 'windows-latest' + # run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} water + # env: + # SN_LOG: "all" + # timeout-minutes: 10 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_e2e + # platform: ${{ matrix.os }} + + # spend_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: spend tests against network + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ubuntu-latest, windows-latest, macos-latest] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features=local-discovery --bin safenode + # timeout-minutes: 30 + + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # timeout-minutes: 30 + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: execute the sequential transfers tests + # run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 + + # - name: execute the storage payment tests + # run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 + + # - name: execute the double spend tests + # run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_spend + # platform: ${{ matrix.os }} + + # # runs with increased node count + # spend_simulation: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: spend simulation + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ ubuntu-latest, windows-latest, macos-latest ] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features=local-discovery --bin safenode + # timeout-minutes: 30 + + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # timeout-minutes: 30 + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-count: 50 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: execute the spend simulation + # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 25 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_spend_simulation + # platform: ${{ matrix.os }} + + token_distribution_test: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" name: token distribution test runs-on: ${{ matrix.os }} @@ -800,525 +800,525 @@ jobs: exit 1 fi - faucet_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Faucet test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # faucet_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Faucet test + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: Build binaries - run: cargo build --release --bin safenode --bin safe - timeout-minutes: 30 + # - name: install ripgrep + # shell: bash + # run: sudo apt-get install -y ripgrep - - name: Build faucet binary - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 + # - name: Build binaries + # run: cargo build --release --bin safenode --bin safe + # timeout-minutes: 30 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ubuntu-latest - build: true + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features gifting + # timeout-minutes: 30 - - name: Check we're _not_ warned about using default genesis - run: | - if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then - exit 1 - fi - shell: bash + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ubuntu-latest + # build: true - - name: Move built binaries and clear out target dir - shell: bash - run: | - mv target/release/faucet ~/faucet - mv target/release/safe ~/safe - rm -rf target + # - name: Check we're _not_ warned about using default genesis + # run: | + # if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then + # exit 1 + # fi + # shell: bash - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi + # - name: Move built binaries and clear out target dir + # shell: bash + # run: | + # mv target/release/faucet ~/faucet + # mv target/release/safe ~/safe + # rm -rf target - - name: Create and fund a wallet first time - run: | - ~/safe --log-output-dest=data-dir wallet create --no-password - ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - echo "----------" - cat first.txt - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - 
- name: Move faucet log to the working folder - run: | - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $SAFE_DATA_PATH/test_faucet/logs/faucet.log ./faucet_log.log - env: - SN_LOG: "all" - SAFE_DATA_PATH: /home/runner/.local/share/safe - continue-on-error: true - if: always() - timeout-minutes: 1 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Upload faucet log - uses: actions/upload-artifact@main - with: - name: faucet_test_first_faucet_log - path: faucet_log.log - continue-on-error: true - if: always() + # - name: Move faucet log to the working folder + # run: | + # echo "SAFE_DATA_PATH has: " + # ls -l $SAFE_DATA_PATH + # echo "test_faucet foder has: " + # ls -l $SAFE_DATA_PATH/test_faucet + # echo "logs folder has: " + # ls -l $SAFE_DATA_PATH/test_faucet/logs + # mv $SAFE_DATA_PATH/test_faucet/logs/faucet.log ./faucet_log.log + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # continue-on-error: true + # if: always() + # timeout-minutes: 1 - - name: Create and fund a wallet second time - run: | - ls -l /home/runner/.local/share - ls -l /home/runner/.local/share/safe - rm -rf /home/runner/.local/share/safe/test_faucet - rm -rf /home/runner/.local/share/safe/test_genesis - rm -rf /home/runner/.local/share/safe/client - ~/safe --log-output-dest=data-dir wallet create --no-password - ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - echo "----------" - cat second.txt - if grep "genesis is already spent" second.txt; then - echo "Duplicated faucet rejected" - else - echo "Duplicated faucet not rejected!" - exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 5 + # - name: Upload faucet log + # uses: actions/upload-artifact@main + # with: + # name: faucet_test_first_faucet_log + # path: faucet_log.log + # continue-on-error: true + # if: always() - - name: Create and fund a wallet with different keypair - run: | - ls -l /home/runner/.local/share - ls -l /home/runner/.local/share/safe - rm -rf /home/runner/.local/share/safe/test_faucet - rm -rf /home/runner/.local/share/safe/test_genesis - rm -rf /home/runner/.local/share/safe/client - ~/safe --log-output-dest=data-dir wallet create --no-password - if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - echo "Faucet with different genesis key not rejected!" 
-            exit 1
-          else
-            echo "Faucet with different genesis key rejected"
-          fi
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 5
+  #     - name: Create and fund a wallet second time
+  #       run: |
+  #         ls -l /home/runner/.local/share
+  #         ls -l /home/runner/.local/share/safe
+  #         rm -rf /home/runner/.local/share/safe/test_faucet
+  #         rm -rf /home/runner/.local/share/safe/test_genesis
+  #         rm -rf /home/runner/.local/share/safe/client
+  #         ~/safe --log-output-dest=data-dir wallet create --no-password
+  #         ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt
+  #         echo "----------"
+  #         cat second.txt
+  #         if grep "genesis is already spent" second.txt; then
+  #           echo "Duplicated faucet rejected"
+  #         else
+  #           echo "Duplicated faucet not rejected!"
+  #           exit 1
+  #         fi
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 5
 
-      - name: Build faucet binary again without the gifting feature
-        run: cargo build --release --bin faucet
-        timeout-minutes: 30
+  #     - name: Create and fund a wallet with different keypair
+  #       run: |
+  #         ls -l /home/runner/.local/share
+  #         ls -l /home/runner/.local/share/safe
+  #         rm -rf /home/runner/.local/share/safe/test_faucet
+  #         rm -rf /home/runner/.local/share/safe/test_genesis
+  #         rm -rf /home/runner/.local/share/safe/client
+  #         ~/safe --log-output-dest=data-dir wallet create --no-password
+  #         if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then
+  #           echo "Faucet with different genesis key not rejected!"
+  #           exit 1
+  #         else
+  #           echo "Faucet with different genesis key rejected"
+  #         fi
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 5
 
-      - name: Start up a faucet in server mode
-        run: |
-          ls -l /home/runner/.local/share
-          ls -l /home/runner/.local/share/safe
-          rm -rf /home/runner/.local/share/safe/test_faucet
-          rm -rf /home/runner/.local/share/safe/test_genesis
-          rm -rf /home/runner/.local/share/safe/client
-          target/release/faucet server &
-          sleep 60
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 5
+  #     - name: Build faucet binary again without the gifting feature
+  #       run: cargo build --release --bin faucet
+  #       timeout-minutes: 30
 
-      - name: check there is no upload happens
-        shell: bash
-        run: |
-          if grep -r "NanoTokens(10) }, Output" $NODE_DATA_PATH
-          then
-            echo "We find ongoing upload !"
-            exit 1
-          fi
-        env:
-          NODE_DATA_PATH: /home/runner/.local/share/safe/node
-        timeout-minutes: 1
+  #     - name: Start up a faucet in server mode
+  #       run: |
+  #         ls -l /home/runner/.local/share
+  #         ls -l /home/runner/.local/share/safe
+  #         rm -rf /home/runner/.local/share/safe/test_faucet
+  #         rm -rf /home/runner/.local/share/safe/test_genesis
+  #         rm -rf /home/runner/.local/share/safe/client
+  #         target/release/faucet server &
+  #         sleep 60
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 5
 
-      - name: Stop the local network and upload logs
-        if: always()
-        uses: maidsafe/sn-local-testnet-action@main
-        with:
-          action: stop
-          platform: ubuntu-latest
-          log_file_prefix: safe_test_logs_faucet
+  #     - name: check there is no upload happens
+  #       shell: bash
+  #       run: |
+  #         if grep -r "NanoTokens(10) }, Output" $NODE_DATA_PATH
+  #         then
+  #           echo "We find ongoing upload !"
+  #           exit 1
+  #         fi
+  #       env:
+  #         NODE_DATA_PATH: /home/runner/.local/share/safe/node
+  #       timeout-minutes: 1
 
-  large_file_upload_test:
-    if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
-    name: Large file upload
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
+  #     - name: Stop the local network and upload logs
+  #       if: always()
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       with:
+  #         action: stop
+  #         platform: ubuntu-latest
+  #         log_file_prefix: safe_test_logs_faucet
 
-      - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
+  # large_file_upload_test:
+  #   if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
+  #   name: Large file upload
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - uses: actions/checkout@v4
 
-      - name: install ripgrep
-        shell: bash
-        run: sudo apt-get install -y ripgrep
+  #     - name: Install Rust
+  #       uses: dtolnay/rust-toolchain@stable
+  #     - uses: Swatinem/rust-cache@v2
 
-      - name: Check the available space
-        run: |
-          df
-          echo "Home dir:"
-          du -sh /home/runner/
-          echo "Home subdirs:"
-          du -sh /home/runner/*/
-          echo "PWD:"
-          du -sh .
-          echo "PWD subdirs:"
-          du -sh */
-
-      - name: Download material, 1.1G
-        shell: bash
-        run: |
-          wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso
-          ls -l
+  #     - name: install ripgrep
+  #       shell: bash
+  #       run: sudo apt-get install -y ripgrep
 
-      - name: Build binaries
-        run: cargo build --release --bin safenode --bin safe
-        timeout-minutes: 30
+  #     - name: Check the available space
+  #       run: |
+  #         df
+  #         echo "Home dir:"
+  #         du -sh /home/runner/
+  #         echo "Home subdirs:"
+  #         du -sh /home/runner/*/
+  #         echo "PWD:"
+  #         du -sh .
+  #         echo "PWD subdirs:"
+  #         du -sh */
+
+  #     - name: Download material, 1.1G
+  #       shell: bash
+  #       run: |
+  #         wget https://releases.ubuntu.com/14.04.6/ubuntu-14.04.6-desktop-i386.iso
+  #         ls -l
 
-      - name: Build faucet binary
-        run: cargo build --release --bin faucet --features gifting
-        timeout-minutes: 30
+  #     - name: Build binaries
+  #       run: cargo build --release --bin safenode --bin safe
+  #       timeout-minutes: 30
 
-      - name: Start a local network
-        uses: maidsafe/sn-local-testnet-action@main
-        with:
-          action: start
-          interval: 2000
-          node-path: target/release/safenode
-          faucet-path: target/release/faucet
-          platform: ubuntu-latest
-          build: true
+  #     - name: Build faucet binary
+  #       run: cargo build --release --bin faucet --features gifting
+  #       timeout-minutes: 30
 
-      - name: Check we're _not_ warned about using default genesis
-        run: |
-          if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then
-            exit 1
-          fi
-        shell: bash
+  #     - name: Start a local network
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       with:
+  #         action: start
+  #         interval: 2000
+  #         node-path: target/release/safenode
+  #         faucet-path: target/release/faucet
+  #         platform: ubuntu-latest
+  #         build: true
 
-      # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here.
-      # Might be related to additional deps used in the codebase.
-      - name: Move built binaries and clear out target dir
-        shell: bash
-        run: |
-          mv target/release/faucet ~/faucet
-          mv target/release/safe ~/safe
-          rm -rf target
+  #     - name: Check we're _not_ warned about using default genesis
+  #       run: |
+  #         if rg "USING DEFAULT" "${{ matrix.safe_path }}"/*/*/logs; then
+  #           exit 1
+  #         fi
+  #       shell: bash
 
-      - name: Check SAFE_PEERS was set
-        shell: bash
-        run: |
-          if [[ -z "$SAFE_PEERS" ]]; then
-            echo "The SAFE_PEERS variable has not been set"
-            exit 1
-          else
-            echo "SAFE_PEERS has been set to $SAFE_PEERS"
-          fi
+  #     # The test currently fails because the GH runner runs out of disk space. So we clear out the target dir here.
+  #     # Might be related to additional deps used in the codebase.
+  #     - name: Move built binaries and clear out target dir
+  #       shell: bash
+  #       run: |
+  #         mv target/release/faucet ~/faucet
+  #         mv target/release/safe ~/safe
+  #         rm -rf target
 
-      - name: Check the available space post download
-        run: |
-          df
-          echo "Home dir:"
-          du -sh /home/runner/
-          echo "Home subdirs:"
-          du -sh /home/runner/*/
-          echo "PWD:"
-          du -sh .
-          echo "PWD subdirs:"
-          du -sh */
-
-      - name: Create and fund a wallet to pay for files storage
-        run: |
-          ~/safe --log-output-dest=data-dir wallet create --no-password
-          ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
-          ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 5
+  #     - name: Check SAFE_PEERS was set
+  #       shell: bash
+  #       run: |
+  #         if [[ -z "$SAFE_PEERS" ]]; then
+  #           echo "The SAFE_PEERS variable has not been set"
+  #           exit 1
+  #         else
+  #           echo "SAFE_PEERS has been set to $SAFE_PEERS"
+  #         fi
 
-      - name: Start a client to upload
-        run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 30
+  #     - name: Check the available space post download
+  #       run: |
+  #         df
+  #         echo "Home dir:"
+  #         du -sh /home/runner/
+  #         echo "Home subdirs:"
+  #         du -sh /home/runner/*/
+  #         echo "PWD:"
+  #         du -sh .
+ # echo "PWD subdirs:" + # du -sh */ + + # - name: Create and fund a wallet to pay for files storage + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - platform: ubuntu-latest - log_file_prefix: safe_test_logs_large_file_upload - build: true + # - name: Start a client to upload + # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 30 - - name: check there is no failed replication fetch - shell: bash - run: | - if grep -r "failed to fetch" $NODE_DATA_PATH - then - echo "We find failed replication fetch" - exit 1 - fi - env: - NODE_DATA_PATH: /home/runner/.local/share/safe/node - timeout-minutes: 1 + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # platform: ubuntu-latest + # log_file_prefix: safe_test_logs_large_file_upload + # build: true - - name: Check the home dir leftover space - run: | - df - du -sh /home/runner/ + # - name: check there is no failed replication fetch + # shell: bash + # run: | + # if grep -r "failed to fetch" $NODE_DATA_PATH + # then + # echo "We find failed replication fetch" + # exit 1 + # fi + # env: + # NODE_DATA_PATH: /home/runner/.local/share/safe/node + # timeout-minutes: 1 - - name: Confirm the wallet files (cash_notes, confirmed_spends) - run: | - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes -l - ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l - ls $CLIENT_DATA_PATH/logs -l - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 1 + # - name: Check the home dir leftover space + # run: | + # df + # du -sh /home/runner/ - replication_bench_with_heavy_upload: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Replication bench with heavy upload - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # - name: Confirm the wallet files (cash_notes, confirmed_spends) + # run: | + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # ls $CLIENT_DATA_PATH/wallet/confirmed_spends -l + # ls $CLIENT_DATA_PATH/logs -l + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 1 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # replication_bench_with_heavy_upload: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Replication bench with heavy upload + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: Download materials to create two 300MB test_files to be uploaded by client - shell: bash - run: | - mkdir test_data_1 - cd test_data_1 - wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz - wget 
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode_rpc_client-qiWithListeners-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/faucet-qilesssubs-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safe-qilesssubs-x86_64.tar.gz
-          ls -l
-          cd ..
-          tar -cvzf test_data_1.tar.gz test_data_1
-          mkdir test_data_2
-          cd test_data_2
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode-qilesssubs-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safenode_rpc_client-qilesssubs-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/faucet-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safe-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode-DebugMem-x86_64.tar.gz
-          ls -l
-          cd ..
-          tar -cvzf test_data_2.tar.gz test_data_2
-          ls -l
-          mkdir test_data_3
-          cd test_data_3
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode_rpc_client-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/faucet-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safe-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode-DebugMem-x86_64.tar.gz
-          wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode_rpc_client-DebugMem-x86_64.tar.gz
-          ls -l
-          cd ..
-          tar -cvzf test_data_3.tar.gz test_data_3
-          ls -l
-          df
+  #     - name: install ripgrep
+  #       shell: bash
+  #       run: sudo apt-get install -y ripgrep
 
-      - name: Build binaries
-        run: cargo build --release --bin safenode --bin safe
-        timeout-minutes: 30
+  #     - name: Download materials to create two 300MB test_files to be uploaded by client
+  #       shell: bash
+  #       run: |
+  #         mkdir test_data_1
+  #         cd test_data_1
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safe-qiWithListeners-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode-qiWithListeners-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/Qi930/safenode_rpc_client-qiWithListeners-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/faucet-qilesssubs-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/QiSubdivisionBranch/safe-qilesssubs-x86_64.tar.gz
+  #         ls -l
+  #         cd ..
+  #         tar -cvzf test_data_2.tar.gz test_data_2
+  #         ls -l
+  #         mkdir test_data_3
+  #         cd test_data_3
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush/safenode_rpc_client-DebugMem-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/faucet-DebugMem-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safe-DebugMem-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode-DebugMem-x86_64.tar.gz
+  #         wget https://sn-node.s3.eu-west-2.amazonaws.com/joshuef/RemoveArtificalReplPush2/safenode_rpc_client-DebugMem-x86_64.tar.gz
+  #         ls -l
+  #         cd ..
+  #         tar -cvzf test_data_3.tar.gz test_data_3
+  #         ls -l
+  #         df
 
-      - name: Build binaries
-        run: cargo build --release --bin safenode --bin safe
-        timeout-minutes: 30
+  #     - name: Build binaries
+  #       run: cargo build --release --bin safenode --bin safe
+  #       timeout-minutes: 30
 
-      - name: Build faucet binary
-        run: cargo build --release --bin faucet --features gifting
-        timeout-minutes: 30
+  #     - name: Build faucet binary
+  #       run: cargo build --release --bin faucet --features gifting
+  #       timeout-minutes: 30
 
-      - name: Start a local network
-        uses: maidsafe/sn-local-testnet-action@main
-        with:
-          action: start
-          interval: 2000
-          node-path: target/release/safenode
-          faucet-path: target/release/faucet
-          platform: ubuntu-latest
-          build: true
+  #     - name: Start a local network
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       with:
+  #         action: start
+  #         interval: 2000
+  #         node-path: target/release/safenode
+  #         faucet-path: target/release/faucet
+  #         platform: ubuntu-latest
+  #         build: true
 
-      - name: Check SAFE_PEERS was set
-        shell: bash
-        run: |
-          if [[ -z "$SAFE_PEERS" ]]; then
-            echo "The SAFE_PEERS variable has not been set"
-            exit 1
-          else
-            echo "SAFE_PEERS has been set to $SAFE_PEERS"
-          fi
+  #     - name: Check SAFE_PEERS was set
+  #       shell: bash
+  #       run: |
+  #         if [[ -z "$SAFE_PEERS" ]]; then
+  #           echo "The SAFE_PEERS variable has not been set"
+  #           exit 1
+  #         else
+  #           echo "SAFE_PEERS has been set to $SAFE_PEERS"
+  #         fi
 
-      - name: Create and fund a wallet to pay for files storage
-        run: |
-          ./target/release/safe --log-output-dest=data-dir wallet create --no-password
-          ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
-          ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 5
+  #     - name: Create and fund a wallet to pay for files storage
+  #       run: |
+  #         ./target/release/safe --log-output-dest=data-dir wallet create --no-password
+  #         ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
+  #         ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 5
 
-      - name: Start a client to upload first file
-        run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 5
+  #     - name: Start a client to upload first file
+  #       run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 5
 
-      - name: Ensure no leftover cash_notes and payment files
-        run: |
-          expected_cash_notes_files="1"
-          expected_payment_files="0"
-          pwd
-          ls $CLIENT_DATA_PATH/ -l
-          ls $CLIENT_DATA_PATH/wallet -l
-          ls $CLIENT_DATA_PATH/wallet/cash_notes -l
-          cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
-          echo "Find $cash_note_files cash_note files"
-          if [ $expected_cash_notes_files -lt $cash_note_files ]; then
-            echo "Got too many cash_note files leftover: $cash_note_files"
-            exit 1
-          fi
-          ls $CLIENT_DATA_PATH/wallet/payments -l
-          payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
-          if [ $expected_payment_files -lt $payment_files ]; then
-            echo "Got too many payment files leftover: $payment_files"
-            exit 1
-          fi
-        env:
-          CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
-        timeout-minutes: 10
+  #     - name: Ensure no leftover cash_notes and payment files
+  #       run: |
+  #         expected_cash_notes_files="1"
+  #         expected_payment_files="0"
+  #         pwd
+  #         ls $CLIENT_DATA_PATH/ -l
+  #         ls $CLIENT_DATA_PATH/wallet -l
+  #         ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+  #         cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
+  #         echo "Find $cash_note_files cash_note files"
+  #         if [ $expected_cash_notes_files -lt $cash_note_files ]; then
+  #           echo "Got too many cash_note files leftover: $cash_note_files"
+  #           exit 1
+  #         fi
+  #         ls $CLIENT_DATA_PATH/wallet/payments -l
+  #         payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
+  #         if [ $expected_payment_files -lt $payment_files ]; then
+  #           echo "Got too many payment files leftover: $payment_files"
+  #           exit 1
+  #         fi
+  #       env:
+  #         CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
+  #       timeout-minutes: 10
 
-      - name: Wait for certain period
-        run: sleep 300
-        timeout-minutes: 6
+  #     - name: Wait for certain period
+  #       run: sleep 300
+  #       timeout-minutes: 6
 
-      - name: Use same client to upload second file
-        run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_2.tar.gz" --retry-strategy quick
-        env:
-          SN_LOG: "all"
-        timeout-minutes: 10
+  #     - name: Use same client to upload second file
+  #       run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_2.tar.gz" --retry-strategy quick
+  #       env:
+  #         SN_LOG: "all"
+  #       timeout-minutes: 10
 
-      - name: Ensure no leftover cash_notes and payment files
-        run: |
-          expected_cash_notes_files="1"
-          expected_payment_files="0"
-          pwd
-          ls $CLIENT_DATA_PATH/ -l
-          ls $CLIENT_DATA_PATH/wallet -l
-          ls $CLIENT_DATA_PATH/wallet/cash_notes -l
-          cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l)
-          if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then
-            echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files"
-            exit 1
-          fi
-          ls $CLIENT_DATA_PATH/wallet/payments -l
-          payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l)
-          if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then
-            echo "Got too many payment files leftover: $payment_files"
-            exit 1
-          fi
-        env:
-          CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
-        timeout-minutes: 10
+  #     - name: Ensure no leftover cash_notes and payment files
+  #       run: |
+  #         expected_cash_notes_files="1"
+  #         expected_payment_files="0"
+  #         pwd
+  #         ls $CLIENT_DATA_PATH/ -l
+  #         ls $CLIENT_DATA_PATH/wallet -l
+  #         ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+  #         cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l)
+  #         if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then
$expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 - # Start a different client to avoid local wallet slow down with more payments handled. - - name: Start a different client - run: | - pwd - mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - ls -l $SAFE_DATA_PATH - ls -l $SAFE_DATA_PATH/client_first - mkdir $SAFE_DATA_PATH/client - ls -l $SAFE_DATA_PATH - mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - ls -l $CLIENT_DATA_PATH - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - SAFE_DATA_PATH: /home/runner/.local/share/safe - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 25 + # - name: Wait for certain period + # run: sleep 300 + # timeout-minutes: 6 - - name: Use second client to upload third file - run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick - env: - SN_LOG: "all" - timeout-minutes: 10 + # # Start a different client to avoid local wallet slow down with more payments handled. + # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 25 - - name: Ensure no leftover cash_notes and payment files - run: | - expected_cash_notes_files="1" - expected_payment_files="0" - pwd - ls $CLIENT_DATA_PATH/ -l - ls $CLIENT_DATA_PATH/wallet -l - ls $CLIENT_DATA_PATH/wallet/cash_notes -l - cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - echo "Find $cash_note_files cash_note files" - if [ $expected_cash_notes_files -lt $cash_note_files ]; then - echo "Got too many cash_note files leftover: $cash_note_files" - exit 1 - fi - ls $CLIENT_DATA_PATH/wallet/payments -l - payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - if [ $expected_payment_files -lt $payment_files ]; then - echo "Got too many payment files leftover: $payment_files" - exit 1 - fi - env: - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - timeout-minutes: 10 + # - name: Use second client to upload third file + # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick + # env: + # SN_LOG: "all" + # timeout-minutes: 10 
-      - name: Stop the local network and upload logs
-        if: always()
-        uses: maidsafe/sn-local-testnet-action@main
-        with:
-          action: stop
-          log_file_prefix: safe_test_logs_heavy_replicate_bench
-          platform: ubuntu-latest
+  #     - name: Ensure no leftover cash_notes and payment files
+  #       run: |
+  #         expected_cash_notes_files="1"
+  #         expected_payment_files="0"
+  #         pwd
+  #         ls $CLIENT_DATA_PATH/ -l
+  #         ls $CLIENT_DATA_PATH/wallet -l
+  #         ls $CLIENT_DATA_PATH/wallet/cash_notes -l
+  #         cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l)
+  #         echo "Find $cash_note_files cash_note files"
+  #         if [ $expected_cash_notes_files -lt $cash_note_files ]; then
+  #           echo "Got too many cash_note files leftover: $cash_note_files"
+  #           exit 1
+  #         fi
+  #         ls $CLIENT_DATA_PATH/wallet/payments -l
+  #         payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l)
+  #         if [ $expected_payment_files -lt $payment_files ]; then
+  #           echo "Got too many payment files leftover: $payment_files"
+  #           exit 1
+  #         fi
+  #       env:
+  #         CLIENT_DATA_PATH: /home/runner/.local/share/safe/client
+  #       timeout-minutes: 10
+
+  #     - name: Stop the local network and upload logs
+  #       if: always()
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       with:
+  #         action: stop
+  #         log_file_prefix: safe_test_logs_heavy_replicate_bench
+  #         platform: ubuntu-latest

From c69d2af2ad28e8b2cc52617c9012aaf741261a04 Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Tue, 1 Oct 2024 14:34:49 +0200
Subject: [PATCH 088/255] style(autonomi): run cargo fmt

---
 autonomi/src/client/address.rs           |  2 +-
 autonomi/src/client/data.rs              | 12 +++++++-----
 autonomi/src/client/files.rs             |  5 +----
 autonomi/src/lib.rs                      |  2 +-
 autonomi_cli/src/actions/connect.rs      |  2 +-
 autonomi_cli/src/actions/download.rs     | 11 ++++++-----
 autonomi_cli/src/actions/progress_bar.rs |  2 +-
 autonomi_cli/src/commands/file.rs        | 17 ++++++++++-------
 autonomi_cli/src/log_metrics.rs          |  3 ++-
 autonomi_cli/src/main.rs                 |  2 +-
 10 files changed, 31 insertions(+), 27 deletions(-)

diff --git a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs
index e390c62d0a..1bb4d37d45 100644
--- a/autonomi/src/client/address.rs
+++ b/autonomi/src/client/address.rs
@@ -39,4 +39,4 @@ mod test {
         let xorname2 = str_to_xorname(&str).expect("Failed to convert back to xorname");
         assert_eq!(xorname, xorname2);
     }
-}
\ No newline at end of file
+}
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 8681e6a47f..231f365401 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -186,10 +186,7 @@ impl Client {
         Ok(map_xor_name)
     }
 
-    pub(crate) async fn cost(
-        &mut self,
-        data: Bytes,
-    ) -> Result {
+    pub(crate) async fn cost(&mut self, data: Bytes) -> Result {
         let now = std::time::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
 
@@ -203,7 +200,12 @@
         }
 
         let cost_map = self.get_store_quotes(content_addrs.into_iter()).await?;
-        let total_cost = AttoTokens::from_atto(cost_map.iter().map(|(_, quote)| quote.2.cost.as_atto()).sum::());
+        let total_cost = AttoTokens::from_atto(
+            cost_map
+                .iter()
+                .map(|(_, quote)| quote.2.cost.as_atto())
+                .sum::(),
+        );
 
         Ok(total_cost)
     }
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index e383a827e2..c8317a2ed7 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -75,10 +75,7 @@ impl Client {
 
     /// Get the cost to upload a file/dir to the network.
     /// quick and dirty implementation, please refactor once files are cleanly implemented
-    pub async fn file_cost(
-        &mut self,
-        path: &PathBuf,
-    ) -> Result {
+    pub async fn file_cost(&mut self, path: &PathBuf) -> Result {
         let mut map = HashMap::new();
         let mut total_cost = Amount::ZERO;
 
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index ca78b2d20a..0e8ff3f61d 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -25,8 +25,8 @@ pub mod client;
 #[cfg(feature = "data")]
 mod self_encryption;
 
-pub use sn_evm::EvmWallet as Wallet;
 pub use sn_evm::EvmNetwork as Network;
+pub use sn_evm::EvmWallet as Wallet;
 
 #[doc(no_inline)] // Place this under 'Re-exports' in the docs.
 pub use bytes::Bytes;
diff --git a/autonomi_cli/src/actions/connect.rs b/autonomi_cli/src/actions/connect.rs
index ee54c01586..9eccb3bbfb 100644
--- a/autonomi_cli/src/actions/connect.rs
+++ b/autonomi_cli/src/actions/connect.rs
@@ -9,8 +9,8 @@
 use autonomi::Client;
 use autonomi::Multiaddr;
 use color_eyre::eyre::bail;
-use indicatif::ProgressBar;
 use color_eyre::eyre::Result;
+use indicatif::ProgressBar;
 use std::time::Duration;
 
 pub async fn connect_to_network(peers: Vec) -> Result {
diff --git a/autonomi_cli/src/actions/download.rs b/autonomi_cli/src/actions/download.rs
index f27c3f3e37..5892a21472 100644
--- a/autonomi_cli/src/actions/download.rs
+++ b/autonomi_cli/src/actions/download.rs
@@ -6,15 +6,16 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use super::get_progress_bar;
 use autonomi::{client::address::str_to_xorname, Client};
 use color_eyre::eyre::{eyre, Context, Result};
 use std::path::PathBuf;
-use super::get_progress_bar;
 
 pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> {
-    let address = str_to_xorname(addr)
-        .wrap_err("Failed to parse data address")?;
-    let root = client.fetch_root(address).await
+    let address = str_to_xorname(addr).wrap_err("Failed to parse data address")?;
+    let root = client
+        .fetch_root(address)
+        .await
         .wrap_err("Failed to fetch data from address")?;
 
     let progress_bar = get_progress_bar(root.map.len() as u64)?;
@@ -48,4 +49,4 @@ pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Resul
         eprintln!("{all_errs:#?}");
         Err(eyre!("Errors while downloading data"))
     }
-}
\ No newline at end of file
+}
diff --git a/autonomi_cli/src/actions/progress_bar.rs b/autonomi_cli/src/actions/progress_bar.rs
index 4c6bbdf7bf..2fcfe0ba20 100644
--- a/autonomi_cli/src/actions/progress_bar.rs
+++ b/autonomi_cli/src/actions/progress_bar.rs
@@ -6,9 +6,9 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use color_eyre::eyre::Result;
 use indicatif::{ProgressBar, ProgressStyle};
 use std::time::Duration;
-use color_eyre::eyre::Result;
 
 pub fn get_progress_bar(length: u64) -> Result {
     let progress_bar = ProgressBar::new(length);
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
index acfbfc94f8..0e4c597cd3 100644
--- a/autonomi_cli/src/commands/file.rs
+++ b/autonomi_cli/src/commands/file.rs
@@ -7,8 +7,8 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 use autonomi::client::address::xorname_to_str;
-use autonomi::Wallet;
 use autonomi::Multiaddr;
+use autonomi::Wallet;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
 use std::path::PathBuf;
@@ -17,7 +17,9 @@ pub async fn cost(file: &str, peers: Vec) -> Result<()> {
     let mut client = crate::actions::connect_to_network(peers).await?;
 
     println!("Getting upload cost...");
-    let cost = client.file_cost(&PathBuf::from(file)).await
+    let cost = client
+        .file_cost(&PathBuf::from(file))
+        .await
         .wrap_err("Failed to calculate cost for file")?;
 
     println!("Estimate cost to upload file: {file}");
@@ -28,15 +30,16 @@ pub async fn cost(file: &str, peers: Vec) -> Result<()> {
 pub async fn upload(file: &str, peers: Vec) -> Result<()> {
     let secret_key = crate::utils::get_secret_key()
         .wrap_err("The secret key is required to perform this action")?;
-    let network = crate::utils::get_evm_network()
-        .wrap_err("Failed to get evm network")?;
-    let wallet = Wallet::new_from_private_key(network, &secret_key)
-        .wrap_err("Failed to load wallet")?;
+    let network = crate::utils::get_evm_network().wrap_err("Failed to get evm network")?;
+    let wallet =
+        Wallet::new_from_private_key(network, &secret_key).wrap_err("Failed to load wallet")?;
 
     let mut client = crate::actions::connect_to_network(peers).await?;
 
     println!("Uploading data to network...");
-    let (_, xor_name) = client.upload_from_dir(PathBuf::from(file), &wallet).await
+    let (_, xor_name) = client
+        .upload_from_dir(PathBuf::from(file), &wallet)
+        .await
         .wrap_err("Failed to upload file")?;
 
     let addr = xorname_to_str(xor_name);
diff --git a/autonomi_cli/src/log_metrics.rs b/autonomi_cli/src/log_metrics.rs
index cc109f603f..9694d799a5 100644
--- a/autonomi_cli/src/log_metrics.rs
+++ b/autonomi_cli/src/log_metrics.rs
@@ -30,7 +30,8 @@ pub fn init_logging_and_metrics(opt: &Opt) -> Result<()> {
 
     #[cfg(feature = "metrics")]
     std::thread::spawn(|| {
-        let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime to spawn metrics thread");
+        let rt = tokio::runtime::Runtime::new()
+            .expect("Failed to create tokio runtime to spawn metrics thread");
         rt.spawn(async {
             init_metrics(std::process::id()).await;
         });
diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs
index f07aaf4cc4..6aaa446582 100644
--- a/autonomi_cli/src/main.rs
+++ b/autonomi_cli/src/main.rs
@@ -9,11 +9,11 @@
 #[macro_use]
 extern crate tracing;
 
+mod actions;
 mod commands;
 mod log_metrics;
 mod opt;
 mod utils;
-mod actions;
 
 use clap::Parser;
 use color_eyre::Result;

From b035f122145eeb80a0da1a55337c24dce8c7c9e7 Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Tue, 1 Oct 2024 14:49:58 +0200
Subject: [PATCH 089/255] ci(global): disable more tests

---
 .github/workflows/benchmark-prs.yml  |  760 +++++++++----------
 .github/workflows/cross-platform.yml |   32 +-
 .github/workflows/memcheck.yml       | 1004 +++++++++++++-------------
 .github/workflows/node_man_tests.yml |  238 +++---
 4 files changed, 1017 insertions(+), 1017 deletions(-)

diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml
index 3d3aa4bd77..170751ab9c 100644
--- a/.github/workflows/benchmark-prs.yml
+++ b/.github/workflows/benchmark-prs.yml
@@ -9,386 +9,386 @@ env:
   NODE_DATA_PATH: /home/runner/.local/share/safe/node
 
 jobs:
-  benchmark-cli:
-    name: Compare sn_cli benchmarks to main
-    # right now only ubuntu, running on multiple systems would require many pushes...\
-    # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing
-    # once to the branch..
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: dtolnay/rust-toolchain@stable
-        with:
-          components: rustfmt, clippy
-
-      - uses: Swatinem/rust-cache@v2
-        continue-on-error: true
-
-      ########################
-      ### Setup ###
-      ########################
-      - run: cargo install cargo-criterion
-
-      - name: install ripgrep
-        run: sudo apt-get -y install ripgrep
-
-      - name: Download 95mb file to be uploaded with the safe client
-        shell: bash
-        run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
-
-      # As normal user won't care much about initial client startup,
-      # but be more alerted on communication speed during transmission.
-      # Meanwhile the criterion testing code includes the client startup as well,
-      # it will be better to execute bench test with `local-discovery`,
-      # to make the measurement results reflect speed improvement or regression more accurately.
-      - name: Build sn bins
-        run: cargo build --release --bin safe --bin safenode --features local-discovery
-        timeout-minutes: 30
-
-      - name: Build faucet bin
-        run: cargo build --release --bin faucet --features local-discovery --features gifting --no-default-features
-        timeout-minutes: 30
-
-      - name: Start a local network
-        uses: maidsafe/sn-local-testnet-action@main
-        env:
-          SN_LOG: "all"
-        with:
-          action: start
-          interval: 2000
-          node-path: target/release/safenode
-          faucet-path: target/release/faucet
-          platform: ubuntu-latest
-          build: true
-
-      - name: Check SAFE_PEERS was set
-        shell: bash
-        run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
-
-      #########################
-      ### Upload large file ###
-      #########################
-
-      - name: Fund cli wallet
-        shell: bash
-        run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000
-        env:
-          SN_LOG: "all"
-
-      - name: Start a client instance to compare memory usage
-        shell: bash
-        run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick
-        env:
-          SN_LOG: "all"
-
-      - name: Cleanup uploaded_files folder to avoid pollute download benchmark
-        shell: bash
-        run: rm -rf $CLIENT_DATA_PATH/uploaded_files
-
-      ###########################
-      ### Client Mem Analysis ###
-      ###########################
-
-      - name: Check client memory usage
-        shell: bash
-        run: |
-          client_peak_mem_limit_mb="1024" # mb
-          client_avg_mem_limit_mb="512" # mb
-
-          peak_mem_usage=$(
-            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
-            awk -F':' '/"memory_used_mb":/{print $2}' |
-            sort -n |
-            tail -n 1
-          )
-          echo "Peak memory usage: $peak_mem_usage MB"
-          if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
-            echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
-            exit 1
-          fi
-
-          total_mem=$(
-            rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
-            awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
-          )
-          num_of_times=$(
-            rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
-            rg "(\d+) matches" |
-            rg "\d+" -o
-          )
-          echo "num_of_times: $num_of_times"
-          echo "Total memory is: $total_mem"
-          average_mem=$(($total_mem/$(($num_of_times))))
-          echo "Average memory is: $average_mem"
-
-          if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
-            echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
-            exit 1
-          fi
-          # Write the client memory usage to a file
-          echo '[
-            {
"name": "client-peak-memory-usage-during-upload", - "value": '$peak_mem_usage', - "unit": "MB" - }, - { - "name": "client-average-memory-usage-during-upload", - "value": '$average_mem', - "unit": "MB" - } - ]' > client_memory_usage.json - - - name: check client_memory_usage.json - shell: bash - run: cat client_memory_usage.json - - - name: Alert for client memory usage - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "Memory Usage of Client during uploading large file" - tool: "customSmallerIsBetter" - output-file-path: client_memory_usage.json - # Where the previous data file is stored - external-data-json-path: ./cache/client-mem-usage.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true - - ######################## - ### Benchmark ### - ######################## - - name: Bench `safe` cli - shell: bash - # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, - # passes to tee which displays it in the terminal and writes to output.txt - run: | - cargo criterion --features=local-discovery --message-format=json 2>&1 -p sn_cli | tee -a output.txt - cat output.txt | rg benchmark-complete | jq -s 'map({ - name: (.id | split("/"))[-1], - unit: "MiB/s", - value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) - })' > files-benchmark.json - timeout-minutes: 15 - - - name: Confirming the number of files uploaded and downloaded during the benchmark test - shell: bash - run: | - ls -l $CLIENT_DATA_PATH - ls -l $CLIENT_DATA_PATH/uploaded_files - ls -l $CLIENT_DATA_PATH/safe_files - - - name: Store benchmark result - uses: benchmark-action/github-action-benchmark@v1 - with: - # What benchmark tool the output.txt came from - tool: "customBiggerIsBetter" - output-file-path: files-benchmark.json - # Where the previous data file is stored - external-data-json-path: ./cache/benchmark-data.json - # Workflow will fail when an alert happens - fail-on-alert: true - # GitHub API token to make a commit comment - github-token: ${{ secrets.GITHUB_TOKEN }} - # Enable alert commit comment - comment-on-alert: true - # 200% regression will result in alert - alert-threshold: "200%" - # Enable Job Summary for PRs - summary-always: true - - - name: Start a client to carry out download to output the logs - shell: bash - run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - - - name: Start a client to simulate criterion upload - shell: bash - run: | - ls -l target/release - target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick - - ######################### - ### Stop Network ### - ######################### - - - name: Stop the local network - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_benchmark - platform: ubuntu-latest - build: true - - - name: Upload Faucet folder - uses: actions/upload-artifact@main - with: - name: faucet_folder - path: /home/runner/.local/share/safe/test_faucet - continue-on-error: true - if: always() - - ######################### - ### Node Mem Analysis ### - ######################### 
-
-      # The large file uploaded will increase node's peak mem usage a lot
-      - name: Check node memory usage
-        shell: bash
-        run: |
-          node_peak_mem_limit_mb="250" # mb
-          peak_mem_usage=$(
-            rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
-            awk -F':' '/"memory_used_mb":/{print $2}' |
-            sort -n |
-            tail -n 1
-          )
-
-          echo "Memory usage: $peak_mem_usage MB"
-          if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
-            echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
-            exit 1
-          fi
-          # Write the node memory usage to a file
-          echo '[
-            {
-              "name": "node-memory-usage-through-safe-benchmark",
-              "value": '$peak_mem_usage',
-              "unit": "MB"
-            }
-          ]' > node_memory_usage.json
-
-      - name: check node_memory_usage.json
-        shell: bash
-        run: cat node_memory_usage.json
-
-      - name: Alert for node memory usage
-        uses: benchmark-action/github-action-benchmark@v1
-        with:
-          tool: "customSmallerIsBetter"
-          output-file-path: node_memory_usage.json
-          # Where the previous data file is stored
-          external-data-json-path: ./cache/node-mem-usage.json
-          # Workflow will fail when an alert happens
-          fail-on-alert: true
-          # GitHub API token to make a commit comment
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          # Enable alert commit comment
-          comment-on-alert: true
-          # Comment on the PR
-          comment-always: true
-          # 200% regression will result in alert
-          alert-threshold: "200%"
-          # Enable Job Summary for PRs
-          summary-always: true
-
-      ###########################################
-      ### Swarm_driver handling time Analysis ###
-      ###########################################
-
-      - name: Check swarm_driver handling time
-        shell: bash
-        run: |
-          num_of_times=$(
-            rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
-            rg "(\d+) matches" |
-            rg "\d+" -o
-          )
-          echo "Number of long cmd handling times: $num_of_times"
-          total_long_handling_ms=$(
-            rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
-            awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
-          )
-          echo "Total cmd long handling time is: $total_long_handling_ms ms"
-          average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
-          echo "Average cmd long handling time is: $average_handling_ms ms"
-          total_long_handling=$(($total_long_handling_ms))
-          total_num_of_times=$(($num_of_times))
-          num_of_times=$(
-            rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats |
-            rg "(\d+) matches" |
-            rg "\d+" -o
-          )
-          echo "Number of long event handling times: $num_of_times"
-          total_long_handling_ms=$(
-            rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename |
-            awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}'
-          )
-          echo "Total event long handling time is: $total_long_handling_ms ms"
-          average_handling_ms=$(($total_long_handling_ms/$(($num_of_times))))
-          echo "Average event long handling time is: $average_handling_ms ms"
-          total_long_handling=$(($total_long_handling_ms+$total_long_handling))
-          total_num_of_times=$(($num_of_times+$total_num_of_times))
-          average_handling_ms=$(($total_long_handling/$(($total_num_of_times))))
-          echo "Total swarm_driver long handling times is: $total_num_of_times"
-          echo "Total swarm_driver long handling duration is: $total_long_handling ms"
-          echo "Total average swarm_driver long handling duration is: $average_handling_ms ms"
-          total_num_of_times_limit_hits="30000" # hits
-          total_long_handling_limit_ms="400000" # ms
-          average_handling_limit_ms="20" # ms
-          if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then
-            echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits"
-            exit 1
-          fi
-          if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then
-            echo "Swarm_driver total long handling duration exceeded threshold: $total_long_handling ms"
-            exit 1
-          fi
-          if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then
-            echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms"
-            exit 1
-          fi
-
-          # Write the node memory usage to a file
-          echo '[
-            {
-              "name": "swarm_driver long handling times",
-              "value": '$total_num_of_times',
-              "unit": "hits"
-            },
-            {
-              "name": "swarm_driver long handling total_time",
-              "value": '$total_long_handling',
-              "unit": "ms"
-            },
-            {
-              "name": "swarm_driver average long handling time",
-              "value": '$average_handling_ms',
-              "unit": "ms"
-            }
-          ]' > swarm_driver_long_handlings.json
-
-      - name: check swarm_driver_long_handlings.json
-        shell: bash
-        run: cat swarm_driver_long_handlings.json
-
-      - name: Alert for swarm_driver long handlings
-        uses: benchmark-action/github-action-benchmark@v1
-        with:
-          tool: "customSmallerIsBetter"
-          output-file-path: swarm_driver_long_handlings.json
-          # Where the previous data file is stored
-          external-data-json-path: ./cache/swarm_driver_long_handlings.json
-          # Workflow will fail when an alert happens
-          fail-on-alert: true
-          # GitHub API token to make a commit comment
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          # Enable alert commit comment
-          comment-on-alert: true
-          # Comment on the PR
-          comment-always: true
-          # 200% regression will result in alert
-          alert-threshold: "200%"
-          # Enable Job Summary for PRs
-          summary-always: true
+  # benchmark-cli:
+  #   name: Compare sn_cli benchmarks to main
+  #   # right now only ubuntu, running on multiple systems would require many pushes...\
+  #   # perhaps this can be done with one consolidation action in the future, pulling down all results and pushing
+  #   # once to the branch..
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - uses: actions/checkout@v4
+
+  #     - uses: dtolnay/rust-toolchain@stable
+  #       with:
+  #         components: rustfmt, clippy
+
+  #     - uses: Swatinem/rust-cache@v2
+  #       continue-on-error: true
+
+  #     ########################
+  #     ### Setup ###
+  #     ########################
+  #     - run: cargo install cargo-criterion
+
+  #     - name: install ripgrep
+  #       run: sudo apt-get -y install ripgrep
+
+  #     - name: Download 95mb file to be uploaded with the safe client
+  #       shell: bash
+  #       run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip
+
+  #     # As normal user won't care much about initial client startup,
+  #     # but be more alerted on communication speed during transmission.
+  #     # Meanwhile the criterion testing code includes the client startup as well,
+  #     # it will be better to execute bench test with `local-discovery`,
+  #     # to make the measurement results reflect speed improvement or regression more accurately.
+  #     - name: Build sn bins
+  #       run: cargo build --release --bin safe --bin safenode --features local-discovery
+  #       timeout-minutes: 30
+
+  #     - name: Build faucet bin
+  #       run: cargo build --release --bin faucet --features local-discovery --features gifting --no-default-features
+  #       timeout-minutes: 30
+
+  #     - name: Start a local network
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       env:
+  #         SN_LOG: "all"
+  #       with:
+  #         action: start
+  #         interval: 2000
+  #         node-path: target/release/safenode
+  #         faucet-path: target/release/faucet
+  #         platform: ubuntu-latest
+  #         build: true
+
+  #     - name: Check SAFE_PEERS was set
+  #       shell: bash
+  #       run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS"
+
+  #     #########################
+  #     ### Upload large file ###
+  #     #########################
+
+  #     - name: Fund cli wallet
+  #       shell: bash
+  #       run: target/release/safe --log-output-dest=data-dir wallet get-faucet 127.0.0.1:8000
+  #       env:
+  #         SN_LOG: "all"
+
+  #     - name: Start a client instance to compare memory usage
+  #       shell: bash
+  #       run: target/release/safe --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick
+  #       env:
+  #         SN_LOG: "all"
+
+  #     - name: Cleanup uploaded_files folder to avoid pollute download benchmark
+  #       shell: bash
+  #       run: rm -rf $CLIENT_DATA_PATH/uploaded_files
+
+  #     ###########################
+  #     ### Client Mem Analysis ###
+  #     ###########################
+
+  #     - name: Check client memory usage
+  #       shell: bash
+  #       run: |
+  #         client_peak_mem_limit_mb="1024" # mb
+  #         client_avg_mem_limit_mb="512" # mb
+
+  #         peak_mem_usage=$(
+  #           rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+  #           awk -F':' '/"memory_used_mb":/{print $2}' |
+  #           sort -n |
+  #           tail -n 1
+  #         )
+  #         echo "Peak memory usage: $peak_mem_usage MB"
+  #         if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
+  #           echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
+  #           exit 1
+  #         fi
+
+  #         total_mem=$(
+  #           rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+  #           awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
+  #         )
+  #         num_of_times=$(
+  #           rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
+  #           rg "(\d+) matches" |
+  #           rg "\d+" -o
+  #         )
+  #         echo "num_of_times: $num_of_times"
+  #         echo "Total memory is: $total_mem"
+  #         average_mem=$(($total_mem/$(($num_of_times))))
+  #         echo "Average memory is: $average_mem"
+
+  #         if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
+  #           echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
+  #           exit 1
+  #         fi
+  #         # Write the client memory usage to a file
+  #         echo '[
+  #           {
+  #             "name": "client-peak-memory-usage-during-upload",
+  #             "value": '$peak_mem_usage',
+  #             "unit": "MB"
+  #           },
+  #           {
+  #             "name": "client-average-memory-usage-during-upload",
+  #             "value": '$average_mem',
+  #             "unit": "MB"
+  #           }
+  #         ]' > client_memory_usage.json
+
+  #     - name: check client_memory_usage.json
+  #       shell: bash
+  #       run: cat client_memory_usage.json
+
+  #     - name: Alert for client memory usage
+  #       uses: benchmark-action/github-action-benchmark@v1
+  #       with:
+  #         name: "Memory Usage of Client during uploading large file"
+  #         tool: "customSmallerIsBetter"
+  #         output-file-path: client_memory_usage.json
+  #         # Where the previous data file is stored
+  #         external-data-json-path: ./cache/client-mem-usage.json
+  #         # Workflow will fail when an alert happens
+  #         fail-on-alert: true
+  #         # GitHub API token to make a commit comment
+  #         github-token: ${{ secrets.GITHUB_TOKEN }}
+  #         # Enable alert commit comment
+  #         comment-on-alert: true
+  #         # 200% regression will result in alert
+  #         alert-threshold: "200%"
+  #         # Enable Job Summary for PRs
+  #         summary-always: true
+
+  #     ########################
+  #     ### Benchmark ###
+  #     ########################
+  #     - name: Bench `safe` cli
+  #       shell: bash
+  #       # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr,
+  #       # passes to tee which displays it in the terminal and writes to output.txt
+  #       run: |
+  #         cargo criterion --features=local-discovery --message-format=json 2>&1 -p sn_cli | tee -a output.txt
+  #         cat output.txt | rg benchmark-complete | jq -s 'map({
+  #           name: (.id | split("/"))[-1],
+  #           unit: "MiB/s",
+  #           value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9))
+  #         })' > files-benchmark.json
+  #       timeout-minutes: 15
+
+  #     - name: Confirming the number of files uploaded and downloaded during the benchmark test
+  #       shell: bash
+  #       run: |
+  #         ls -l $CLIENT_DATA_PATH
+  #         ls -l $CLIENT_DATA_PATH/uploaded_files
+  #         ls -l $CLIENT_DATA_PATH/safe_files
+
+  #     - name: Store benchmark result
+  #       uses: benchmark-action/github-action-benchmark@v1
+  #       with:
+  #         # What benchmark tool the output.txt came from
+  #         tool: "customBiggerIsBetter"
+  #         output-file-path: files-benchmark.json
+  #         # Where the previous data file is stored
+  #         external-data-json-path: ./cache/benchmark-data.json
+  #         # Workflow will fail when an alert happens
+  #         fail-on-alert: true
+  #         # GitHub API token to make a commit comment
+  #         github-token: ${{ secrets.GITHUB_TOKEN }}
+  #         # Enable alert commit comment
+  #         comment-on-alert: true
+  #         # 200% regression will result in alert
+  #         alert-threshold: "200%"
+  #         # Enable Job Summary for PRs
+  #         summary-always: true
+
+  #     - name: Start a client to carry out download to output the logs
+  #       shell: bash
+  #       run: target/release/safe --log-output-dest=data-dir files download --retry-strategy quick
+
+  #     - name: Start a client to simulate criterion upload
+  #       shell: bash
+  #       run: |
+  #         ls -l target/release
+  #         target/release/safe --log-output-dest=data-dir files upload target/release/faucet --retry-strategy quick
+
+  #     #########################
+  #     ### Stop Network ###
+  #     #########################
+
+  #     - name: Stop the local network
+  #       if: always()
+  #       uses: maidsafe/sn-local-testnet-action@main
+  #       with:
+  #         action: stop
+  #         log_file_prefix: safe_test_logs_benchmark
+  #         platform: ubuntu-latest
+  #         build: true
+
+  #     - name: Upload Faucet folder
+  #       uses: actions/upload-artifact@main
+  #       with:
+  #         name: faucet_folder
+  #         path: /home/runner/.local/share/safe/test_faucet
+  #       continue-on-error: true
+  #       if: always()
+
+  #     #########################
+  #     ### Node Mem Analysis ###
+  #     #########################
+
+  #     # The large file uploaded will increase node's peak mem usage a lot
+  #     - name: Check node memory usage
+  #       shell: bash
+  #       run: |
+  #         node_peak_mem_limit_mb="250" # mb
+  #         peak_mem_usage=$(
+  #           rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
+  #           awk -F':' '/"memory_used_mb":/{print $2}' |
+  #           sort -n |
+  #           tail -n 1
+  #         )
+
+  #         echo "Memory usage: $peak_mem_usage MB"
+  #         if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
+  #           echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
+ # exit 1 + # fi + # # Write the node memory usage to a file + # echo '[ + # { + # "name": "node-memory-usage-through-safe-benchmark", + # "value": '$peak_mem_usage', + # "unit": "MB" + # } + # ]' > node_memory_usage.json + + # - name: check node_memory_usage.json + # shell: bash + # run: cat node_memory_usage.json + + # - name: Alert for node memory usage + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # tool: "customSmallerIsBetter" + # output-file-path: node_memory_usage.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/node-mem-usage.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # Comment on the PR + # comment-always: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true + + # ########################################### + # ### Swarm_driver handling time Analysis ### + # ########################################### + + # - name: Check swarm_driver handling time + # shell: bash + # run: | + # num_of_times=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long cmd handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total cmd long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average cmd long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms)) + # total_num_of_times=$(($num_of_times)) + # num_of_times=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | + # rg "(\d+) matches" | + # rg "\d+" -o + # ) + # echo "Number of long event handling times: $num_of_times" + # total_long_handling_ms=$( + # rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | + # awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' + # ) + # echo "Total event long handling time is: $total_long_handling_ms ms" + # average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) + # echo "Average event long handling time is: $average_handling_ms ms" + # total_long_handling=$(($total_long_handling_ms+$total_long_handling)) + # total_num_of_times=$(($num_of_times+$total_num_of_times)) + # average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) + # echo "Total swarm_driver long handling times is: $total_num_of_times" + # echo "Total swarm_driver long handling duration is: $total_long_handling ms" + # echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + # total_num_of_times_limit_hits="30000" # hits + # total_long_handling_limit_ms="400000" # ms + # average_handling_limit_ms="20" # ms + # if (( $(echo "$total_num_of_times > $total_num_of_times_limit_hits" | bc -l) )); then + # echo "Swarm_driver long handling times exceeded threshold: $total_num_of_times hits" + # exit 1 + # fi + # if (( $(echo "$total_long_handling > $total_long_handling_limit_ms" | bc -l) )); then + # echo "Swarm_driver total long handling duration 
exceeded threshold: $total_long_handling ms" + # exit 1 + # fi + # if (( $(echo "$average_handling_ms > $average_handling_limit_ms" | bc -l) )); then + # echo "Swarm_driver average long handling time exceeded threshold: $average_handling_ms ms" + # exit 1 + # fi + + # # Write the node memory usage to a file + # echo '[ + # { + # "name": "swarm_driver long handling times", + # "value": '$total_num_of_times', + # "unit": "hits" + # }, + # { + # "name": "swarm_driver long handling total_time", + # "value": '$total_long_handling', + # "unit": "ms" + # }, + # { + # "name": "swarm_driver average long handling time", + # "value": '$average_handling_ms', + # "unit": "ms" + # } + # ]' > swarm_driver_long_handlings.json + + # - name: check swarm_driver_long_handlings.json + # shell: bash + # run: cat swarm_driver_long_handlings.json + + # - name: Alert for swarm_driver long handlings + # uses: benchmark-action/github-action-benchmark@v1 + # with: + # tool: "customSmallerIsBetter" + # output-file-path: swarm_driver_long_handlings.json + # # Where the previous data file is stored + # external-data-json-path: ./cache/swarm_driver_long_handlings.json + # # Workflow will fail when an alert happens + # fail-on-alert: true + # # GitHub API token to make a commit comment + # github-token: ${{ secrets.GITHUB_TOKEN }} + # # Enable alert commit comment + # comment-on-alert: true + # # Comment on the PR + # comment-always: true + # # 200% regression will result in alert + # alert-threshold: "200%" + # # Enable Job Summary for PRs + # summary-always: true benchmark-cash: name: Compare sn_transfer benchmarks to main diff --git a/.github/workflows/cross-platform.yml b/.github/workflows/cross-platform.yml index d4d9393008..3672b6d7b7 100644 --- a/.github/workflows/cross-platform.yml +++ b/.github/workflows/cross-platform.yml @@ -14,26 +14,26 @@ env: jobs: - wasm: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Wasm builds - runs-on: ubuntu-latest + # wasm: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Wasm builds + # runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 - - name: Install wasm-pack - run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + # - name: Install wasm-pack + # run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - - name: Build client for wasm - # wasm pack doesnt support workspaces - # --dev to avoid a loong optimisation step - run: cd sn_client && wasm-pack build --dev - timeout-minutes: 30 + # - name: Build client for wasm + # # wasm pack doesnt support workspaces + # # --dev to avoid a loong optimisation step + # run: cd sn_client && wasm-pack build --dev + # timeout-minutes: 30 websocket: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index b9965b64f3..55d3790bb5 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -17,505 +17,505 @@ env: RESTART_TEST_NODE_DATA_PATH: /home/runner/.local/share/safe/restart_node FAUCET_LOG_PATH: /home/runner/.local/share/safe/test_faucet/logs -jobs: - memory-check: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Check 
we're on the right commit - run: git log -1 --oneline - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - continue-on-error: true - - - name: install ripgrep - shell: bash - run: sudo apt-get install -y ripgrep - - - name: Build binaries - run: cargo build --release --bin safe --bin safenode - timeout-minutes: 30 - - - name: Build faucet binary with gifting - run: cargo build --release --bin faucet --features gifting - timeout-minutes: 30 - - - name: Build tests - run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run - timeout-minutes: 30 - - - name: Start a node instance that does not undergo churn - run: | - mkdir -p $BOOTSTRAP_NODE_DATA_PATH - ./target/release/safenode --first \ - --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & - sleep 10 - env: - SN_LOG: "all" - - - name: Set SAFE_PEERS - run: | - safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ - rg '/ip4.*$' -m1 -o | rg '"' -r '') - echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV - - - name: Check SAFE_PEERS was set - shell: bash - run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" - - - name: Start a node instance to be restarted - run: | - mkdir -p $RESTART_TEST_NODE_DATA_PATH - ./target/release/safenode \ - --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & - sleep 10 - env: - SN_LOG: "all" - - - name: Start a local network - env: - SN_LOG: "all" - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - build: true - faucet-path: target/release/faucet - interval: 2000 - join: true - node-path: target/release/safenode - owner-prefix: node - platform: ubuntu-latest - set-safe-peers: false - - # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet - - name: Check SAFE_PEERS was not changed - shell: bash - run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" - - - name: Create and fund a wallet to pay for files storage - run: | - echo "Obtaining address for use with the faucet..." - ./target/release/safe --log-output-dest=data-dir wallet create --no-password - address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) - echo "Sending tokens to the faucet at $address" - ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt - cat initial_balance_from_faucet.txt - cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex - cat transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 15 - - - name: Move faucet log to the working folder - run: | - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Download 95mb file to be uploaded with the safe client - shell: bash - run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - # The resources file we upload may change, and with it mem consumption. - # Be aware! 
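For reference, the `Set SAFE_PEERS` step above condenses to a three-stage ripgrep pipeline: find the listen line, keep everything from `/ip4` onwards, then strip the quotes. A minimal sketch of it, assuming a log line of roughly this shape (the exact wording is whatever safenode emits, so treat the sample as hypothetical):

    # Hypothetical stand-in for the bootstrap node's listen line.
    echo 'Local node is listening NetworkAddress on "/ip4/127.0.0.1/udp/38461/quic-v1/p2p/12D3KooWExamplePeerId"' > sample.log

    # Same pipeline as the step above: match the line, keep the first /ip4
    # multiaddr to end-of-line, then delete the quotes via an empty -r replacement.
    safe_peers=$(rg 'Local node is listening .+ on ".+"' sample.log -u | \
        rg '/ip4.*$' -m1 -o | rg '"' -r '')
    echo "SAFE_PEERS=$safe_peers"
    # -> SAFE_PEERS=/ip4/127.0.0.1/udp/38461/quic-v1/p2p/12D3KooWExamplePeerId

The upload step that the mem-consumption warning above refers to continues below.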
- - name: Start a client to upload files - # -p makes files public - run: | - ls -l - ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p - env: - SN_LOG: "all" - timeout-minutes: 25 - - # this check needs to be after some transfer activity - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $RESTART_TEST_NODE_DATA_PATH - cat $RESTART_TEST_NODE_DATA_PATH/safenode.log - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $BOOTSTRAP_NODE_DATA_PATH - cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log - - - name: Check we're warned about using default genesis - run: | - git log -1 --oneline - ls -la $NODE_DATA_PATH - rg "USING DEFAULT" "$NODE_DATA_PATH" -u - shell: bash - - # Uploading same file using different client shall not incur any payment neither uploads - # Note rg will throw an error directly in case of failed to find a matching pattern. - - name: Start a different client to upload the same file - run: | - pwd - mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - ls -l $SAFE_DATA_PATH - ls -l $SAFE_DATA_PATH/client_first - mkdir $SAFE_DATA_PATH/client - ls -l $SAFE_DATA_PATH - mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - ls -l $CLIENT_DATA_PATH - cp ./the-test-data.zip ./the-test-data_1.zip - ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password - ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt - cat initial_balance_from_faucet_1.txt - cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex - cat transfer_hex - ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt - cat second_upload.txt - rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats - env: - SN_LOG: "all" - timeout-minutes: 25 - - - name: Stop the restart node - run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) - - - name: Start the restart node again - run: | - ./target/release/safenode \ - --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & - sleep 10 - env: - SN_LOG: "all" - - - name: Assert we've reloaded some chunks - run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --test data_with_churn -- --nocapture - env: - TEST_DURATION_MINS: 5 - TEST_TOTAL_CHURN_CYCLES: 15 - SN_LOG: "all" - timeout-minutes: 30 - - - name: Check current files - run: ls -la - - name: Check safenode file - run: ls /home/runner/work/safe_network/safe_network/target/release - - - name: Check there was no restart issues - run: | - if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then - echo "Restart issues detected" - exit 1 - else - echo "No restart issues detected" - fi - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture - env: - SLEEP_BEFORE_VERIFICATION: 300 - timeout-minutes: 10 - - - name: Verify restart of nodes using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of 
restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" - peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - node_count=$(ls $NODE_DATA_PATH | wc -l) - echo "Node dir count is $node_count" - # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - # if [ $restart_count -lt $node_count ]; then - # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # exit 1 - # fi - - - name: Verify data replication using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of replication - # TODO: make this use an env var, or relate to testnet size - # As the bootstrap_node using separate folder for logging, - # hence the folder input to rg needs to cover that as well. - run: | - sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Sent $sending_list_count replication lists" - received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Received $received_list_count replication lists" - fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Carried out $fetching_attempt_count fetching attempts" - if: always() - - - name: Start a client to download files - run: | - ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - ls -l $CLIENT_DATA_PATH/safe_files - downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) - if [ $downloaded_files -lt 1 ]; then - echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" - exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 10 - - # Download the same files again to ensure files won't get corrupted. - - name: Start a client to download the same files again - run: | - ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick - ls -l $CLIENT_DATA_PATH/safe_files - downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) - if [ $downloaded_files -lt 1 ]; then - echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" - exit 1 - fi - file_size1=$(stat -c "%s" ./the-test-data_1.zip) - file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) - if [ $file_size1 != $file_size2 ]; then - echo "The downloaded file has a different size $file_size2 to the original $file_size1." 
- exit 1 - fi - env: - SN_LOG: "all" - timeout-minutes: 10 - - - name: Audit from genesis to collect entire spend DAG and dump to a dot file - run: | - ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt - echo "==============================================================================" - cat spend_dag_and_statistics.txt - env: - SN_LOG: "all" - timeout-minutes: 5 - if: always() - - - name: Ensure discord_ids decrypted - run: | - rg 'node_' ./spend_dag_and_statistics.txt -o - timeout-minutes: 1 - if: always() - - - name: Check nodes running - shell: bash - timeout-minutes: 1 - continue-on-error: true - run: pgrep safenode | wc -l - if: always() - - - name: Wait before verifying reward forwarding - run: sleep 300 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_memcheck - platform: ubuntu-latest - build: true - - - name: Check node memory usage - shell: bash - # The resources file and churning chunk_size we upload may change, and with it mem consumption. - # This is set to a value high enough to allow for some variation depending on - # resources and node location in the network, but hopefully low enough to catch - # any wild memory issues - # Any changes to this value should be carefully considered and tested! - # As we have a bootstrap node acting as an access point for churning nodes and client, - # The memory usage here will be significantly higher here than in the benchmark test, - # where we don't have a bootstrap node. - run: | - node_peak_mem_limit_mb="300" # mb - - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - echo "Node memory usage: $peak_mem_usage MB" - - if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then - echo "Node memory usage exceeded threshold: $peak_mem_usage MB" - exit 1 - fi - if: always() - - - name: Check client memory usage - shell: bash - # limits here are lower that benchmark tests as there is less going on. 
- run: | - client_peak_mem_limit_mb="1024" # mb - client_avg_mem_limit_mb="512" # mb - - peak_mem_usage=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/{print $2}' | - sort -n | - tail -n 1 - ) - echo "Peak memory usage: $peak_mem_usage MB" - if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then - echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB" - exit 1 - fi - - total_mem=$( - rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename | - awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}' - ) - num_of_times=$( - rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "num_of_times: $num_of_times" - echo "Total memory is: $total_mem" - average_mem=$(($total_mem/$(($num_of_times)))) - echo "Average memory is: $average_mem" - - if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then - echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB" - exit 1 - fi - - - name: Check node swarm_driver handling statistics - shell: bash - # With the latest improvements, swarm_driver will be in high chance - # has no super long handling (longer than 1s). - # As the `rg` cmd will fail the shell directly if no entry find, - # hence not covering it. - # Be aware that if do need to looking for handlings longer than second, it shall be: - # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats - run: | - num_of_times=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long cmd handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total cmd long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average cmd long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms)) - total_num_of_times=$(($num_of_times)) - num_of_times=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | - rg "(\d+) matches" | - rg "\d+" -o - ) - echo "Number of long event handling times: $num_of_times" - total_long_handling_ms=$( - rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | - awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' - ) - echo "Total event long handling time is: $total_long_handling_ms ms" - average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) - echo "Average event long handling time is: $average_handling_ms ms" - total_long_handling=$(($total_long_handling_ms+$total_long_handling)) - total_num_of_times=$(($num_of_times+$total_num_of_times)) - average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) - echo "Total swarm_driver long handling times is: $total_num_of_times" - echo "Total swarm_driver long handling duration is: $total_long_handling ms" - echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" - - - name: Verify reward forwarding using rg - shell: bash - timeout-minutes: 1 - run: | - min_reward_forwarding_times="100" - 
reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Carried out $reward_forwarding_count reward forwardings" - if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then - echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" - exit 1 - fi - if: always() - - - name: Upload payment wallet initialization log - uses: actions/upload-artifact@main - with: - name: payment_wallet_initialization_log - path: initial_balance_from_faucet.txt - continue-on-error: true - if: always() - - - name: Move faucet log to the working folder - run: | - echo "current folder is:" - pwd - echo "SAFE_DATA_PATH has: " - ls -l $SAFE_DATA_PATH - echo "test_faucet foder has: " - ls -l $SAFE_DATA_PATH/test_faucet - echo "logs folder has: " - ls -l $SAFE_DATA_PATH/test_faucet/logs - mv $FAUCET_LOG_PATH/*.log ./faucet_log.log - env: - SN_LOG: "all" - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Move bootstrap_node log to the working directory - run: | - ls -l $BOOTSTRAP_NODE_DATA_PATH - mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log - continue-on-error: true - if: always() - timeout-minutes: 1 - - - name: Upload faucet log - uses: actions/upload-artifact@main - with: - name: memory_check_faucet_log - path: faucet_log.log - continue-on-error: true - if: always() - - - name: Upload bootstrap_node log - uses: actions/upload-artifact@main - with: - name: memory_check_bootstrap_node_log - path: bootstrap_node.log - continue-on-error: true - if: always() - - - name: Upload spend DAG and statistics - uses: actions/upload-artifact@main - with: - name: memory_check_spend_dag_and_statistics - path: spend_dag_and_statistics.txt - continue-on-error: true - if: always() +# jobs: +# memory-check: +# runs-on: ubuntu-latest +# steps: +# - name: Checkout code +# uses: actions/checkout@v4 + +# - name: Check we're on the right commit +# run: git log -1 --oneline + +# - name: Install Rust +# uses: dtolnay/rust-toolchain@stable + +# - uses: Swatinem/rust-cache@v2 +# continue-on-error: true + +# - name: install ripgrep +# shell: bash +# run: sudo apt-get install -y ripgrep + +# - name: Build binaries +# run: cargo build --release --bin safe --bin safenode +# timeout-minutes: 30 + +# - name: Build faucet binary with gifting +# run: cargo build --release --bin faucet --features gifting +# timeout-minutes: 30 + +# - name: Build tests +# run: cargo test --release -p sn_node --test data_with_churn --test verify_routing_table --no-run +# timeout-minutes: 30 + +# - name: Start a node instance that does not undergo churn +# run: | +# mkdir -p $BOOTSTRAP_NODE_DATA_PATH +# ./target/release/safenode --first \ +# --root-dir $BOOTSTRAP_NODE_DATA_PATH --log-output-dest $BOOTSTRAP_NODE_DATA_PATH --local --owner=bootstrap & +# sleep 10 +# env: +# SN_LOG: "all" + +# - name: Set SAFE_PEERS +# run: | +# safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ +# rg '/ip4.*$' -m1 -o | rg '"' -r '') +# echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV + +# - name: Check SAFE_PEERS was set +# shell: bash +# run: echo "The SAFE_PEERS variable has been set to $SAFE_PEERS" + +# - name: Start a node instance to be restarted +# run: | +# mkdir -p $RESTART_TEST_NODE_DATA_PATH +# ./target/release/safenode \ +# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restart & +# sleep 10 +# env: +# SN_LOG: 
"all" + +# - name: Start a local network +# env: +# SN_LOG: "all" +# uses: maidsafe/sn-local-testnet-action@main +# with: +# action: start +# build: true +# faucet-path: target/release/faucet +# interval: 2000 +# join: true +# node-path: target/release/safenode +# owner-prefix: node +# platform: ubuntu-latest +# set-safe-peers: false + +# # In this case we did *not* want SAFE_PEERS to be set to another value by starting the testnet +# - name: Check SAFE_PEERS was not changed +# shell: bash +# run: echo "The SAFE_PEERS variable has been set to ${SAFE_PEERS}" + +# - name: Create and fund a wallet to pay for files storage +# run: | +# echo "Obtaining address for use with the faucet..." +# ./target/release/safe --log-output-dest=data-dir wallet create --no-password +# address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) +# echo "Sending tokens to the faucet at $address" +# ./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt +# cat initial_balance_from_faucet.txt +# cat initial_balance_from_faucet.txt | tail -n 1 > transfer_hex +# cat transfer_hex +# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex +# env: +# SN_LOG: "all" +# timeout-minutes: 15 + +# - name: Move faucet log to the working folder +# run: | +# echo "SAFE_DATA_PATH has: " +# ls -l $SAFE_DATA_PATH +# echo "test_faucet foder has: " +# ls -l $SAFE_DATA_PATH/test_faucet +# echo "logs folder has: " +# ls -l $SAFE_DATA_PATH/test_faucet/logs +# mv $FAUCET_LOG_PATH/faucet.log ./faucet_log.log +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Download 95mb file to be uploaded with the safe client +# shell: bash +# run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip + +# # The resources file we upload may change, and with it mem consumption. +# # Be aware! +# - name: Start a client to upload files +# # -p makes files public +# run: | +# ls -l +# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data.zip" --retry-strategy quick -p +# env: +# SN_LOG: "all" +# timeout-minutes: 25 + +# # this check needs to be after some transfer activity +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $RESTART_TEST_NODE_DATA_PATH +# cat $RESTART_TEST_NODE_DATA_PATH/safenode.log +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $BOOTSTRAP_NODE_DATA_PATH +# cat $BOOTSTRAP_NODE_DATA_PATH/safenode.log + +# - name: Check we're warned about using default genesis +# run: | +# git log -1 --oneline +# ls -la $NODE_DATA_PATH +# rg "USING DEFAULT" "$NODE_DATA_PATH" -u +# shell: bash + +# # Uploading same file using different client shall not incur any payment neither uploads +# # Note rg will throw an error directly in case of failed to find a matching pattern. 
+# - name: Start a different client to upload the same file +# run: | +# pwd +# mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first +# ls -l $SAFE_DATA_PATH +# ls -l $SAFE_DATA_PATH/client_first +# mkdir $SAFE_DATA_PATH/client +# ls -l $SAFE_DATA_PATH +# mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs +# ls -l $CLIENT_DATA_PATH +# cp ./the-test-data.zip ./the-test-data_1.zip +# ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password +# ./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt +# cat initial_balance_from_faucet_1.txt +# cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex +# cat transfer_hex +# ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex +# ./target/release/safe --log-output-dest=data-dir files upload "./the-test-data_1.zip" --retry-strategy quick -p > second_upload.txt +# cat second_upload.txt +# rg "New wallet balance: 5000000.000000000" second_upload.txt -c --stats +# env: +# SN_LOG: "all" +# timeout-minutes: 25 + +# - name: Stop the restart node +# run: kill $( cat $RESTART_TEST_NODE_DATA_PATH/safenode.pid ) + +# - name: Start the restart node again +# run: | +# ./target/release/safenode \ +# --root-dir $RESTART_TEST_NODE_DATA_PATH --log-output-dest $RESTART_TEST_NODE_DATA_PATH --local --owner=restarted & +# sleep 10 +# env: +# SN_LOG: "all" + +# - name: Assert we've reloaded some chunks +# run: rg "Existing record loaded" $RESTART_TEST_NODE_DATA_PATH + +# - name: Chunks data integrity during nodes churn +# run: cargo test --release -p sn_node --test data_with_churn -- --nocapture +# env: +# TEST_DURATION_MINS: 5 +# TEST_TOTAL_CHURN_CYCLES: 15 +# SN_LOG: "all" +# timeout-minutes: 30 + +# - name: Check current files +# run: ls -la +# - name: Check safenode file +# run: ls /home/runner/work/safe_network/safe_network/target/release + +# - name: Check there was no restart issues +# run: | +# if rg 'Failed to execute hard-restart command' $NODE_DATA_PATH; then +# echo "Restart issues detected" +# exit 1 +# else +# echo "No restart issues detected" +# fi + +# - name: Verify the routing tables of the nodes +# run: cargo test --release -p sn_node --test verify_routing_table -- --nocapture +# env: +# SLEEP_BEFORE_VERIFICATION: 300 +# timeout-minutes: 10 + +# - name: Verify restart of nodes using rg +# shell: bash +# timeout-minutes: 1 +# # get the counts, then the specific line, and then the digit count only +# # then check we have an expected level of restarts +# # TODO: make this use an env var, or relate to testnet size +# run: | +# restart_count=$(rg "Node is restarting in" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Restart $restart_count nodes" +# peer_removed=$(rg "PeerRemovedFromRoutingTable" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "PeerRemovedFromRoutingTable $peer_removed times" +# if [ $peer_removed -lt $restart_count ]; then +# echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" +# exit 1 +# fi +# node_count=$(ls $NODE_DATA_PATH | wc -l) +# echo "Node dir count is $node_count" +# # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here +# # if [ $restart_count -lt $node_count ]; then +# # echo "Restart count of: $restart_count is less than the node count of: $node_count" +# # exit 1 +# # fi + +# 
- name: Verify data replication using rg +# shell: bash +# timeout-minutes: 1 +# # get the counts, then the specific line, and then the digit count only +# # then check we have an expected level of replication +# # TODO: make this use an env var, or relate to testnet size +# # As the bootstrap_node using separate folder for logging, +# # hence the folder input to rg needs to cover that as well. +# run: | +# sending_list_count=$(rg "Sending a replication list" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Sent $sending_list_count replication lists" +# received_list_count=$(rg "Received replication list from" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Received $received_list_count replication lists" +# fetching_attempt_count=$(rg "FetchingKeysForReplication" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Carried out $fetching_attempt_count fetching attempts" +# if: always() + +# - name: Start a client to download files +# run: | +# ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick +# ls -l $CLIENT_DATA_PATH/safe_files +# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) +# if [ $downloaded_files -lt 1 ]; then +# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" +# exit 1 +# fi +# env: +# SN_LOG: "all" +# timeout-minutes: 10 + +# # Download the same files again to ensure files won't get corrupted. +# - name: Start a client to download the same files again +# run: | +# ./target/release/safe --log-output-dest=data-dir files download --show-holders --retry-strategy quick +# ls -l $CLIENT_DATA_PATH/safe_files +# downloaded_files=$(ls $CLIENT_DATA_PATH/safe_files | wc -l) +# if [ $downloaded_files -lt 1 ]; then +# echo "Only downloaded $downloaded_files files, less than the 1 file uploaded" +# exit 1 +# fi +# file_size1=$(stat -c "%s" ./the-test-data_1.zip) +# file_size2=$(stat -c "%s" $CLIENT_DATA_PATH/safe_files/the-test-data_1.zip) +# if [ $file_size1 != $file_size2 ]; then +# echo "The downloaded file has a different size $file_size2 to the original $file_size1." +# exit 1 +# fi +# env: +# SN_LOG: "all" +# timeout-minutes: 10 + +# - name: Audit from genesis to collect entire spend DAG and dump to a dot file +# run: | +# ./target/release/safe --log-output-dest=data-dir wallet audit --dot --sk-str 49113d2083f57a976076adbe85decb75115820de1e6e74b47e0429338cef124a > spend_dag_and_statistics.txt +# echo "==============================================================================" +# cat spend_dag_and_statistics.txt +# env: +# SN_LOG: "all" +# timeout-minutes: 5 +# if: always() + +# - name: Ensure discord_ids decrypted +# run: | +# rg 'node_' ./spend_dag_and_statistics.txt -o +# timeout-minutes: 1 +# if: always() + +# - name: Check nodes running +# shell: bash +# timeout-minutes: 1 +# continue-on-error: true +# run: pgrep safenode | wc -l +# if: always() + +# - name: Wait before verifying reward forwarding +# run: sleep 300 + +# - name: Stop the local network and upload logs +# if: always() +# uses: maidsafe/sn-local-testnet-action@main +# with: +# action: stop +# log_file_prefix: safe_test_logs_memcheck +# platform: ubuntu-latest +# build: true + +# - name: Check node memory usage +# shell: bash +# # The resources file and churning chunk_size we upload may change, and with it mem consumption. 
+# # This is set to a value high enough to allow for some variation depending on
+# # resources and node location in the network, but hopefully low enough to catch
+# # any wild memory issues
+# # Any changes to this value should be carefully considered and tested!
+# # As we have a bootstrap node acting as an access point for churning nodes and the client,
+# # the memory usage here will be significantly higher than in the benchmark test,
+# # where we don't have a bootstrap node.
+# run: |
+# node_peak_mem_limit_mb="300" # mb
+
+# peak_mem_usage=$(
+# rg '"memory_used_mb":[^,]*' $NODE_DATA_PATH/*/logs/* -o --no-line-number --no-filename |
+# awk -F':' '/"memory_used_mb":/{print $2}' |
+# sort -n |
+# tail -n 1
+# )
+# echo "Node memory usage: $peak_mem_usage MB"
+
+# if (( $(echo "$peak_mem_usage > $node_peak_mem_limit_mb" | bc -l) )); then
+# echo "Node memory usage exceeded threshold: $peak_mem_usage MB"
+# exit 1
+# fi
+# if: always()
+
+# - name: Check client memory usage
+# shell: bash
+# # Limits here are lower than in the benchmark tests, as there is less going on.
+# run: |
+# client_peak_mem_limit_mb="1024" # mb
+# client_avg_mem_limit_mb="512" # mb
+
+# peak_mem_usage=$(
+# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+# awk -F':' '/"memory_used_mb":/{print $2}' |
+# sort -n |
+# tail -n 1
+# )
+# echo "Peak memory usage: $peak_mem_usage MB"
+# if (( $(echo "$peak_mem_usage > $client_peak_mem_limit_mb" | bc -l) )); then
+# echo "Client peak memory usage exceeded threshold: $client_peak_mem_limit_mb MB"
+# exit 1
+# fi
+
+# total_mem=$(
+# rg '"memory_used_mb":[^,]*' $CLIENT_DATA_PATH/logs --glob safe.* -o --no-line-number --no-filename |
+# awk -F':' '/"memory_used_mb":/ {sum += $2} END {printf "%.0f\n", sum}'
+# )
+# num_of_times=$(
+# rg "\"memory_used_mb\"" $CLIENT_DATA_PATH/logs --glob safe.* -c --stats |
+# rg "(\d+) matches" |
+# rg "\d+" -o
+# )
+# echo "num_of_times: $num_of_times"
+# echo "Total memory is: $total_mem"
+# average_mem=$(($total_mem/$(($num_of_times))))
+# echo "Average memory is: $average_mem"
+
+# if (( $(echo "$average_mem > $client_avg_mem_limit_mb" | bc -l) )); then
+# echo "Client average memory usage exceeded threshold: $client_avg_mem_limit_mb MB"
+# exit 1
+# fi
+
+# - name: Check node swarm_driver handling statistics
+# shell: bash
+# # With the latest improvements, swarm_driver is unlikely to have any
+# # super long handling (longer than 1s).
+# # As the `rg` cmd fails the shell directly when no entry is found,
+# # that case is not covered here.
+# # Be aware that if do need to looking for handlings longer than second, it shall be: +# # rg "SwarmCmd handled in [^m,µ,n]*s:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats +# run: | +# num_of_times=$( +# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | +# rg "(\d+) matches" | +# rg "\d+" -o +# ) +# echo "Number of long cmd handling times: $num_of_times" +# total_long_handling_ms=$( +# rg "SwarmCmd handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | +# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +# ) +# echo "Total cmd long handling time is: $total_long_handling_ms ms" +# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +# echo "Average cmd long handling time is: $average_handling_ms ms" +# total_long_handling=$(($total_long_handling_ms)) +# total_num_of_times=$(($num_of_times)) +# num_of_times=$( +# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -c --stats | +# rg "(\d+) matches" | +# rg "\d+" -o +# ) +# echo "Number of long event handling times: $num_of_times" +# total_long_handling_ms=$( +# rg "SwarmEvent handled in [0-9.]+ms:" $NODE_DATA_PATH/*/logs/* --glob safe.* -o --no-line-number --no-filename | +# awk -F' |ms:' '{sum += $4} END {printf "%.0f\n", sum}' +# ) +# echo "Total event long handling time is: $total_long_handling_ms ms" +# average_handling_ms=$(($total_long_handling_ms/$(($num_of_times)))) +# echo "Average event long handling time is: $average_handling_ms ms" +# total_long_handling=$(($total_long_handling_ms+$total_long_handling)) +# total_num_of_times=$(($num_of_times+$total_num_of_times)) +# average_handling_ms=$(($total_long_handling/$(($total_num_of_times)))) +# echo "Total swarm_driver long handling times is: $total_num_of_times" +# echo "Total swarm_driver long handling duration is: $total_long_handling ms" +# echo "Total average swarm_driver long handling duration is: $average_handling_ms ms" + +# - name: Verify reward forwarding using rg +# shell: bash +# timeout-minutes: 1 +# run: | +# min_reward_forwarding_times="100" +# reward_forwarding_count=$(rg "Reward forwarding completed sending spend" $NODE_DATA_PATH -c --stats | \ +# rg "(\d+) matches" | rg "\d+" -o) +# echo "Carried out $reward_forwarding_count reward forwardings" +# if (( $(echo "$reward_forwarding_count < $min_reward_forwarding_times" | bc -l) )); then +# echo "Reward forwarding times below the threshold: $min_reward_forwarding_times" +# exit 1 +# fi +# if: always() + +# - name: Upload payment wallet initialization log +# uses: actions/upload-artifact@main +# with: +# name: payment_wallet_initialization_log +# path: initial_balance_from_faucet.txt +# continue-on-error: true +# if: always() + +# - name: Move faucet log to the working folder +# run: | +# echo "current folder is:" +# pwd +# echo "SAFE_DATA_PATH has: " +# ls -l $SAFE_DATA_PATH +# echo "test_faucet foder has: " +# ls -l $SAFE_DATA_PATH/test_faucet +# echo "logs folder has: " +# ls -l $SAFE_DATA_PATH/test_faucet/logs +# mv $FAUCET_LOG_PATH/*.log ./faucet_log.log +# env: +# SN_LOG: "all" +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Move bootstrap_node log to the working directory +# run: | +# ls -l $BOOTSTRAP_NODE_DATA_PATH +# mv $BOOTSTRAP_NODE_DATA_PATH/safenode.log ./bootstrap_node.log +# continue-on-error: true +# if: always() +# timeout-minutes: 1 + +# - name: Upload faucet log +# uses: actions/upload-artifact@main +# with: +# name: 
memory_check_faucet_log +# path: faucet_log.log +# continue-on-error: true +# if: always() + +# - name: Upload bootstrap_node log +# uses: actions/upload-artifact@main +# with: +# name: memory_check_bootstrap_node_log +# path: bootstrap_node.log +# continue-on-error: true +# if: always() + +# - name: Upload spend DAG and statistics +# uses: actions/upload-artifact@main +# with: +# name: memory_check_spend_dag_and_statistics +# path: spend_dag_and_statistics.txt +# continue-on-error: true +# if: always() diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml index ea49a67372..55cd701cbf 100644 --- a/.github/workflows/node_man_tests.yml +++ b/.github/workflows/node_man_tests.yml @@ -35,122 +35,122 @@ jobs: - shell: bash run: cargo test --lib --package sn-node-manager - node-manager-user-mode-e2e-tests: - name: user-mode e2e - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - { os: ubuntu-latest } - - { os: macos-latest } - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - shell: bash - run: | - cargo test --package sn-node-manager --release --test e2e -- --nocapture - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: node_man_tests_user_mode - platform: ${{ matrix.os }} - - node-manager-e2e-tests: - name: system-wide e2e - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - include: - - { os: ubuntu-latest, elevated: sudo -E env PATH="$PATH" } - - { os: macos-latest, elevated: sudo -E } - - { os: windows-latest } - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --bin safenode --bin faucet - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - shell: bash - if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' - run: | - ${{ matrix.elevated }} rustup default stable - ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture - - # Powershell step runs as admin by default. 
- - name: run integration test in powershell - if: matrix.os == 'windows-latest' - shell: pwsh - run: | - curl -L -o WinSW.exe $env:WINSW_URL - - New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" - Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" - $env:PATH += ";$env:GITHUB_WORKSPACE\bin" - - cargo test --release --package sn-node-manager --test e2e -- --nocapture - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: node_man_tests_system_wide - platform: ${{ matrix.os }} + # node-manager-user-mode-e2e-tests: + # name: user-mode e2e + # runs-on: ${{ matrix.os }} + # strategy: + # fail-fast: false + # matrix: + # include: + # - { os: ubuntu-latest } + # - { os: macos-latest } + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --bin safenode --bin faucet + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - shell: bash + # run: | + # cargo test --package sn-node-manager --release --test e2e -- --nocapture + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: node_man_tests_user_mode + # platform: ${{ matrix.os }} + + # node-manager-e2e-tests: + # name: system-wide e2e + # runs-on: ${{ matrix.os }} + # strategy: + # fail-fast: false + # matrix: + # include: + # - { os: ubuntu-latest, elevated: sudo -E env PATH="$PATH" } + # - { os: macos-latest, elevated: sudo -E } + # - { os: windows-latest } + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --bin safenode --bin faucet + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - shell: bash + # if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' + # run: | + # ${{ matrix.elevated }} rustup default stable + # ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture + + # # Powershell step runs as admin by default. 
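A side note on the `elevated` values in the matrix above: `sudo -E` preserves the caller's environment (so variables like SAFE_PEERS survive), but sudo on Ubuntu still resets PATH through `secure_path`, hence the extra `env PATH="$PATH"`. A minimal sketch, with a placeholder peer address:

    export SAFE_PEERS="/ip4/127.0.0.1/udp/4242/quic-v1/p2p/12D3KooWExamplePeerId"

    # -E keeps SAFE_PEERS; env PATH="$PATH" re-injects the caller's PATH so
    # binaries under ~/.cargo/bin (cargo, rustup) stay visible to root.
    sudo -E env PATH="$PATH" bash -c 'echo "SAFE_PEERS=$SAFE_PEERS"; command -v cargo'

The PowerShell step below is the Windows counterpart; it needs no such prefix because, as the comment above notes, the runner's PowerShell already executes with admin rights.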
+ # - name: run integration test in powershell + # if: matrix.os == 'windows-latest' + # shell: pwsh + # run: | + # curl -L -o WinSW.exe $env:WINSW_URL + + # New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" + # Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" + # $env:PATH += ";$env:GITHUB_WORKSPACE\bin" + + # cargo test --release --package sn-node-manager --test e2e -- --nocapture + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: node_man_tests_system_wide + # platform: ${{ matrix.os }} From 8c5429a70bac56076afa7b48f959edea3212466d Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 14:50:13 +0200 Subject: [PATCH 090/255] refactor(global): fix clippy/formatting --- autonomi/src/client/data.rs | 4 ++-- autonomi_cli/src/commands/register.rs | 4 ++++ autonomi_cli/src/utils.rs | 4 ++-- evmlib/src/wallet.rs | 1 - sn_node_manager/src/cmd/local.rs | 8 ++++---- sn_node_manager/src/local.rs | 5 ++++- 6 files changed, 16 insertions(+), 10 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 231f365401..b2dd9521b6 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -202,8 +202,8 @@ impl Client { let cost_map = self.get_store_quotes(content_addrs.into_iter()).await?; let total_cost = AttoTokens::from_atto( cost_map - .iter() - .map(|(_, quote)| quote.2.cost.as_atto()) + .values() + .map(|quote| quote.2.cost.as_atto()) .sum::(), ); Ok(total_cost) diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index fd57d678e0..250ff75dfd 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -10,6 +10,7 @@ use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; +#[expect(clippy::unused_async)] pub async fn cost(name: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; @@ -17,6 +18,7 @@ pub async fn cost(name: &str, _peers: Vec) -> Result<()> { Ok(()) } +#[expect(clippy::unused_async)] pub async fn create(name: &str, value: &str, _peers: Vec) -> Result<()> { let secret_key = crate::utils::get_secret_key() .wrap_err("The secret key is required to perform this action")?; @@ -28,6 +30,7 @@ pub async fn create(name: &str, value: &str, _peers: Vec) -> Result<( Ok(()) } +#[expect(clippy::unused_async)] pub async fn edit(name: &str, value: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; @@ -35,6 +38,7 @@ pub async fn edit(name: &str, value: &str, _peers: Vec) -> Result<()> Ok(()) } +#[expect(clippy::unused_async)] pub async fn get(name: &str, _peers: Vec) -> Result<()> { let register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs index 71c8b779a7..aa37b007fb 100644 --- a/autonomi_cli/src/utils.rs +++ b/autonomi_cli/src/utils.rs @@ -79,9 +79,9 @@ pub fn get_client_data_dir_path() -> Result { pub async fn get_peers(peers: PeersArgs) -> Result> { peers.get_peers().await - .wrap_err(format!("Please provide valid Network peers to connect to")) + .wrap_err("Please provide valid Network peers to connect to".to_string()) .with_suggestion(|| 
format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var")) - .with_suggestion(|| format!("a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere")) + .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere".to_string()) } pub(crate) fn get_evm_network() -> Result { diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 69c9644240..498eb3afc2 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -266,7 +266,6 @@ pub async fn pay_for_quotes>( mod tests { use crate::common::Amount; use crate::testnet::Testnet; - use crate::utils::dummy_address; use crate::wallet::{from_private_key, Wallet}; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet}; use alloy::primitives::address; diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index b77ed0b36e..c83938137f 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -65,9 +65,9 @@ pub async fn join( #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, - faucet_path, + _faucet_path, ReleaseType::Faucet, - faucet_version, + _faucet_version, &*release_repo, verbosity, ) @@ -200,9 +200,9 @@ pub async fn run( #[cfg(feature = "faucet")] let faucet_bin_path = get_bin_path( build, - faucet_path, + _faucet_path, ReleaseType::Faucet, - faucet_version, + _faucet_version, &*release_repo, verbosity, ) diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 6373ba46d4..e729022d69 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,8 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, + check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + increment_port_option, }; use color_eyre::eyre::OptionExt; use color_eyre::{eyre::eyre, Result}; @@ -19,11 +20,13 @@ use mockall::automock; use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; +use sn_service_management::FaucetServiceData; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, }; +use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, From cb98494e1cd1de909f4c6177dfea5760145b8d3d Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 14:56:09 +0200 Subject: [PATCH 091/255] ci(global): disable CI tests failing on EVM --- .github/workflows/merge.yml | 584 ++++++++++++++++++------------------ 1 file changed, 292 insertions(+), 292 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 545010b2ba..0634a90837 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -485,320 +485,320 @@ jobs: # log_file_prefix: safe_test_logs_spend_simulation # platform: ${{ matrix.os }} - token_distribution_test: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: token distribution test - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v4 + # token_distribution_test: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: token distribution test + # runs-on: ${{ matrix.os }} + # strategy: + # 
matrix: + # os: [ubuntu-latest, windows-latest, macos-latest] + # steps: + # - uses: actions/checkout@v4 - - name: Install Rust - uses: dtolnay/rust-toolchain@stable + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + # - uses: Swatinem/rust-cache@v2 - - name: Build binaries - run: cargo build --release --features=local-discovery,distribution --bin safenode - timeout-minutes: 35 + # - name: Build binaries + # run: cargo build --release --features=local-discovery,distribution --bin safenode + # timeout-minutes: 35 - - name: Build faucet binary - run: cargo build --release --features=local-discovery,distribution,gifting --bin faucet - timeout-minutes: 35 + # - name: Build faucet binary + # run: cargo build --release --features=local-discovery,distribution,gifting --bin faucet + # timeout-minutes: 35 - - name: Build testing executable - run: cargo test --release --features=local-discovery,distribution --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 35 + # - name: Build testing executable + # run: cargo test --release --features=local-discovery,distribution --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 35 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: execute token_distribution tests - run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 - env: - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_token_distribution - platform: ${{ matrix.os }} + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true - churn: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Network churning tests - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - node_data_path: /home/runner/.local/share/safe/node - safe_path: /home/runner/.local/share/safe - - os: windows-latest - node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - - os: macos-latest - node_data_path: /Users/runner/Library/Application Support/safe/node - safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - uses: dtolnay/rust-toolchain@stable + # - name: execute token_distribution tests + # run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 + # env: + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 - - uses: Swatinem/rust-cache@v2 + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_token_distribution + # platform: ${{ matrix.os }} - - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode - timeout-minutes: 30 + # churn: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Network churning tests + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # node_data_path: /home/runner/.local/share/safe/node + # safe_path: /home/runner/.local/share/safe + # - os: windows-latest + # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # - os: macos-latest + # node_data_path: /Users/runner/Library/Application Support/safe/node + # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 - - name: Build faucet binaries - run: cargo build --release --features="local-discovery,gifting" --bin faucet - timeout-minutes: 30 + # - uses: dtolnay/rust-toolchain@stable - - name: Build churn tests - run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 + # - uses: Swatinem/rust-cache@v2 - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: Chunks data integrity during nodes churn - run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture - env: - TEST_DURATION_MINS: 5 - TEST_TOTAL_CHURN_CYCLES: 15 - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 30 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_churn - platform: ${{ matrix.os }} + # - name: Build binaries + # run: cargo build --release --features local-discovery --bin safenode + # timeout-minutes: 30 - - name: Verify restart of nodes using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" - peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" - - # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - # if [ $restart_count -lt $node_count ]; then - # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # exit 1 - # fi - - - name: Verify data replication using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of replication - # TODO: make this use an env var, or relate to testnet size - run: | - fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Carried out $fetching_attempt_count fetching attempts" - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - if [ $fetching_attempt_count -lt $node_count ]; then - echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" - exit 1 - fi - - # Only error out after uploading the logs - - name: Don't log raw data - if: matrix.os != 'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! 
rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi + # - name: Build faucet binaries + # run: cargo build --release --features="local-discovery,gifting" --bin faucet + # timeout-minutes: 30 - verify_data_location_routing_table: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: Verify data location and Routing Table - runs-on: ${{ matrix.os }} - strategy: - matrix: - include: - - os: ubuntu-latest - node_data_path: /home/runner/.local/share/safe/node - safe_path: /home/runner/.local/share/safe - - os: windows-latest - node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - - os: macos-latest - node_data_path: /Users/runner/Library/Application Support/safe/node - safe_path: /Users/runner/Library/Application Support/safe - steps: - - uses: actions/checkout@v4 + # - name: Build churn tests + # run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 - - uses: dtolnay/rust-toolchain@stable + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true - - uses: Swatinem/rust-cache@v2 + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi - - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode - timeout-minutes: 30 + # - name: Chunks data integrity during nodes churn + # run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture + # env: + # TEST_DURATION_MINS: 5 + # TEST_TOTAL_CHURN_CYCLES: 15 + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 - - name: Build fuacet binary - run: cargo build --release --features="local-discovery,gifting" --bin faucet - timeout-minutes: 30 + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_churn + # platform: ${{ matrix.os }} - - name: Build data location and routing table tests - run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 + # - name: Verify restart of nodes using rg + # shell: bash + # timeout-minutes: 1 + # # get the counts, then the specific line, and then the digit count only + # # then check we have an expected level of restarts + # # TODO: make this use an env var, or relate to testnet size + # run: | + # restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + # rg "(\d+) matches" | rg "\d+" -o) + # echo "Restart $restart_count nodes" + # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + # rg "(\d+) matches" | rg "\d+" -o) + # echo "PeerRemovedFromRoutingTable $peer_removed times" + # if [ $peer_removed -lt $restart_count ]; then + # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + # exit 1 + # fi + # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + # echo "Node dir count is $node_count" - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 5 - - - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture - env: - CHURN_COUNT: 6 - SN_LOG: "all" - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 5 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_data_location - platform: ${{ matrix.os }} + # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here + # # if [ $restart_count -lt $node_count ]; then + # # echo "Restart count of: $restart_count is less than the node count of: $node_count" + # # exit 1 + # # fi - - name: Verify restart of nodes using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restart $restart_count nodes" - peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "PeerRemovedFromRoutingTable $peer_removed times" - if [ $peer_removed -lt $restart_count ]; then - echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - exit 1 - fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" - - # Only error out after uploading the logs - - name: Don't log raw data - if: matrix.os != 'windows-latest' # causes error - shell: bash - timeout-minutes: 10 - run: | - if ! rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' - then - echo "We are logging an extremely large data" - exit 1 - fi + # - name: Verify data replication using rg + # shell: bash + # timeout-minutes: 1 + # # get the counts, then the specific line, and then the digit count only + # # then check we have an expected level of replication + # # TODO: make this use an env var, or relate to testnet size + # run: | + # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.node_data_path }}" -c --stats | \ + # rg "(\d+) matches" | rg "\d+" -o) + # echo "Carried out $fetching_attempt_count fetching attempts" + # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + # if [ $fetching_attempt_count -lt $node_count ]; then + # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" + # exit 1 + # fi + + # # Only error out after uploading the logs + # - name: Don't log raw data + # if: matrix.os != 'windows-latest' # causes error + # shell: bash + # timeout-minutes: 10 + # run: | + # if ! 
rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi + + # verify_data_location_routing_table: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: Verify data location and Routing Table + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # include: + # - os: ubuntu-latest + # node_data_path: /home/runner/.local/share/safe/node + # safe_path: /home/runner/.local/share/safe + # - os: windows-latest + # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + # - os: macos-latest + # node_data_path: /Users/runner/Library/Application Support/safe/node + # safe_path: /Users/runner/Library/Application Support/safe + # steps: + # - uses: actions/checkout@v4 + + # - uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features local-discovery --bin safenode + # timeout-minutes: 30 + + # - name: Build fuacet binary + # run: cargo build --release --features="local-discovery,gifting" --bin faucet + # timeout-minutes: 30 + + # - name: Build data location and routing table tests + # run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: Verify the routing tables of the nodes + # run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 5 + + # - name: Verify the location of the data on the network + # run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + # env: + # CHURN_COUNT: 6 + # SN_LOG: "all" + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 25 + + # - name: Verify the routing tables of the nodes + # run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 5 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_data_location + # platform: ${{ matrix.os }} + + # - name: Verify restart of nodes using rg + # shell: bash + # timeout-minutes: 1 + # # get the counts, then the specific line, and then the digit count only + # # then check we have an expected level of restarts + # # TODO: make this use an env var, or relate to testnet size + # run: | + # restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + # rg "(\d+) matches" | rg "\d+" -o) + # echo "Restart $restart_count nodes" + # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + # rg "(\d+) matches" | rg "\d+" -o) + # echo "PeerRemovedFromRoutingTable $peer_removed times" + # if [ $peer_removed -lt $restart_count ]; then + # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + # exit 1 + # fi + # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + # echo "Node dir count is $node_count" + + # # Only error out after uploading the logs + # - name: Don't log raw data + # if: matrix.os != 'windows-latest' # causes error + # shell: bash + # timeout-minutes: 10 + # run: | + # if ! rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' + # then + # echo "We are logging an extremely large data" + # exit 1 + # fi # faucet_test: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" From 3a3b2c7261e31e50ed3f5345930d42ea05a3f6f8 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 1 Oct 2024 15:12:38 +0200 Subject: [PATCH 092/255] chore(manager): add `which` dep for windows --- Cargo.lock | 1 + sn_node_manager/Cargo.toml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index f2c610889d..26f5411a0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8090,6 +8090,7 @@ dependencies = [ "tracing", "users", "uuid", + "which 6.0.3", ] [[package]] diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 4163854115..745f432331 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -65,6 +65,9 @@ uuid = { version = "1.5.0", features = ["v4"] } nix = { version = "0.27.1", features = ["fs", "user"] } users = "0.11" +[target.'cfg(target_os = "windows")'.dependencies] +which = "6.0.1" + [dev-dependencies] assert_cmd = "2.0.12" assert_fs = "1.0.13" From e36dfdd4a2fa18328daa92ccc4fab6dc73dec816 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 1 Oct 2024 21:57:18 +0530 Subject: [PATCH 093/255] fix(manager): use the correct cargo target dir during local build --- sn_node_manager/src/cmd/mod.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/sn_node_manager/src/cmd/mod.rs b/sn_node_manager/src/cmd/mod.rs index ec4055a7a3..a8cb0bde8f 100644 --- a/sn_node_manager/src/cmd/mod.rs +++ b/sn_node_manager/src/cmd/mod.rs @@ -137,10 +137,8 @@ pub async fn get_bin_path( ) -> Result { if build { debug!("Obtaining bin path for {release_type:?} by building"); - build_binary(&release_type)?; - Ok(PathBuf::from("target") - .join("release") - .join(release_type.to_string())) + let target_dir = build_binary(&release_type)?; + Ok(target_dir.join(release_type.to_string())) } else if let Some(path) = path { debug!("Using the supplied custom binary for {release_type:?}: {path:?}"); Ok(path) @@ 
-159,7 +157,8 @@ pub async fn get_bin_path(
     }
 }
 
-fn build_binary(bin_type: &ReleaseType) -> Result<()> {
+// Returns the target dir after building the binary
+fn build_binary(bin_type: &ReleaseType) -> Result<PathBuf> {
     debug!("Building {bin_type} binary");
     let mut args = vec!["build", "--release"];
     let bin_name = bin_type.to_string();
@@ -193,12 +192,17 @@ fn build_binary(bin_type: &ReleaseType) -> Result<()> {
 
     print_banner(&format!("Building {} binary", bin_name));
 
+    let mut target_dir = PathBuf::new();
     let mut build_result = Command::new("cargo");
     let _ = build_result.args(args.clone());
 
     if let Ok(val) = std::env::var("CARGO_TARGET_DIR") {
-        let _ = build_result.env("CARGO_TARGET_DIR", val);
+        let _ = build_result.env("CARGO_TARGET_DIR", val.clone());
+        target_dir.push(val);
+    } else {
+        target_dir.push("target");
     }
+    let target_dir = target_dir.join("release");
 
     let build_result = build_result
         .stdout(Stdio::inherit())
@@ -210,5 +214,5 @@ fn build_binary(bin_type: &ReleaseType) -> Result<()> {
         return Err(eyre!("Failed to build binaries"));
     }
 
-    Ok(())
+    Ok(target_dir)
 }

From a500ed256f215d402908d1a35bca89d568d7e106 Mon Sep 17 00:00:00 2001
From: Warm Beer
Date: Wed, 2 Oct 2024 09:21:10 +0200
Subject: [PATCH 094/255] feat: evm network from env vars

---
 Cargo.lock                        |  1 +
 autonomi_cli/Cargo.toml           |  1 +
 autonomi_cli/src/commands/file.rs | 17 ++++++++++-------
 autonomi_cli/src/utils.rs         |  7 +++----
 evmlib/src/utils.rs               | 21 ++++++++++++++++++++-
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f2c610889d..af24889f07 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1160,6 +1160,7 @@ dependencies = [
  "clap",
  "color-eyre",
  "dirs-next",
+ "evmlib",
  "indicatif",
  "sn_build_info",
  "sn_logging",
diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml
index a1c6bd5705..e779493126 100644
--- a/autonomi_cli/Cargo.toml
+++ b/autonomi_cli/Cargo.toml
@@ -14,6 +14,7 @@ autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files
 clap = { version = "4.2.1", features = ["derive"] }
 color-eyre = "~0.6"
 dirs-next = "~2.0.0"
+evmlib = { path = "../evmlib", version = "0.1.0" }
 indicatif = { version = "0.17.5", features = ["tokio"] }
 tokio = { version = "1.32.0", features = [
     "io-util",
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
index acfbfc94f8..9a153438a5 100644
--- a/autonomi_cli/src/commands/file.rs
+++ b/autonomi_cli/src/commands/file.rs
@@ -7,8 +7,8 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
use autonomi::client::address::xorname_to_str;
-use autonomi::Wallet;
 use autonomi::Multiaddr;
+use autonomi::Wallet;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
 use std::path::PathBuf;
@@ -17,7 +17,9 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let mut client = crate::actions::connect_to_network(peers).await?;
 
     println!("Getting upload cost...");
-    let cost = client.file_cost(&PathBuf::from(file)).await
+    let cost = client
+        .file_cost(&PathBuf::from(file))
+        .await
         .wrap_err("Failed to calculate cost for file")?;
 
     println!("Estimate cost to upload file: {file}");
@@ -28,15 +30,16 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
 pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let secret_key = crate::utils::get_secret_key()
         .wrap_err("The secret key is required to perform this action")?;
-    let network = crate::utils::get_evm_network()
-        .wrap_err("Failed to get evm network")?;
-    let wallet = Wallet::new_from_private_key(network, &secret_key)
-        .wrap_err("Failed to load wallet")?;
+    let network = crate::utils::get_evm_network_from_environment()?;
+    let wallet =
+        Wallet::new_from_private_key(network, &secret_key).wrap_err("Failed to load wallet")?;
 
     let mut client = crate::actions::connect_to_network(peers).await?;
 
     println!("Uploading data to network...");
-    let (_, xor_name) = client.upload_from_dir(PathBuf::from(file), &wallet).await
+    let (_, xor_name) = client
+        .upload_from_dir(PathBuf::from(file), &wallet)
+        .await
         .wrap_err("Failed to upload file")?;
 
     let addr = xorname_to_str(xor_name);
diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs
index 71c8b779a7..ebadde29ab 100644
--- a/autonomi_cli/src/utils.rs
+++ b/autonomi_cli/src/utils.rs
@@ -84,8 +84,7 @@ pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> {
         .with_suggestion(|| format!("a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere"))
 }
 
-pub(crate) fn get_evm_network() -> Result<Network> {
-    // NB TODO load custom network from config file/env/cmd line
-    let network = Network::ArbitrumOne;
-    Ok(network)
+pub(crate) fn get_evm_network_from_environment() -> Result<Network> {
+    evmlib::utils::evm_network_from_env()
+        .map_err(|err| eyre!("Failed to get EVM network from environment: {err}"))
 }
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index 3346eb789e..8fde529508 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -1,12 +1,31 @@
 use crate::common::{Address, Hash};
+use crate::{CustomNetwork, Network};
 use rand::Rng;
+use std::env;
+use std::env::VarError;
 
 /// Generate a random Address.
 pub fn dummy_address() -> Address {
     Address::new(rand::rngs::OsRng.gen())
 }
 
-/// generate a random Hash.
+/// Generate a random Hash.
pub fn dummy_hash() -> Hash { Hash::new(rand::rngs::OsRng.gen()) } + +/// Get the `Network` from environment variables +pub fn evm_network_from_env() -> Result { + const EVM_VARS: [&str; 3] = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "CHUNK_PAYMENTS_ADDRESS"]; + let custom_vars_exist = EVM_VARS.iter().all(|var| env::var(var).is_ok()); + + if custom_vars_exist { + Ok(Network::Custom(CustomNetwork::new( + &env::var(EVM_VARS[0])?, + &env::var(EVM_VARS[1])?, + &env::var(EVM_VARS[2])?, + ))) + } else { + Ok(Network::ArbitrumOne) + } +} From 4e90d6fbdac421dc41d6f20e8a92373723a9c7b6 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Tue, 1 Oct 2024 16:18:04 +0200 Subject: [PATCH 095/255] chore: `autonomi` entry into WASM --- Cargo.lock | 419 ++++++++++++++++++++------------------------ README.md | 33 ++-- autonomi/Cargo.toml | 5 +- evmlib/Cargo.toml | 5 +- 4 files changed, 221 insertions(+), 241 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9110c9bf2f..49c73299c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,7 +130,6 @@ dependencies = [ "alloy-network", "alloy-node-bindings", "alloy-provider", - "alloy-pubsub", "alloy-rpc-client", "alloy-rpc-types", "alloy-serde", @@ -138,8 +137,6 @@ dependencies = [ "alloy-signer-local", "alloy-transport", "alloy-transport-http", - "alloy-transport-ipc", - "alloy-transport-ws", ] [[package]] @@ -178,7 +175,6 @@ dependencies = [ "alloy-network-primitives", "alloy-primitives", "alloy-provider", - "alloy-pubsub", "alloy-rpc-types-eth", "alloy-sol-types", "alloy-transport", @@ -226,8 +222,6 @@ dependencies = [ "alloy-rlp", "alloy-serde", "c-kzg", - "derive_more", - "k256", "once_cell", "serde", "sha2 0.10.8", @@ -354,15 +348,12 @@ dependencies = [ "alloy-network-primitives", "alloy-node-bindings", "alloy-primitives", - "alloy-pubsub", "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", "alloy-signer-local", "alloy-transport", "alloy-transport-http", - "alloy-transport-ipc", - "alloy-transport-ws", "async-stream", "async-trait", "auto_impl", @@ -379,25 +370,6 @@ dependencies = [ "url", ] -[[package]] -name = "alloy-pubsub" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5da2c55cbaf229bad3c5f8b00b5ab66c74ef093e5f3a753d874cfecf7d2281" -dependencies = [ - "alloy-json-rpc", - "alloy-primitives", - "alloy-transport", - "bimap", - "futures", - "serde", - "serde_json", - "tokio", - "tokio-stream", - "tower", - "tracing", -] - [[package]] name = "alloy-rlp" version = "0.3.8" @@ -427,12 +399,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b38e3ffdb285df5d9f60cb988d336d9b8e3505acb78750c3bc60336a7af41d3" dependencies = [ "alloy-json-rpc", - "alloy-primitives", - "alloy-pubsub", "alloy-transport", "alloy-transport-http", - "alloy-transport-ipc", - "alloy-transport-ws", "futures", "pin-project", "reqwest 0.12.7", @@ -452,7 +420,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c31a3750b8f5a350d17354e46a52b0f2f19ec5f2006d816935af599dedc521" dependencies = [ "alloy-rpc-types-anvil", - "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", "serde", @@ -469,24 +436,6 @@ dependencies = [ "serde", ] -[[package]] -name = "alloy-rpc-types-engine" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff63f51b2fb2f547df5218527fd0653afb1947bf7fead5b3ce58c75d170b30f7" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "alloy-rlp", - "alloy-rpc-types-eth", - "alloy-serde", - 
"jsonwebtoken", - "rand 0.8.5", - "serde", - "thiserror", -] - [[package]] name = "alloy-rpc-types-eth" version = "0.2.1" @@ -654,43 +603,6 @@ dependencies = [ "url", ] -[[package]] -name = "alloy-transport-ipc" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804494366e20468776db4e18f9eb5db7db0fe14f1271eb6dbf155d867233405c" -dependencies = [ - "alloy-json-rpc", - "alloy-pubsub", - "alloy-transport", - "bytes", - "futures", - "interprocess", - "pin-project", - "serde_json", - "tokio", - "tokio-util 0.7.12", - "tracing", -] - -[[package]] -name = "alloy-transport-ws" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af855163e7df008799941aa6dd324a43ef2bf264b08ba4b22d44aad6ced65300" -dependencies = [ - "alloy-pubsub", - "alloy-transport", - "futures", - "http 1.1.0", - "rustls 0.23.13", - "serde_json", - "tokio", - "tokio-tungstenite 0.23.1", - "tracing", - "ws_stream_wasm", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -1048,17 +960,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", -] - [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -1149,6 +1050,7 @@ dependencies = [ "tracing", "tracing-subscriber", "walkdir", + "wasm-bindgen-test", "xor_name", ] @@ -1295,12 +1197,6 @@ dependencies = [ "console", ] -[[package]] -name = "bimap" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" - [[package]] name = "bincode" version = "1.3.3" @@ -1930,6 +1826,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if", + "wasm-bindgen", +] + [[package]] name = "const-hex" version = "1.12.0" @@ -2579,12 +2485,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "doctest-file" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" - [[package]] name = "downcast" version = "0.11.0" @@ -2801,6 +2701,7 @@ name = "evmlib" version = "0.1.0" dependencies = [ "alloy", + "getrandom 0.2.15", "rand 0.8.5", "serde", "thiserror", @@ -2987,6 +2888,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -4214,6 +4130,22 @@ dependencies = [ 
"tokio-io-timeout", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.8" @@ -4454,21 +4386,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "interprocess" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" -dependencies = [ - "doctest-file", - "futures-core", - "libc", - "recvmsg", - "tokio", - "widestring", - "windows-sys 0.52.0", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -4566,21 +4483,6 @@ dependencies = [ "serde", ] -[[package]] -name = "jsonwebtoken" -version = "9.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" -dependencies = [ - "base64 0.21.7", - "js-sys", - "pem", - "ring 0.17.8", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "k256" version = "0.13.4" @@ -5432,6 +5334,16 @@ dependencies = [ "unicase", ] +[[package]] +name = "minicov" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +dependencies = [ + "cc", + "walkdir", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5631,6 +5543,23 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -5965,6 +5894,50 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "opentelemetry" version = "0.20.0" @@ -6298,16 +6271,6 @@ dependencies = [ "indexmap 2.5.0", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - [[package]] name = "pin-project" version = "1.1.5" @@ -7123,12 +7086,6 @@ dependencies = [ "rand_core 0.3.1", ] -[[package]] -name = "recvmsg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" - [[package]] name = "redox_syscall" version = "0.5.4" @@ -7249,11 +7206,13 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.3", + "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -7266,6 +7225,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.0", "tower-service", "url", @@ -7632,6 +7592,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -7721,6 +7690,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "self_encryption" version = "0.29.2" @@ -8023,18 +8015,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "thiserror", - "time", -] - [[package]] name = "slab" version = "0.4.9" @@ -8966,6 +8946,16 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -9019,23 +9009,7 @@ dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.21.0", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" -dependencies = [ - "futures-util", - "log", - "rustls 0.23.13", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.26.0", - "tungstenite 0.23.0", - "webpki-roots 0.26.6", + "tungstenite", ] [[package]] @@ -9393,26 +9367,6 @@ dependencies = [ "utf-8", ] -[[package]] -name = "tungstenite" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" -dependencies = [ - 
"byteorder", - "bytes", - "data-encoding", - "http 1.1.0", - "httparse", - "log", - "rand 0.8.5", - "rustls 0.23.13", - "rustls-pki-types", - "sha1", - "thiserror", - "utf-8", -] - [[package]] name = "typenum" version = "1.17.0" @@ -9602,6 +9556,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vergen" version = "8.3.2" @@ -9700,7 +9660,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "tokio", - "tokio-tungstenite 0.21.0", + "tokio-tungstenite", "tokio-util 0.7.12", "tower-service", "tracing", @@ -9785,6 +9745,32 @@ version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +[[package]] +name = "wasm-bindgen-test" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +dependencies = [ + "console_error_panic_hook", + "js-sys", + "minicov", + "scoped-tls", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "wasmtimer" version = "0.2.0" @@ -10146,25 +10132,6 @@ version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper 0.6.0", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/README.md b/README.md index 33bbd87661..52a485c16a 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ If you wish to build a version of `safenode` from source, some special considera if you want it to connect to the current beta network. You should build from the `stable` branch, as follows: + ``` git checkout stable export GENESIS_PK=806b5c2eba70354ea92ba142c57587c9c5467ff69f0d43c482cda2313f9351e40c6120d76a2495cb3ca8367eee0a676f @@ -50,6 +51,7 @@ royalties are collected. They are also used as part of the node version string, a connecting node is compatible. 
For a client to connect to the current beta network, these keys must be set at build time:
+
 ```
 GENESIS_PK=8829ca178d6022de16fb8d3498411dd8a674a69c5f12e04d8b794a52ab056f1d419d12f690df1082dfa7efbbb10f62fa
 FOUNDATION_PK=84418659a8581b510c40b12e57da239787fd0d3b323f102f09fae9daf2ac96907e0045b1653c301de45117d393d92678
@@ -60,11 +62,13 @@ PAYMENT_FORWARD_PK=8c2f406a52d48d48505e1a3fdbb0c19ab42cc7c4807e9ea19c1fff3e5148f
 ##### Features
 
 You should also build `safe` with the `network-contacts` and `distribution` features enabled:
+
 ```
 cargo build --release --features="network-contacts,distribution" --bin safe
 ```
 
 For `safenode`, only the `network-contacts` feature should be required:
+
 ```
 cargo build --release --features=network-contacts --bin safenode
 ```
@@ -135,7 +139,8 @@ YMMV until stabilised.
 
 ## Using a Local Network
 
-We can explore the network's features by using multiple node processes to form a local network.
+We can explore the network's features by using multiple node processes to form a local network. We also need to run a
+local EVM network for our nodes and client to connect to.
 
 The latest version of [Rust](https://www.rust-lang.org/learn/get-started) should be installed. If you already have an
 installation, use `rustup update` to get the latest version.
 
 Run all the commands from the root of this repository.
 
 Follow these steps to create a local network:
 
-1. Create the test network:
+1. If you haven't already, install Foundry. We need to have access to Anvil, which is packaged with Foundry, to run an + EVM node: https://book.getfoundry.sh/getting-started/installation
+2. Run a local EVM node:
-```bash -cargo run --bin safenode-manager --features local-discovery -- local run --build +```sh +cargo run --bin evm_testnet ``` -2. Verify node status:
+Take note of the console output for the next step (`RPC URL`, `Payment token address` & `Chunk payments address`). + +3. Create the test network and pass the EVM params:
+ `--rewards-address` _is the address where you will receive your node earnings._
 
 ```bash
-cargo run --bin safenode-manager --features local-discovery -- status
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address <YOUR_ETHEREUM_ADDRESS> evm-custom --rpc-url <RPC_URL> --payment-token-address <PAYMENT_TOKEN_ADDRESS> --chunk-payments-address <CHUNK_PAYMENTS_ADDRESS>
 ```
 
-3. Build a tokenized wallet:
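+
+As a filled-in example (illustrative values only — substitute the exact `RPC URL`, `Payment token address` and `Chunk payments address` printed by your own `evm_testnet` run, and your own rewards address; the values below are what a default Anvil instance typically produces):
+
+```bash
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 evm-custom --rpc-url http://localhost:8545 --payment-token-address 0x5FbDB2315678afecb367f032d93F642f64180aa3 --chunk-payments-address 0xe7f1725E7734CE288F8367e1Bb143E90bb3F0512
+```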
+4. Verify node status:
```bash -cargo run --bin safe --features local-discovery -- wallet get-faucet 127.0.0.1:8000 +cargo run --bin safenode-manager --features local-discovery -- status ``` -The node manager's `run` command starts the node processes and a faucet process, the latter of -which will dispense tokens for use with the network. The `status` command should show twenty-five -running nodes. The `wallet` command retrieves some tokens, which enables file uploads. +The node manager's `run` command starts the node processes. The `status` command should show twenty-five +running nodes. ### Files @@ -256,7 +265,7 @@ cargo run --bin safe --features local-discovery -- wallet send 2 [address] ``` This will output a transfer as a hex string, which should be sent to the recipient. -This transfer is encrypted to the recipient so only the recipient can read and redeem it. +This transfer is encrypted to the recipient so only the recipient can read and redeem it. To receive a transfer, simply paste it after the wallet receive command: ``` diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 876936e617..dd99f98fbe 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -15,7 +15,7 @@ full = ["data", "registers", "vault"] data = [] vault = ["data"] files = ["fs", "data"] -fs = [] +fs = ["tokio/fs"] local = ["sn_networking/local-discovery"] registers = [] @@ -40,7 +40,7 @@ sn_registers = { path = "../sn_registers", version = "0.3.19" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } sn_evm = { path = "../sn_evm" } thiserror = "1.0.23" -tokio = { version = "1.35.0", features = ["sync", "fs"] } +tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } walkdir = "2.5.0" xor_name = "5.0.0" @@ -48,6 +48,7 @@ xor_name = "5.0.0" [dev-dependencies] eyre = "0.6.5" tracing-subscriber = { version = "0.3", features = ["env-filter"] } +wasm-bindgen-test = "0.3.43" [lints] workspace = true diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index ea795b69c8..d78ee9e2e3 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -9,11 +9,14 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.1.0" [dependencies] -alloy = { version = "0.2", default-features = false, features = ["std", "full", "provider-anvil-node", "reqwest-rustls-tls"] } +alloy = { version = "0.2", features = ["provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } serde = "1.0" thiserror = "1.0" tokio = "1.38.0" rand = "0.8.5" +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { version = "0.2.12", features = ["js"] } + [lints] workspace = true From fede77dfefc27eae25adfab1807e2f6e067fef3c Mon Sep 17 00:00:00 2001 From: Mick van Dijke Date: Wed, 2 Oct 2024 09:32:53 +0200 Subject: [PATCH 096/255] chore: update alloy features in evmlib Co-authored-by: Benno --- evmlib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index d78ee9e2e3..79c2170270 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.1.0" [dependencies] -alloy = { version = "0.2", features = ["provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +alloy = { version = "0.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } serde = "1.0" thiserror = "1.0" tokio = "1.38.0" From 
be618dc5db6c92dd91dd8174c0b8693a67527f3b Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Wed, 2 Oct 2024 09:49:57 +0200 Subject: [PATCH 097/255] test(autonomi): added wasm put/get test --- Cargo.lock | 143 ----------------------------------------- autonomi/tests/wasm.rs | 37 +++++++++++ 2 files changed, 37 insertions(+), 143 deletions(-) create mode 100644 autonomi/tests/wasm.rs diff --git a/Cargo.lock b/Cargo.lock index 49c73299c9..1de99e6ffe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2888,21 +2888,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -4130,22 +4115,6 @@ dependencies = [ "tokio-io-timeout", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.4.1", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.8" @@ -5543,23 +5512,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - [[package]] name = "netlink-packet-core" version = "0.4.2" @@ -5894,50 +5846,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" -dependencies = [ - "bitflags 2.6.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.77", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "opentelemetry" version = "0.20.0" @@ -7206,13 +7114,11 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.3", - 
"hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -7225,7 +7131,6 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tokio-native-tls", "tokio-rustls 0.26.0", "tower-service", "url", @@ -7592,15 +7497,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" -dependencies = [ - "windows-sys 0.59.0", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -7690,29 +7586,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.6.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "self_encryption" version = "0.29.2" @@ -8946,16 +8819,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -9556,12 +9419,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "vergen" version = "8.3.2" diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs new file mode 100644 index 0000000000..4814a0c102 --- /dev/null +++ b/autonomi/tests/wasm.rs @@ -0,0 +1,37 @@ +use std::time::Duration; + +use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +use autonomi::Client; +use tokio::time::sleep; +use wasm_bindgen_test::*; + +mod common; + +wasm_bindgen_test_configure!(run_in_browser); + +#[tokio::test] +#[wasm_bindgen_test] +async fn file() -> Result<(), Box> { + common::enable_logging(); + + let peers = vec![ + "/ip4/127.0.0.1/tcp/35499/ws/p2p/12D3KooWGN5RqREZ4RYtsUc3DNCkrNSVXEzTYEbMb1AZx2rNddoW" + .try_into() + .expect("str to be valid multiaddr"), + ]; + + let network = evm_network_from_env(); + let mut client = Client::connect(&peers).await.unwrap(); + let wallet = evm_wallet_from_env_or_default(network); + + let data = common::gen_random_data(1024 * 1024 * 10); + + let addr = client.put(data.clone(), &wallet).await.unwrap(); + + sleep(Duration::from_secs(2)).await; + + let data_fetched = client.get(addr).await.unwrap(); + assert_eq!(data, data_fetched, "data fetched should match data put"); + + Ok(()) +} From b57429c86703185b1beba5a9468ade3ccc89f08b Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 2 Oct 2024 09:57:10 +0200 Subject: [PATCH 098/255] fix(autonomi): resolve libinstant error See 
https://github.com/rustwasm/wasm-bindgen/discussions/3500#discussioncomment-6334669 for an example of errors that happened when running the PUT test. --- Cargo.lock | 4 ++++ autonomi/Cargo.toml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1de99e6ffe..d385373a75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1032,6 +1032,7 @@ dependencies = [ "evmlib", "eyre", "hex 0.4.3", + "instant", "libp2p 0.54.1", "rand 0.8.5", "rmp-serde", @@ -4353,6 +4354,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index dd99f98fbe..d750cc07ed 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -50,6 +50,10 @@ eyre = "0.6.5" tracing-subscriber = { version = "0.3", features = ["env-filter"] } wasm-bindgen-test = "0.3.43" +[target.'cfg(target_arch = "wasm32")'.dependencies] +# See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available +instant = { version = "0.1", features = [ "wasm-bindgen", "inaccurate" ] } + [lints] workspace = true From 8b4852c9f5cd74f5ed43bc6e4b0ce82d6d48eb43 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 2 Oct 2024 14:15:49 +0900 Subject: [PATCH 099/255] chore: polishes --- autonomi_cli/src/commands.rs | 8 +++---- autonomi_cli/src/commands/file.rs | 2 +- autonomi_cli/src/commands/register.rs | 34 +++++++++++---------------- autonomi_cli/src/utils.rs | 11 ++++----- 4 files changed, 24 insertions(+), 31 deletions(-) diff --git a/autonomi_cli/src/commands.rs b/autonomi_cli/src/commands.rs index 37fbebb36b..12d4af26f1 100644 --- a/autonomi_cli/src/commands.rs +++ b/autonomi_cli/src/commands.rs @@ -120,10 +120,10 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { FileCmd::List => file::list(peers), }, SubCmd::Register { command } => match command { - RegisterCmd::Cost { name } => register::cost(&name, peers).await, - RegisterCmd::Create { name, value } => register::create(&name, &value, peers).await, - RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers).await, - RegisterCmd::Get { name } => register::get(&name, peers).await, + RegisterCmd::Cost { name } => register::cost(&name, peers), + RegisterCmd::Create { name, value } => register::create(&name, &value, peers), + RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers), + RegisterCmd::Get { name } => register::get(&name, peers), RegisterCmd::List => register::list(peers), }, SubCmd::Vault { command } => match command { diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs index 9a153438a5..45b60a24df 100644 --- a/autonomi_cli/src/commands/file.rs +++ b/autonomi_cli/src/commands/file.rs @@ -54,6 +54,6 @@ pub async fn download(addr: &str, dest_path: &str, peers: Vec) -> Res } pub fn list(_peers: Vec) -> Result<()> { - println!("Listing previous uploads..."); + println!("The file list feature is coming soon!"); Ok(()) } diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index 250ff75dfd..6afa26c755 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -10,43 +10,37 @@ use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; -#[expect(clippy::unused_async)] -pub async 
fn cost(name: &str, _peers: Vec) -> Result<()> { - let register_key = crate::utils::get_register_signing_key() +pub fn cost(_name: &str, _peers: Vec) -> Result<()> { + let _register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; - println!("Estimate cost to register name: {name} with register key: {register_key}"); + println!("The register feature is coming soon!"); Ok(()) } -#[expect(clippy::unused_async)] -pub async fn create(name: &str, value: &str, _peers: Vec) -> Result<()> { - let secret_key = crate::utils::get_secret_key() +pub fn create(_name: &str, _value: &str, _peers: Vec) -> Result<()> { + let _secret_key = crate::utils::get_secret_key() .wrap_err("The secret key is required to perform this action")?; - let register_key = crate::utils::get_register_signing_key() + let _register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; - println!( - "Creating register: {name} with value: {value} using secret key: {secret_key} and register key: {register_key}" - ); + println!("The register feature is coming soon!"); Ok(()) } -#[expect(clippy::unused_async)] -pub async fn edit(name: &str, value: &str, _peers: Vec) -> Result<()> { - let register_key = crate::utils::get_register_signing_key() +pub fn edit(_name: &str, _value: &str, _peers: Vec) -> Result<()> { + let _register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; - println!("Editing register: {name} with value: {value} using register key: {register_key}"); + println!("The register feature is coming soon!"); Ok(()) } -#[expect(clippy::unused_async)] -pub async fn get(name: &str, _peers: Vec) -> Result<()> { - let register_key = crate::utils::get_register_signing_key() +pub fn get(_name: &str, _peers: Vec) -> Result<()> { + let _register_key = crate::utils::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; - println!("Getting value of register: {name} with register key: {register_key}"); + println!("The register feature is coming soon!"); Ok(()) } pub fn list(_peers: Vec) -> Result<()> { - println!("Listing previous registers..."); + println!("The register feature is coming soon!"); Ok(()) } diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs index c7568cc804..2d7cce6d19 100644 --- a/autonomi_cli/src/utils.rs +++ b/autonomi_cli/src/utils.rs @@ -44,7 +44,7 @@ pub fn get_secret_key() -> Result { // load the key from file let key_path = dir.join(SECRET_KEY_FILE); fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file".to_string()) + .wrap_err("Failed to read secret key from file") .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY} env var or have the key in a file at {key_path:?}")) } @@ -63,7 +63,7 @@ pub fn get_register_signing_key() -> Result { // load the key from file let key_path = dir.join(REGISTER_SIGNING_KEY_FILE); fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file".to_string()) + .wrap_err("Failed to read secret key from file") .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY} env var or have the key in a file at {key_path:?}")) } @@ -72,16 +72,15 @@ pub fn get_client_data_dir_path() -> Result { .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; home_dirs.push("safe"); home_dirs.push("client"); - 
std::fs::create_dir_all(home_dirs.as_path()) - .wrap_err("Failed to create data dir".to_string())?; + std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; Ok(home_dirs) } pub async fn get_peers(peers: PeersArgs) -> Result> { peers.get_peers().await - .wrap_err("Please provide valid Network peers to connect to".to_string()) + .wrap_err("Please provide valid Network peers to connect to") .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var")) - .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere".to_string()) + .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere") } pub(crate) fn get_evm_network_from_environment() -> Result { From 28ac5c216b35379e6de5ea7e53cbed51609200a9 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 2 Oct 2024 13:43:24 +0530 Subject: [PATCH 100/255] feat: enable data location test --- Cargo.lock | 6 + autonomi/Cargo.toml | 3 + autonomi/src/client/data.rs | 8 +- autonomi/src/client/registers.rs | 2 +- autonomi/tests/common.rs | 46 -- autonomi/tests/evm/file.rs | 11 +- autonomi/tests/file.rs | 13 +- autonomi/tests/put.rs | 10 +- autonomi/tests/register.rs | 16 +- autonomi/tests/wallet.rs | 4 +- autonomi/tests/wasm.rs | 7 +- sn_node/Cargo.toml | 4 + sn_node/tests/common/client.rs | 466 ++++++-------- sn_node/tests/common/mod.rs | 439 +++++++------- sn_node/tests/verify_data_location.rs | 840 +++++++++++++------------- test_utils/Cargo.toml | 4 + test_utils/src/evm.rs | 56 ++ test_utils/src/lib.rs | 1 + 18 files changed, 930 insertions(+), 1006 deletions(-) create mode 100644 test_utils/src/evm.rs diff --git a/Cargo.lock b/Cargo.lock index db8f45cfe5..47d0cdfb71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1046,6 +1046,7 @@ dependencies = [ "sn_protocol", "sn_registers", "sn_transfers", + "test_utils", "thiserror", "tokio", "tracing", @@ -8136,6 +8137,7 @@ version = "0.111.3" dependencies = [ "assert_fs", "async-trait", + "autonomi", "blsttc", "bytes", "chrono", @@ -8145,6 +8147,7 @@ dependencies = [ "crdts", "custom_debug", "dirs-next", + "evmlib", "eyre", "file-rotate", "futures", @@ -8649,8 +8652,11 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" name = "test_utils" version = "0.4.6" dependencies = [ + "autonomi", "color-eyre", + "const-hex", "dirs-next", + "evmlib", "libp2p 0.54.1", "serde", "serde_json", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 84de6cc4ce..d5990e8b53 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -48,6 +48,9 @@ xor_name = "5.0.0" [dev-dependencies] eyre = "0.6.5" tracing-subscriber = { version = "0.3", features = ["env-filter"] } +# Do not specify the version field. Release process expects even the local dev deps to be published. +# Removing the version field is a workaround. +test_utils = { path = "../test_utils" } wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index b2dd9521b6..e0650a2ca9 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -154,7 +154,7 @@ impl Client { /// Upload a piece of data to the network. This data will be self-encrypted, /// and the data map XOR address will be returned. 
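    /// For example (a sketch only, mirroring the `put` test updated later in
    /// this patch; it assumes a running local network and the funded test
    /// wallet helper from `test_utils`):
    ///
    ///     let client = Client::connect(&peers).await?;
    ///     let wallet = test_utils::evm::get_funded_wallet();
    ///     let addr = client.put(Bytes::from("hello, world"), &wallet).await?;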
- pub async fn put(&mut self, data: Bytes, wallet: &Wallet) -> Result { + pub async fn put(&self, data: Bytes, wallet: &Wallet) -> Result { let now = std::time::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; @@ -186,7 +186,7 @@ impl Client { Ok(map_xor_name) } - pub(crate) async fn cost(&mut self, data: Bytes) -> Result { + pub(crate) async fn cost(&self, data: Bytes) -> Result { let now = std::time::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; @@ -210,7 +210,7 @@ impl Client { } pub(crate) async fn pay( - &mut self, + &self, content_addrs: impl Iterator, wallet: &Wallet, ) -> Result<(HashMap, Vec), PayError> { @@ -237,7 +237,7 @@ impl Client { } async fn get_store_quotes( - &mut self, + &self, content_addrs: impl Iterator, ) -> Result, PayError> { let futures: Vec<_> = content_addrs diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 43f35c40db..e5e3f24866 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -145,7 +145,7 @@ impl Client { /// Creates a new Register with an initial value and uploads it to the network. pub async fn create_register( - &mut self, + &self, value: Bytes, name: XorName, owner: SecretKey, diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs index 15ab0cc7a6..77a057fde2 100644 --- a/autonomi/tests/common.rs +++ b/autonomi/tests/common.rs @@ -1,15 +1,9 @@ use bytes::Bytes; -use const_hex::ToHexExt; -use evmlib::CustomNetwork; use libp2p::Multiaddr; use rand::Rng; use sn_peers_acquisition::parse_peer_addr; use std::env; -fn get_var_or_panic(var: &str) -> String { - env::var(var).unwrap_or_else(|_| panic!("{var} environment variable needs to be set")) -} - #[allow(dead_code)] pub fn gen_random_data(len: usize) -> Bytes { let mut data = vec![0u8; len]; @@ -36,43 +30,3 @@ pub fn peers_from_env() -> Result, libp2p::multiaddr::Error> { peers_str.split(',').map(parse_peer_addr).collect() } - -pub fn evm_network_from_env() -> evmlib::Network { - let evm_network = env::var("EVM_NETWORK").ok(); - let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one"); - - let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag { - ( - evmlib::Network::ArbitrumOne.rpc_url().to_string(), - evmlib::Network::ArbitrumOne - .payment_token_address() - .encode_hex_with_prefix(), - evmlib::Network::ArbitrumOne - .chunk_payments_address() - .encode_hex_with_prefix(), - ) - } else { - ( - get_var_or_panic("RPC_URL"), - get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), - get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), - ) - }; - - evmlib::Network::Custom(CustomNetwork::new( - &rpc_url, - &payment_token_address, - &chunk_payments_address, - )) -} - -pub fn evm_wallet_from_env_or_default(network: evmlib::Network) -> evmlib::wallet::Wallet { - // Default deployer wallet of the testnet. 
- const DEFAULT_WALLET_PRIVATE_KEY: &str = - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - - let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); - - evmlib::wallet::Wallet::new_from_private_key(network, &private_key) - .expect("Invalid private key") -} diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs index 746cd1cea3..5283c775b9 100644 --- a/autonomi/tests/evm/file.rs +++ b/autonomi/tests/evm/file.rs @@ -1,21 +1,18 @@ #[cfg(feature = "evm-payments")] mod test { - - use crate::common; - use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; use bytes::Bytes; use eyre::bail; use std::time::Duration; + use test_utils::evm::get_funded_wallet; use tokio::time::sleep; #[tokio::test] async fn file() -> Result<(), Box> { common::enable_logging(); - let network = evm_network_from_env(); let mut client = Client::connect(&[]).await.unwrap(); - let mut wallet = evm_wallet_from_env_or_default(network); + let mut wallet = get_funded_wallet(); // let data = common::gen_random_data(1024 * 1024 * 1000); // let user_key = common::gen_random_data(32); @@ -41,13 +38,11 @@ mod test { async fn file_into_vault() -> eyre::Result<()> { common::enable_logging(); - let network = evm_network_from_env(); - let mut client = Client::connect(&[]) .await? .with_vault_entropy(Bytes::from("at least 32 bytes of entropy here"))?; - let mut wallet = evm_wallet_from_env_or_default(network); + let mut wallet = get_funded_wallet(); let (root, addr) = client .upload_from_dir("tests/file/test_dir".into(), &mut wallet) diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 29b048bf68..85cef358ed 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -2,18 +2,21 @@ mod common; -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +#[cfg(feature = "files")] use autonomi::Client; +#[cfg(feature = "files")] use std::time::Duration; +#[cfg(feature = "files")] +use test_utils::evm::get_funded_wallet; +#[cfg(feature = "files")] use tokio::time::sleep; #[tokio::test] async fn file() -> Result<(), Box> { common::enable_logging(); - let network = evm_network_from_env(); let mut client = Client::connect(&[]).await.unwrap(); - let wallet = evm_wallet_from_env_or_default(network); + let wallet = get_funded_wallet(); let (root, addr) = client .upload_from_dir("tests/file/test_dir".into(), &wallet) @@ -36,10 +39,8 @@ async fn file() -> Result<(), Box> { async fn file_into_vault() -> eyre::Result<()> { common::enable_logging(); - let network = evm_network_from_env(); - let mut client = Client::connect(&[]).await?; - let mut wallet = evm_wallet_from_env_or_default(network); + let mut wallet = get_funded_wallet(); let client_sk = bls::SecretKey::random(); let (root, addr) = client diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index 3deeeca082..be7f84c025 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -2,19 +2,17 @@ mod common; -use std::time::Duration; - -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; +use std::time::Duration; +use test_utils::evm::get_funded_wallet; use tokio::time::sleep; #[tokio::test] async fn put() { common::enable_logging(); - let network = evm_network_from_env(); - let mut client = Client::connect(&[]).await.unwrap(); - let wallet = evm_wallet_from_env_or_default(network); + let client = Client::connect(&[]).await.unwrap(); + let wallet = get_funded_wallet(); let data = 
common::gen_random_data(1024 * 1024 * 10); let addr = client.put(data.clone(), &wallet).await.unwrap(); diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 5b49394aea..8c47776208 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -2,21 +2,25 @@ mod common; -use std::time::Duration; - -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; +#[cfg(feature = "registers")] use autonomi::Client; +#[cfg(feature = "registers")] use bytes::Bytes; +#[cfg(feature = "registers")] +use std::time::Duration; +#[cfg(feature = "registers")] +use test_utils::evm::get_funded_wallet; +#[cfg(feature = "registers")] use tokio::time::sleep; +#[cfg(feature = "registers")] use xor_name::XorName; #[tokio::test] async fn register() { common::enable_logging(); - let network = evm_network_from_env(); - let mut client = Client::connect(&[]).await.unwrap(); - let wallet = evm_wallet_from_env_or_default(network); + let client = Client::connect(&[]).await.unwrap(); + let wallet = get_funded_wallet(); // Owner key of the register. let key = bls::SecretKey::random(); diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs index 5fb852921e..faf24109c7 100644 --- a/autonomi/tests/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -1,9 +1,9 @@ mod common; -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use const_hex::traits::FromHex; use evmlib::common::{Address, Amount}; use evmlib::wallet::Wallet; +use test_utils::evm::{evm_network_from_env, get_funded_wallet}; #[tokio::test] async fn from_private_key() { @@ -20,7 +20,7 @@ async fn from_private_key() { #[tokio::test] async fn send_tokens() { let network = evm_network_from_env(); - let wallet = evm_wallet_from_env_or_default(network.clone()); + let wallet = get_funded_wallet(); let receiving_wallet = Wallet::new_with_random_wallet(network); diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index 4814a0c102..e1265d8e59 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -1,7 +1,7 @@ use std::time::Duration; -use crate::common::{evm_network_from_env, evm_wallet_from_env_or_default}; use autonomi::Client; +use test_utils::evm::get_funded_wallet; use tokio::time::sleep; use wasm_bindgen_test::*; @@ -20,9 +20,8 @@ async fn file() -> Result<(), Box> { .expect("str to be valid multiaddr"), ]; - let network = evm_network_from_env(); - let mut client = Client::connect(&peers).await.unwrap(); - let wallet = evm_wallet_from_env_or_default(network); + let client = Client::connect(&peers).await.unwrap(); + let wallet = get_funded_wallet(); let data = common::gen_random_data(1024 * 1024 * 10); diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 8ae460103c..4155b2c6c4 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -81,6 +81,10 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] +evmlib = { path = "../evmlib", version = "0.1" } +autonomi = { path = "../autonomi", version = "0.1.0", features = [ + "registers", +] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index 297b103d27..9e9336f492 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -6,32 +6,20 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
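// Helpers for constructing an autonomi `Client` and a funded EVM wallet against a
// local test network. The droplet/WAN variants are stubbed out further down until
// they are reimplemented for the new network.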
-use eyre::{bail, OptionExt, Result}; -use libp2p::PeerId; -/// TODO: Update to use autonomi API here -// use sn_client::{ -// acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, -// send, Client, -// }; +use autonomi::Client; +use eyre::Result; use sn_peers_acquisition::parse_peer_addr; use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest}; -use sn_service_management::{ - get_local_node_registry_path, safenode_manager_proto::NodeServiceRestartRequest, NodeRegistry, -}; -use sn_transfers::{HotWallet, NanoTokens, Transfer}; +use sn_service_management::{get_local_node_registry_path, NodeRegistry}; +use std::env; use std::{net::SocketAddr, path::Path}; -use test_utils::testnet::DeploymentInventory; -use tokio::{ - sync::Mutex, - time::{Duration, Instant}, -}; +use test_utils::{evm::evm_network_from_env, testnet::DeploymentInventory}; +use tokio::sync::Mutex; use tonic::Request; -use tracing::{debug, error, info, warn}; +use tracing::{debug, info}; use crate::common::get_safenode_rpc_client; -use super::get_safenode_manager_rpc_client; - /// This is a limited hard coded value as Droplet version has to contact the faucet to get the funds. /// This is limited to 10 requests to the faucet, where each request yields 100 SNT pub const INITIAL_WALLET_BALANCE: u64 = 3 * 100 * 1_000_000_000; @@ -48,10 +36,16 @@ const LOAD_FAUCET_WALLET_RETRIES: usize = 6; // mutex to restrict access to faucet wallet from concurrent tests static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); -/// Load HotWallet from dir -pub fn get_wallet(root_dir: &Path) -> HotWallet { - load_account_wallet_or_create_with_mnemonic(root_dir, None) - .expect("Wallet shall be successfully created.") +pub async fn get_client_and_funded_wallet() -> (Client, evmlib::wallet::Wallet) { + match DeploymentInventory::load() { + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + } + Err(_) => ( + LocalNetwork::get_client().await, + LocalNetwork::get_funded_wallet(), + ), + } } /// Get the node count @@ -59,7 +53,10 @@ pub fn get_wallet(root_dir: &Path) -> HotWallet { /// else return the local node count pub fn get_node_count() -> usize { match DeploymentInventory::load() { - Ok(inventory) => inventory.rpc_endpoints.len(), + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + // inventory.rpc_endpoints.len() + } Err(_) => LOCAL_NODE_COUNT, } } @@ -70,32 +67,33 @@ pub fn get_node_count() -> usize { /// /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. /// The restarted node relies on the genesis multiaddr to bootstrap after restart. 
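/// For example, the data location test asks for every endpoint while leaving a
/// droplet genesis node untouched (a sketch of the intended call):
///
///     let node_rpc_addresses = get_all_rpc_addresses(true)?;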
-pub fn get_all_rpc_addresses(skip_genesis_for_droplet: bool) -> Result> { +pub fn get_all_rpc_addresses(_skip_genesis_for_droplet: bool) -> Result> { match DeploymentInventory::load() { - Ok(inventory) => { - if !skip_genesis_for_droplet { - return Ok(inventory.rpc_endpoints.values().cloned().collect()); - } - // else filter out genesis - let genesis_ip = inventory - .vm_list - .iter() - .find_map(|(name, addr)| { - if name.contains("genesis") { - Some(*addr) - } else { - None - } - }) - .ok_or_eyre("Could not get the genesis VM's addr")?; - - let rpc_endpoints = inventory - .rpc_endpoints - .into_iter() - .filter(|(_, addr)| addr.ip() != genesis_ip) - .map(|(_, addr)| addr) - .collect(); - Ok(rpc_endpoints) + Ok(_inventory) => { + todo!("Not implemented yet for WanNetwork"); + // if !skip_genesis_for_droplet { + // return Ok(inventory.rpc_endpoints.values().cloned().collect()); + // } + // // else filter out genesis + // let genesis_ip = inventory + // .vm_list + // .iter() + // .find_map(|(name, addr)| { + // if name.contains("genesis") { + // Some(*addr) + // } else { + // None + // } + // }) + // .ok_or_eyre("Could not get the genesis VM's addr")?; + + // let rpc_endpoints = inventory + // .rpc_endpoints + // .into_iter() + // .filter(|(_, addr)| addr.ip() != genesis_ip) + // .map(|(_, addr)| addr) + // .collect(); + // Ok(rpc_endpoints) } Err(_) => { let local_node_reg_path = &get_local_node_registry_path()?; @@ -110,121 +108,55 @@ pub fn get_all_rpc_addresses(skip_genesis_for_droplet: bool) -> Result Result { - match DeploymentInventory::load() { - Ok(inventory) => { - Droplet::get_funded_wallet(client, to_wallet_dir, inventory.faucet_address, false).await - } - Err(_) => NonDroplet::get_funded_wallet(client, to_wallet_dir, false).await, - } -} - -/// Create a client and fund the wallet. -/// If SN_INVENTORY flag is passed, the wallet is funded by fetching it from the faucet -/// Else create a genesis wallet and transfer funds from there. -/// -/// We get a maximum of 10*100 SNT from the network. This is hardcoded as the Droplet tests have the fetch the -/// coins from the faucet and each request is limited to 100 SNT. -pub async fn get_client_and_funded_wallet(root_dir: &Path) -> Result<(Client, HotWallet)> { - match DeploymentInventory::load() { - Ok(inventory) => { - let client = Droplet::get_client(&inventory).await; - let local_wallet = - Droplet::get_funded_wallet(&client, root_dir, inventory.faucet_address, true) - .await?; - Ok((client, local_wallet)) - } - Err(_) => { - let client = NonDroplet::get_client().await; - let local_wallet = NonDroplet::get_funded_wallet(&client, root_dir, true).await?; - - Ok((client, local_wallet)) - } - } -} - -pub struct NonDroplet; -impl NonDroplet { +// /// Adds funds to the provided to_wallet_dir +// /// If SN_INVENTORY flag is passed, the amount is retrieved from the faucet url +// /// else obtain it from the provided faucet HotWallet +// /// +// /// We obtain 100 SNT from the network per call. 
Use `get_client_and_wallet` during the initial setup which would +// /// obtain 10*100 SNT +// pub async fn add_funds_to_wallet(client: &Client, to_wallet_dir: &Path) -> Result { +// match DeploymentInventory::load() { +// Ok(inventory) => { +// Droplet::get_funded_wallet(client, to_wallet_dir, inventory.faucet_address, false).await +// } +// Err(_) => NonDroplet::get_funded_wallet(client, to_wallet_dir, false).await, +// } +// } + +pub struct LocalNetwork; +impl LocalNetwork { /// Get a new Client for testing pub async fn get_client() -> Client { - let secret_key = bls::SecretKey::random(); - let bootstrap_peers = if !cfg!(feature = "local-discovery") { match std::env::var("SAFE_PEERS") { Ok(str) => match parse_peer_addr(&str) { - Ok(peer) => Some(vec![peer]), + Ok(peer) => vec![peer], Err(err) => panic!("Can't parse SAFE_PEERS {str:?} with error {err:?}"), }, Err(err) => panic!("Can't get env var SAFE_PEERS with error {err:?}"), } } else { - None + vec![] }; println!("Client bootstrap with peer {bootstrap_peers:?}"); info!("Client bootstrap with peer {bootstrap_peers:?}"); - Client::new(secret_key, bootstrap_peers, None, None) + Client::connect(&bootstrap_peers) .await .expect("Client shall be successfully created.") } - pub async fn get_funded_wallet( - client: &Client, - root_dir: &Path, - initial_wallet: bool, - ) -> Result { - let wallet_balance = if initial_wallet { - NanoTokens::from(INITIAL_WALLET_BALANCE) - } else { - NanoTokens::from(ADD_FUNDS_TO_WALLET) - }; - let _guard = FAUCET_WALLET_MUTEX.lock().await; - let from_faucet_wallet = NonDroplet::load_faucet_wallet().await?; - let mut local_wallet = get_wallet(root_dir); - - println!("Getting {wallet_balance} tokens from the faucet..."); - info!("Getting {wallet_balance} tokens from the faucet..."); - let tokens = send( - from_faucet_wallet, - wallet_balance, - local_wallet.address(), - client, - true, - ) - .await?; - - println!("Verifying the transfer from faucet..."); - info!("Verifying the transfer from faucet..."); - client.verify_cashnote(&tokens).await?; - local_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(local_wallet.balance(), wallet_balance); - println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - - Ok(local_wallet) - } + fn get_funded_wallet() -> evmlib::wallet::Wallet { + let network = evm_network_from_env(); + // Default deployer wallet of the testnet. + const DEFAULT_WALLET_PRIVATE_KEY: &str = + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - async fn load_faucet_wallet() -> Result { - info!("Loading faucet..."); - let now = Instant::now(); - for attempt in 1..LOAD_FAUCET_WALLET_RETRIES + 1 { - let faucet_wallet = create_faucet_account_and_wallet(); + let private_key = + env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - info!("Loaded faucet wallet after {:?}", now.elapsed()); - return Ok(faucet_wallet); - } - tokio::time::sleep(Duration::from_secs(1)).await; - warn!("The faucet wallet is empty. Attempts: {attempt}/{LOAD_FAUCET_WALLET_RETRIES}") - } - bail!("The faucet wallet is empty even after {LOAD_FAUCET_WALLET_RETRIES} retries. Bailing after {:?}. 
Check the faucet_server logs.", now.elapsed()); + evmlib::wallet::Wallet::new_from_private_key(network, &private_key) + .expect("Invalid private key") } // Restart a local node by sending in the SafenodeRpcCmd::Restart to the node's RPC endpoint. @@ -269,127 +201,127 @@ impl NonDroplet { } } -pub struct Droplet; -impl Droplet { - /// Create a new client and bootstrap from the provided safe_peers - pub async fn get_client(inventory: &DeploymentInventory) -> Client { - let secret_key = bls::SecretKey::random(); - - let mut bootstrap_peers = Vec::new(); - for peer in inventory - .peers - .iter() - .chain(vec![&inventory.genesis_multiaddr]) - { - match parse_peer_addr(peer) { - Ok(peer) => bootstrap_peers.push(peer), - Err(err) => error!("Can't parse SAFE_PEERS {peer:?} with error {err:?}"), - } - } - if bootstrap_peers.is_empty() { - panic!("Could parse/find any bootstrap peers"); - } - - println!("Client bootstrap with peer {bootstrap_peers:?}"); - info!("Client bootstrap with peer {bootstrap_peers:?}"); - Client::new(secret_key, Some(bootstrap_peers), None, None) - .await - .expect("Client shall be successfully created.") - } - - // Create a wallet at root_dir and fetch the amount from the faucet url - async fn get_funded_wallet( - client: &Client, - root_dir: &Path, - faucet_socket: String, - initial_wallet: bool, - ) -> Result { - let _guard = FAUCET_WALLET_MUTEX.lock().await; - - let requests_to_faucet = if initial_wallet { - let requests_to_faucet = 3; - assert_eq!( - requests_to_faucet * 100 * 1_000_000_000, - INITIAL_WALLET_BALANCE - ); - requests_to_faucet - } else { - let requests_to_faucet = 1; - assert_eq!( - requests_to_faucet * 100 * 1_000_000_000, - ADD_FUNDS_TO_WALLET - ); - requests_to_faucet - }; - - let mut local_wallet = get_wallet(root_dir); - let address_hex = hex::encode(local_wallet.address().to_bytes()); - - println!( - "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - info!( - "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - for _ in 0..requests_to_faucet { - let faucet_url = format!("http://{faucet_socket}/{address_hex}"); - - // Get transfer from faucet - let transfer = reqwest::get(&faucet_url).await?.text().await?; - let transfer = match Transfer::from_hex(&transfer) { - Ok(transfer) => transfer, - Err(err) => { - println!("Failed to parse transfer: {err:?}"); - println!("Transfer: \"{transfer}\""); - error!("Failed to parse transfer: {err:?}"); - error!("Transfer: \"{transfer}\""); - return Err(err.into()); - } - }; - let cashnotes = match client.receive(&transfer, &local_wallet).await { - Ok(cashnotes) => cashnotes, - Err(err) => { - println!("Failed to verify and redeem transfer: {err:?}"); - error!("Failed to verify and redeem transfer: {err:?}"); - return Err(err.into()); - } - }; - info!("Successfully verified transfer."); - local_wallet.deposit_and_store_to_disk(&cashnotes)?; - } - println!( - "Successfully got {} after {requests_to_faucet} requests to the faucet", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - info!( - "Successfully got {} after {requests_to_faucet} requests to the faucet", - NanoTokens::from(INITIAL_WALLET_BALANCE) - ); - - Ok(local_wallet) - } - - // Restart a remote safenode service by sending a RPC to the safenode manager daemon. 
- pub async fn restart_node( - peer_id: &PeerId, - daemon_endpoint: SocketAddr, - retain_peer_id: bool, - ) -> Result<()> { - let mut rpc_client = get_safenode_manager_rpc_client(daemon_endpoint).await?; - - let _response = rpc_client - .restart_node_service(Request::new(NodeServiceRestartRequest { - peer_id: peer_id.to_bytes(), - delay_millis: 0, - retain_peer_id, - })) - .await?; - - println!("Node restart requested to safenodemand {daemon_endpoint}"); - info!("Node restart requested to safenodemand {daemon_endpoint}"); - - Ok(()) - } +pub struct WanNetwork; +impl WanNetwork { + // /// Create a new client and bootstrap from the provided safe_peers + // pub async fn get_client(inventory: &DeploymentInventory) -> Client { + // let secret_key = bls::SecretKey::random(); + + // let mut bootstrap_peers = Vec::new(); + // for peer in inventory + // .peers + // .iter() + // .chain(vec![&inventory.genesis_multiaddr]) + // { + // match parse_peer_addr(peer) { + // Ok(peer) => bootstrap_peers.push(peer), + // Err(err) => error!("Can't parse SAFE_PEERS {peer:?} with error {err:?}"), + // } + // } + // if bootstrap_peers.is_empty() { + // panic!("Could parse/find any bootstrap peers"); + // } + + // println!("Client bootstrap with peer {bootstrap_peers:?}"); + // info!("Client bootstrap with peer {bootstrap_peers:?}"); + // Client::new(secret_key, Some(bootstrap_peers), None, None) + // .await + // .expect("Client shall be successfully created.") + // } + + // // Create a wallet at root_dir and fetch the amount from the faucet url + // async fn get_funded_wallet( + // client: &Client, + // root_dir: &Path, + // faucet_socket: String, + // initial_wallet: bool, + // ) -> Result { + // let _guard = FAUCET_WALLET_MUTEX.lock().await; + + // let requests_to_faucet = if initial_wallet { + // let requests_to_faucet = 3; + // assert_eq!( + // requests_to_faucet * 100 * 1_000_000_000, + // INITIAL_WALLET_BALANCE + // ); + // requests_to_faucet + // } else { + // let requests_to_faucet = 1; + // assert_eq!( + // requests_to_faucet * 100 * 1_000_000_000, + // ADD_FUNDS_TO_WALLET + // ); + // requests_to_faucet + // }; + + // let mut local_wallet = get_wallet(root_dir); + // let address_hex = hex::encode(local_wallet.address().to_bytes()); + + // println!( + // "Getting {} tokens from the faucet... num_requests:{requests_to_faucet}", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // info!( + // "Getting {} tokens from the faucet... 
num_requests:{requests_to_faucet}", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // for _ in 0..requests_to_faucet { + // let faucet_url = format!("http://{faucet_socket}/{address_hex}"); + + // // Get transfer from faucet + // let transfer = reqwest::get(&faucet_url).await?.text().await?; + // let transfer = match Transfer::from_hex(&transfer) { + // Ok(transfer) => transfer, + // Err(err) => { + // println!("Failed to parse transfer: {err:?}"); + // println!("Transfer: \"{transfer}\""); + // error!("Failed to parse transfer: {err:?}"); + // error!("Transfer: \"{transfer}\""); + // return Err(err.into()); + // } + // }; + // let cashnotes = match client.receive(&transfer, &local_wallet).await { + // Ok(cashnotes) => cashnotes, + // Err(err) => { + // println!("Failed to verify and redeem transfer: {err:?}"); + // error!("Failed to verify and redeem transfer: {err:?}"); + // return Err(err.into()); + // } + // }; + // info!("Successfully verified transfer."); + // local_wallet.deposit_and_store_to_disk(&cashnotes)?; + // } + // println!( + // "Successfully got {} after {requests_to_faucet} requests to the faucet", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + // info!( + // "Successfully got {} after {requests_to_faucet} requests to the faucet", + // NanoTokens::from(INITIAL_WALLET_BALANCE) + // ); + + // Ok(local_wallet) + // } + + // // Restart a remote safenode service by sending a RPC to the safenode manager daemon. + // pub async fn restart_node( + // peer_id: &PeerId, + // daemon_endpoint: SocketAddr, + // retain_peer_id: bool, + // ) -> Result<()> { + // let mut rpc_client = get_safenode_manager_rpc_client(daemon_endpoint).await?; + + // let _response = rpc_client + // .restart_node_service(Request::new(NodeServiceRestartRequest { + // peer_id: peer_id.to_bytes(), + // delay_millis: 0, + // retain_peer_id, + // })) + // .await?; + + // println!("Node restart requested to safenodemand {daemon_endpoint}"); + // info!("Node restart requested to safenodemand {daemon_endpoint}"); + + // Ok(()) + // } } diff --git a/sn_node/tests/common/mod.rs b/sn_node/tests/common/mod.rs index 452d506379..fc3a94e97e 100644 --- a/sn_node/tests/common/mod.rs +++ b/sn_node/tests/common/mod.rs @@ -1,45 +1,27 @@ -// // Copyright 2024 MaidSafe.net limited. -// // -// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// // KIND, either express or implied. Please review the Licences for the specific language governing -// // permissions and limitations relating to use of the SAFE Network Software. -// #![allow(dead_code)] +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
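// Shared helpers for the sn_node integration tests. Each integration test binary
// compiles this module on its own, so some helpers can appear unused in any given
// binary; hence the blanket allow(dead_code) below.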
+#![allow(dead_code)] -// pub mod client; +pub mod client; -// use self::client::{Droplet, NonDroplet}; -// use bytes::Bytes; -// use eyre::{bail, eyre, OptionExt, Result}; -// use itertools::Either; -// use libp2p::PeerId; -// use rand::{ -// distributions::{Distribution, Standard}, -// Rng, -// }; -// use self_encryption::MIN_ENCRYPTABLE_BYTES; -// // TODO: Use autonimi API here -// // use sn_client::{Client, FilesApi}; -// use sn_protocol::{ -// safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}, -// storage::ChunkAddress, -// }; -// use sn_service_management::{ -// get_local_node_registry_path, -// safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry, -// }; -// use std::{ -// fs::File, -// io::Write, -// net::SocketAddr, -// path::{Path, PathBuf}, -// time::Duration, -// }; -// use test_utils::testnet::DeploymentInventory; -// use tonic::Request; -// use tracing::{debug, error, warn}; -// use xor_name::XorName; +use self::client::LocalNetwork; +use eyre::{bail, eyre, OptionExt, Result}; +use itertools::Either; +use libp2p::PeerId; +use sn_protocol::safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}; +use sn_service_management::{ + get_local_node_registry_path, + safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry, +}; +use std::{net::SocketAddr, time::Duration}; +use test_utils::testnet::DeploymentInventory; +use tonic::Request; +use tracing::{debug, error, warn}; // type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; @@ -72,205 +54,206 @@ // )) // } -// // Connect to a RPC socket addr with retry -// pub async fn get_safenode_rpc_client( -// socket_addr: SocketAddr, -// ) -> Result> { -// // get the new PeerId for the current NodeIndex -// let endpoint = format!("https://{socket_addr}"); -// let mut attempts = 0; -// loop { -// if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await { -// break Ok(rpc_client); -// } -// attempts += 1; -// println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); -// error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); -// tokio::time::sleep(Duration::from_secs(1)).await; -// if attempts >= 10 { -// bail!("Failed to connect to {endpoint:?} even after 10 retries"); -// } -// } -// } +// Connect to a RPC socket addr with retry +pub async fn get_safenode_rpc_client( + socket_addr: SocketAddr, +) -> Result> { + // get the new PeerId for the current NodeIndex + let endpoint = format!("https://{socket_addr}"); + let mut attempts = 0; + loop { + if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await { + break Ok(rpc_client); + } + attempts += 1; + println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); + error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); + tokio::time::sleep(Duration::from_secs(1)).await; + if attempts >= 10 { + bail!("Failed to connect to {endpoint:?} even after 10 retries"); + } + } +} -// // Connect to a RPC socket addr with retry -// pub async fn get_safenode_manager_rpc_client( -// socket_addr: SocketAddr, -// ) -> Result> { -// // get the new PeerId for the current NodeIndex -// let endpoint = format!("https://{socket_addr}"); -// let mut attempts = 0; -// loop { -// if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await { -// break Ok(rpc_client); -// } -// attempts += 1; -// println!("Could not connect to rpc {endpoint:?}. 
Attempts: {attempts:?}/10"); -// error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); -// tokio::time::sleep(Duration::from_secs(1)).await; -// if attempts >= 10 { -// bail!("Failed to connect to {endpoint:?} even after 10 retries"); -// } -// } -// } +// Connect to a RPC socket addr with retry +pub async fn get_safenode_manager_rpc_client( + socket_addr: SocketAddr, +) -> Result> { + // get the new PeerId for the current NodeIndex + let endpoint = format!("https://{socket_addr}"); + let mut attempts = 0; + loop { + if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await { + break Ok(rpc_client); + } + attempts += 1; + println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); + error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); + tokio::time::sleep(Duration::from_secs(1)).await; + if attempts >= 10 { + bail!("Failed to connect to {endpoint:?} even after 10 retries"); + } + } +} -// // Returns all the PeerId for all the running nodes -// pub async fn get_all_peer_ids(node_rpc_addresses: &Vec) -> Result> { -// let mut all_peers = Vec::new(); +// Returns all the PeerId for all the running nodes +pub async fn get_all_peer_ids(node_rpc_addresses: &Vec) -> Result> { + let mut all_peers = Vec::new(); -// for addr in node_rpc_addresses { -// let mut rpc_client = get_safenode_rpc_client(*addr).await?; + for addr in node_rpc_addresses { + let mut rpc_client = get_safenode_rpc_client(*addr).await?; -// // get the peer_id -// let response = rpc_client -// .node_info(Request::new(NodeInfoRequest {})) -// .await?; -// let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; -// all_peers.push(peer_id); -// } -// debug!( -// "Obtained the PeerId list for the running network with a node count of {}", -// node_rpc_addresses.len() -// ); -// Ok(all_peers) -// } + // get the peer_id + let response = rpc_client + .node_info(Request::new(NodeInfoRequest {})) + .await?; + let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; + all_peers.push(peer_id); + } + debug!( + "Obtained the PeerId list for the running network with a node count of {}", + node_rpc_addresses.len() + ); + Ok(all_peers) +} -// /// A struct to facilitate restart of droplet/local nodes -// pub struct NodeRestart { -// // Deployment inventory is used incase of Droplet nodes and NodeRegistry incase of NonDroplet nodes. -// inventory_file: Either, -// next_to_restart_idx: usize, -// skip_genesis_for_droplet: bool, -// retain_peer_id: bool, -// } +/// A struct to facilitate restart of droplet/local nodes +pub struct NodeRestart { + // Deployment inventory is used incase of Droplet nodes and NodeRegistry incase of NonDroplet nodes. + inventory_file: Either, + next_to_restart_idx: usize, + skip_genesis_for_droplet: bool, + retain_peer_id: bool, +} -// impl NodeRestart { -// /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. -// /// The restarted node relies on the genesis multiaddr to bootstrap after restart. -// /// -// /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc. 
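    /// One possible call pattern, sketched from how verify_data_location.rs
    /// drives it:
    ///
    ///     let mut node_restart = NodeRestart::new(true, false)?;
    ///     while let Some(rpc_addr) = node_restart.restart_next(false, false).await? {
    ///         // a node was restarted; wait for replication, then verify rpc_addr
    ///     }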
-// pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result { -// let inventory_file = match DeploymentInventory::load() { -// Ok(inv) => Either::Left(inv), -// Err(_) => { -// let reg = NodeRegistry::load(&get_local_node_registry_path()?)?; -// Either::Right(reg) -// } -// }; +impl NodeRestart { + /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. + /// The restarted node relies on the genesis multiaddr to bootstrap after restart. + /// + /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc. + pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result { + let inventory_file = match DeploymentInventory::load() { + Ok(inv) => Either::Left(inv), + Err(_) => { + let reg = NodeRegistry::load(&get_local_node_registry_path()?)?; + Either::Right(reg) + } + }; -// Ok(Self { -// inventory_file, -// next_to_restart_idx: 0, -// skip_genesis_for_droplet, -// retain_peer_id, -// }) -// } + Ok(Self { + inventory_file, + next_to_restart_idx: 0, + skip_genesis_for_droplet, + retain_peer_id, + }) + } -// /// Restart the next node in the list. -// /// Set `loop_over` to `true` if we want to start over the restart process if we have already restarted all -// /// the nodes. -// /// Set `progress_on_error` to `true` if we want to restart the next node if you call this function again. -// /// Else we'll be retrying the same node on the next call. -// /// -// /// Returns the `safenode's RPC addr` if we have restarted a node successfully. -// /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes. -// pub async fn restart_next( -// &mut self, -// loop_over: bool, -// progress_on_error: bool, -// ) -> Result> { -// let safenode_rpc_endpoint = match self.inventory_file.clone() { -// Either::Left(inv) => { -// // check if we've reached the end -// if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() { -// self.next_to_restart_idx = 0; -// } + /// Restart the next node in the list. + /// Set `loop_over` to `true` if we want to start over the restart process if we have already restarted all + /// the nodes. + /// Set `progress_on_error` to `true` if we want to restart the next node if you call this function again. + /// Else we'll be retrying the same node on the next call. + /// + /// Returns the `safenode's RPC addr` if we have restarted a node successfully. + /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes. + pub async fn restart_next( + &mut self, + loop_over: bool, + progress_on_error: bool, + ) -> Result> { + let safenode_rpc_endpoint = match self.inventory_file.clone() { + Either::Left(inv) => { + // check if we've reached the end + if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() { + self.next_to_restart_idx = 0; + } -// if let Some((peer_id, daemon_endpoint)) = inv -// .safenodemand_endpoints -// .iter() -// .nth(self.next_to_restart_idx) -// { -// self.restart(*peer_id, *daemon_endpoint, progress_on_error) -// .await?; + if let Some((peer_id, daemon_endpoint)) = inv + .safenodemand_endpoints + .iter() + .nth(self.next_to_restart_idx) + { + self.restart(*peer_id, *daemon_endpoint, progress_on_error) + .await?; -// let safenode_rpc_endpoint = inv -// .rpc_endpoints -// .get(peer_id) -// .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?; -// Some(*safenode_rpc_endpoint) -// } else { -// warn!("We have restarted all the nodes in the list. 
Since loop_over is false, we are not restarting any nodes now."); -// None -// } -// } -// Either::Right(reg) => { -// // check if we've reached the end -// if loop_over && self.next_to_restart_idx > reg.nodes.len() { -// self.next_to_restart_idx = 0; -// } + let safenode_rpc_endpoint = inv + .rpc_endpoints + .get(peer_id) + .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?; + Some(*safenode_rpc_endpoint) + } else { + warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); + None + } + } + Either::Right(reg) => { + // check if we've reached the end + if loop_over && self.next_to_restart_idx > reg.nodes.len() { + self.next_to_restart_idx = 0; + } -// if let Some((peer_id, safenode_rpc_endpoint)) = reg -// .nodes -// .get(self.next_to_restart_idx) -// .map(|node| (node.peer_id, node.rpc_socket_addr)) -// { -// let peer_id = -// peer_id.ok_or_eyre("PeerId should be present for a local node")?; -// self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) -// .await?; -// Some(safenode_rpc_endpoint) -// } else { -// warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); -// None -// } -// } -// }; + if let Some((peer_id, safenode_rpc_endpoint)) = reg + .nodes + .get(self.next_to_restart_idx) + .map(|node| (node.peer_id, node.rpc_socket_addr)) + { + let peer_id = + peer_id.ok_or_eyre("PeerId should be present for a local node")?; + self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) + .await?; + Some(safenode_rpc_endpoint) + } else { + warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); + None + } + } + }; -// Ok(safenode_rpc_endpoint) -// } + Ok(safenode_rpc_endpoint) + } -// async fn restart( -// &mut self, -// peer_id: PeerId, -// endpoint: SocketAddr, -// progress_on_error: bool, -// ) -> Result<()> { -// match &self.inventory_file { -// Either::Left(_inv) => { -// match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) -// .await -// .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { -// Ok(_) => { -// self.next_to_restart_idx += 1; -// }, -// Err(err) => { -// if progress_on_error { -// self.next_to_restart_idx += 1; -// } -// return Err(err); -// }, -// } -// }, -// Either::Right(_reg) => { -// match NonDroplet::restart_node(endpoint, self.retain_peer_id).await -// .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { -// Ok(_) => { -// self.next_to_restart_idx += 1; -// }, -// Err(err) => { -// if progress_on_error { -// self.next_to_restart_idx += 1; -// } -// return Err(err); -// } -// } -// } -// } -// Ok(()) -// } + async fn restart( + &mut self, + peer_id: PeerId, + endpoint: SocketAddr, + progress_on_error: bool, + ) -> Result<()> { + match &self.inventory_file { + Either::Left(_inv) => { + todo!("Not implemented yet for WanNetwork"); + // match WanNetwork::restart_node(&peer_id, endpoint, self.retain_peer_id) + // .await + // .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { + // Ok(_) => { + // self.next_to_restart_idx += 1; + // }, + // Err(err) => { + // if progress_on_error { + // self.next_to_restart_idx += 1; + // } + // return Err(err); + // }, + // } + }, + Either::Right(_reg) => { + match LocalNetwork::restart_node(endpoint, 
self.retain_peer_id).await + .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { + Ok(_) => { + self.next_to_restart_idx += 1; + }, + Err(err) => { + if progress_on_error { + self.next_to_restart_idx += 1; + } + return Err(err); + } + } + } + } + Ok(()) + } -// pub fn reset_index(&mut self) { -// self.next_to_restart_idx = 0; -// } -// } + pub fn reset_index(&mut self) { + self.next_to_restart_idx = 0; + } +} diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index d81cc8a8d6..3a1c091dc1 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -1,428 +1,412 @@ -// // Copyright 2024 MaidSafe.net limited. -// // -// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// // KIND, either express or implied. Please review the Licences for the specific language governing -// // permissions and limitations relating to use of the SAFE Network Software. - -// #![allow(clippy::mutable_key_type)] -// mod common; - -// use crate::common::{ -// client::{get_all_rpc_addresses, get_client_and_funded_wallet}, -// get_all_peer_ids, get_safenode_rpc_client, NodeRestart, -// }; -// use assert_fs::TempDir; -// use common::client::get_wallet; -// use eyre::{eyre, Result}; -// use libp2p::{ -// kad::{KBucketKey, RecordKey}, -// PeerId, -// }; -// use rand::{rngs::OsRng, Rng}; -// // TODO: update autonomi API here -// // use sn_client::{Client, FilesApi, Uploader, WalletClient}; -// use sn_logging::LogBuilder; -// use sn_networking::sort_peers_by_key; -// use sn_protocol::{ -// safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, -// NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -// }; -// use sn_registers::{Permissions, RegisterAddress}; -// use std::{ -// collections::{BTreeSet, HashMap, HashSet}, -// fs::File, -// io::Write, -// net::SocketAddr, -// path::PathBuf, -// time::{Duration, Instant}, -// }; -// use tonic::Request; -// use tracing::{debug, error, info}; -// use xor_name::XorName; - -// const CHUNK_SIZE: usize = 1024; - -// // VERIFICATION_DELAY is set based on the dead peer detection interval -// // Once a node has been restarted, it takes VERIFICATION_DELAY time -// // for the old peer to be removed from the routing table. -// // Replication is then kicked off to distribute the data to the new closest -// // nodes, hence verification has to be performed after this. -// const VERIFICATION_DELAY: Duration = Duration::from_secs(60); - -// /// Number of times to retry verification if it fails -// const VERIFICATION_ATTEMPTS: usize = 5; - -// /// Length of time to wait before re-verifying the data location -// const REVERIFICATION_DELAY: Duration = -// Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S); - -// // Default number of churns that should be performed. After each churn, we -// // wait for VERIFICATION_DELAY time before verifying the data location. -// // It can be overridden by setting the 'CHURN_COUNT' env var. -// const CHURN_COUNT: u8 = 20; - -// /// Default number of chunks that should be PUT to the network. -// /// It can be overridden by setting the 'CHUNK_COUNT' env var. 
-// const CHUNK_COUNT: usize = 5; -// /// Default number of registers that should be PUT to the network. -// /// It can be overridden by setting the 'REGISTER_COUNT' env var. -// const REGISTER_COUNT: usize = 5; - -// type NodeIndex = usize; -// type RecordHolders = HashMap>; - -// #[tokio::test(flavor = "multi_thread")] -// async fn verify_data_location() -> Result<()> { -// let _log_appender_guard = -// LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false); - -// let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") { -// str.parse::()? -// } else { -// CHURN_COUNT -// }; -// let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") { -// str.parse::()? -// } else { -// CHUNK_COUNT -// }; -// let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") { -// str.parse::()? -// } else { -// REGISTER_COUNT -// }; -// println!( -// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", -// VERIFICATION_DELAY*churn_count as u32 -// ); -// info!( -// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", -// VERIFICATION_DELAY*churn_count as u32 -// ); -// let node_rpc_address = get_all_rpc_addresses(true)?; -// let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; - -// // Store chunks -// println!("Creating a client and paying wallet..."); -// debug!("Creating a client and paying wallet..."); - -// let paying_wallet_dir = TempDir::new()?; - -// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - -// store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; -// store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; - -// // Verify data location initially -// verify_location(&all_peers, &node_rpc_address).await?; - -// // Churn nodes and verify the location of the data after VERIFICATION_DELAY -// let mut current_churn_count = 0; - -// let mut node_restart = NodeRestart::new(true, false)?; -// let mut node_index = 0; -// 'main: loop { -// if current_churn_count >= churn_count { -// break 'main Ok(()); -// } -// current_churn_count += 1; - -// let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? { -// None => { -// // we have reached the end. -// break 'main Ok(()); -// } -// Some(safenode_rpc_endpoint) => safenode_rpc_endpoint, -// }; - -// // wait for the dead peer to be removed from the RT and the replication flow to finish -// println!( -// "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification" -// ); -// info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); -// tokio::time::sleep(VERIFICATION_DELAY).await; - -// // get the new PeerId for the current NodeIndex -// let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?; - -// let response = rpc_client -// .node_info(Request::new(NodeInfoRequest {})) -// .await?; -// let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; -// // The below indexing assumes that, the way we do iteration to retrieve all_peers inside get_all_rpc_addresses -// // and get_all_peer_ids is the same as how we do the iteration inside NodeRestart. -// // todo: make this more cleaner. 
-// if all_peers[node_index] == new_peer_id { -// println!("new and old peer id are the same {new_peer_id:?}"); -// return Err(eyre!("new and old peer id are the same {new_peer_id:?}")); -// } -// all_peers[node_index] = new_peer_id; -// node_index += 1; - -// print_node_close_groups(&all_peers); - -// verify_location(&all_peers, &node_rpc_address).await?; -// } -// } - -// fn print_node_close_groups(all_peers: &[PeerId]) { -// let all_peers = all_peers.to_vec(); -// info!("\nNode close groups:"); - -// for (node_index, peer) in all_peers.iter().enumerate() { -// let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); -// let closest_peers = -// sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); -// let closest_peers_idx = closest_peers -// .iter() -// .map(|&&peer| { -// all_peers -// .iter() -// .position(|&p| p == peer) -// .expect("peer to be in iterator") -// }) -// .collect::>(); -// info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); -// } -// } - -// async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result { -// let mut record_holders = RecordHolders::default(); - -// for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() { -// let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; - -// let records_response = rpc_client -// .record_addresses(Request::new(RecordAddressesRequest {})) -// .await?; - -// for bytes in records_response.get_ref().addresses.iter() { -// let key = RecordKey::from(bytes.clone()); -// let holders = record_holders.entry(key).or_insert(HashSet::new()); -// holders.insert(node_index); -// } -// } -// debug!("Obtained the current set of Record Key holders"); -// Ok(record_holders) -// } - -// // Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey -// // It has a retry loop built in. -// async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAddr]) -> Result<()> { -// let mut failed = HashMap::new(); - -// println!("*********************************************"); -// println!("Verifying data across all peers {all_peers:?}"); -// info!("*********************************************"); -// info!("Verifying data across all peers {all_peers:?}"); - -// let mut verification_attempts = 0; -// while verification_attempts < VERIFICATION_ATTEMPTS { -// failed.clear(); -// let record_holders = get_records_and_holders(node_rpc_addresses).await?; -// for (key, actual_holders_idx) in record_holders.iter() { -// println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); -// info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); -// let record_key = KBucketKey::from(key.to_vec()); -// let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
-// .into_iter() -// .cloned() -// .collect::>(); - -// let actual_holders = actual_holders_idx -// .iter() -// .map(|i| all_peers[*i]) -// .collect::>(); - -// info!( -// "Expected to be held by {:?} nodes: {expected_holders:?}", -// expected_holders.len() -// ); -// info!( -// "Actually held by {:?} nodes : {actual_holders:?}", -// actual_holders.len() -// ); - -// if actual_holders != expected_holders { -// // print any expect holders that are not in actual holders -// let mut missing_peers = Vec::new(); -// expected_holders -// .iter() -// .filter(|expected| !actual_holders.contains(expected)) -// .for_each(|expected| missing_peers.push(*expected)); - -// if !missing_peers.is_empty() { -// error!( -// "Record {:?} is not stored by {missing_peers:?}", -// PrettyPrintRecordKey::from(key), -// ); -// println!( -// "Record {:?} is not stored by {missing_peers:?}", -// PrettyPrintRecordKey::from(key), -// ); -// } -// } - -// let mut failed_peers = Vec::new(); -// expected_holders -// .iter() -// .filter(|expected| !actual_holders.contains(expected)) -// .for_each(|expected| failed_peers.push(*expected)); - -// if !failed_peers.is_empty() { -// failed.insert(key.clone(), failed_peers); -// } -// } - -// if !failed.is_empty() { -// error!("Verification failed for {:?} entries", failed.len()); -// println!("Verification failed for {:?} entries", failed.len()); - -// failed.iter().for_each(|(key, failed_peers)| { -// let key_addr = NetworkAddress::from_record_key(key); -// let pretty_key = PrettyPrintRecordKey::from(key); -// failed_peers.iter().for_each(|peer| { -// let peer_addr = NetworkAddress::from_peer(*peer); -// let ilog2_distance = peer_addr.distance(&key_addr).ilog2(); -// println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); -// error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); -// }); -// }); -// info!("State of each node:"); -// record_holders.iter().for_each(|(key, node_index)| { -// info!( -// "Record {:?} is currently held by node indices {node_index:?}", -// PrettyPrintRecordKey::from(key) -// ); -// }); -// info!("Node index map:"); -// all_peers -// .iter() -// .enumerate() -// .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); -// verification_attempts += 1; -// println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); -// info!("Sleeping before retrying verification. 
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); -// if verification_attempts < VERIFICATION_ATTEMPTS { -// tokio::time::sleep(REVERIFICATION_DELAY).await; -// } -// } else { -// // if successful, break out of the loop -// break; -// } -// } - -// if !failed.is_empty() { -// println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); -// error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); -// Err(eyre!("Verification failed for: {failed:?}")) -// } else { -// println!("All the Records have been verified!"); -// info!("All the Records have been verified!"); -// Ok(()) -// } -// } - -// // Generate random Chunks and store them to the Network -// async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { -// let start = Instant::now(); -// let mut rng = OsRng; - -// let mut uploaded_chunks_count = 0; -// loop { -// if uploaded_chunks_count >= chunk_count { -// break; -// } - -// let chunks_dir = TempDir::new()?; - -// let random_bytes: Vec = ::std::iter::repeat(()) -// .map(|()| rng.gen::()) -// .take(CHUNK_SIZE) -// .collect(); - -// let file_path = chunks_dir.join("random_content"); -// let mut output_file = File::create(file_path.clone())?; -// output_file.write_all(&random_bytes)?; - -// let (head_chunk_addr, _data_map, _file_size, chunks) = -// FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; - -// debug!( -// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", -// chunks.len(), -// random_bytes.len() -// ); - -// let key = -// PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); - -// let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); -// uploader.set_show_holders(true); -// uploader.set_verify_store(false); -// uploader.insert_chunk_paths(chunks); -// let _upload_stats = uploader.start_upload().await?; - -// uploaded_chunks_count += 1; - -// println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); -// info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); -// } - -// println!( -// "{chunk_count:?} Chunks were stored in {:?}", -// start.elapsed() -// ); -// info!( -// "{chunk_count:?} Chunks were stored in {:?}", -// start.elapsed() -// ); - -// // to make sure the last chunk was stored -// tokio::time::sleep(Duration::from_secs(10)).await; - -// Ok(()) -// } - -// async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { -// let start = Instant::now(); -// let paying_wallet = get_wallet(&wallet_dir); -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - -// let mut uploaded_registers_count = 0; -// loop { -// if uploaded_registers_count >= register_count { -// break; -// } -// let meta = XorName(rand::random()); -// let owner = client.signer_pk(); - -// let addr = RegisterAddress::new(meta, owner); -// println!("Creating Register at {addr:?}"); -// debug!("Creating Register at {addr:?}"); - -// let (mut register, ..) 
= client
-// .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default())
-// .await?;
-
-// println!("Editing Register at {addr:?}");
-// debug!("Editing Register at {addr:?}");
-// register.write_online("entry".as_bytes(), true).await?;
-
-// uploaded_registers_count += 1;
-// }
-// println!(
-// "{register_count:?} Registers were stored in {:?}",
-// start.elapsed()
-// );
-// info!(
-// "{register_count:?} Registers were stored in {:?}",
-// start.elapsed()
-// );
-
-// // to make sure the last register was stored
-// tokio::time::sleep(Duration::from_secs(10)).await;
-// Ok(())
-// }
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#![allow(clippy::mutable_key_type)]
+mod common;
+
+use autonomi::Client;
+use bytes::Bytes;
+use common::{
+ client::{get_all_rpc_addresses, get_client_and_funded_wallet},
+ get_all_peer_ids, get_safenode_rpc_client, NodeRestart,
+};
+use eyre::{eyre, Result};
+use libp2p::{
+ kad::{KBucketKey, RecordKey},
+ PeerId,
+};
+use rand::{rngs::OsRng, Rng};
+use sn_logging::LogBuilder;
+use sn_networking::{sleep, sort_peers_by_key};
+use sn_protocol::{
+ safenode_proto::{NodeInfoRequest, RecordAddressesRequest},
+ NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
+};
+use std::{
+ collections::{BTreeSet, HashMap, HashSet},
+ net::SocketAddr,
+ time::{Duration, Instant},
+};
+use tonic::Request;
+use tracing::{debug, error, info};
+use xor_name::XorName;
+
+const CHUNK_SIZE: usize = 1024;
+
+// VERIFICATION_DELAY is set based on the dead peer detection interval
+// Once a node has been restarted, it takes VERIFICATION_DELAY time
+// for the old peer to be removed from the routing table.
+// Replication is then kicked off to distribute the data to the new closest
+// nodes, hence verification has to be performed after this.
+const VERIFICATION_DELAY: Duration = Duration::from_secs(60);
+
+/// Number of times to retry verification if it fails
+const VERIFICATION_ATTEMPTS: usize = 5;
+
+/// Length of time to wait before re-verifying the data location
+const REVERIFICATION_DELAY: Duration =
+ Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S);
+
+// Default number of churns that should be performed. After each churn, we
+// wait for VERIFICATION_DELAY time before verifying the data location.
+// It can be overridden by setting the 'CHURN_COUNT' env var.
+const CHURN_COUNT: u8 = 20;
+
+/// Default number of chunks that should be PUT to the network.
+/// It can be overridden by setting the 'CHUNK_COUNT' env var.
+const CHUNK_COUNT: usize = 5;
+/// Default number of registers that should be PUT to the network.
+/// It can be overridden by setting the 'REGISTER_COUNT' env var.
+const REGISTER_COUNT: usize = 5;
+
+type NodeIndex = usize;
+type RecordHolders = HashMap<RecordKey, HashSet<NodeIndex>>;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn verify_data_location() -> Result<()> {
+ let _log_appender_guard =
+ LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false);
+
+ let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") {
+ str.parse::<u8>()?
+ } else {
+ CHURN_COUNT
+ };
+ let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") {
+ str.parse::<usize>()?
+ } else {
+ CHUNK_COUNT
+ };
+ let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") {
+ str.parse::<usize>()?
+ } else {
+ REGISTER_COUNT
+ };
+ println!(
+ "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}",
+ VERIFICATION_DELAY*churn_count as u32
+ );
+ info!(
+ "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}",
+ VERIFICATION_DELAY*churn_count as u32
+ );
+ let node_rpc_address = get_all_rpc_addresses(true)?;
+ let mut all_peers = get_all_peer_ids(&node_rpc_address).await?;
+
+ let (client, wallet) = get_client_and_funded_wallet().await;
+
+ store_chunks(&client, chunk_count, &wallet).await?;
+ store_registers(&client, register_count, &wallet).await?;
+
+ // Verify data location initially
+ verify_location(&all_peers, &node_rpc_address).await?;
+
+ // Churn nodes and verify the location of the data after VERIFICATION_DELAY
+ let mut current_churn_count = 0;
+
+ let mut node_restart = NodeRestart::new(true, false)?;
+ let mut node_index = 0;
+ 'main: loop {
+ if current_churn_count >= churn_count {
+ break 'main Ok(());
+ }
+ current_churn_count += 1;
+
+ let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? {
+ None => {
+ // we have reached the end.
+ break 'main Ok(());
+ }
+ Some(safenode_rpc_endpoint) => safenode_rpc_endpoint,
+ };
+
+ // wait for the dead peer to be removed from the RT and the replication flow to finish
+ println!(
+ "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"
+ );
+ info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification");
+ tokio::time::sleep(VERIFICATION_DELAY).await;
+
+ // get the new PeerId for the current NodeIndex
+ let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?;
+
+ let response = rpc_client
+ .node_info(Request::new(NodeInfoRequest {}))
+ .await?;
+ let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?;
+ // The indexing below assumes that the way we iterate to retrieve all_peers inside get_all_rpc_addresses
+ // and get_all_peer_ids is the same as how we iterate inside NodeRestart.
+ // todo: make this cleaner.
+ if all_peers[node_index] == new_peer_id {
+ println!("new and old peer id are the same {new_peer_id:?}");
+ return Err(eyre!("new and old peer id are the same {new_peer_id:?}"));
+ }
+ all_peers[node_index] = new_peer_id;
+ node_index += 1;
+
+ print_node_close_groups(&all_peers);
+
+ verify_location(&all_peers, &node_rpc_address).await?;
+ }
+}
+
+fn print_node_close_groups(all_peers: &[PeerId]) {
+ let all_peers = all_peers.to_vec();
+ info!("\nNode close groups:");
+
+ for (node_index, peer) in all_peers.iter().enumerate() {
+ let key = NetworkAddress::from_peer(*peer).as_kbucket_key();
+ let closest_peers =
+ sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer");
+ let closest_peers_idx = closest_peers
+ .iter()
+ .map(|&&peer| {
+ all_peers
+ .iter()
+ .position(|&p| p == peer)
+ .expect("peer to be in iterator")
+ })
+ .collect::<Vec<_>>();
+ info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}");
+ }
+}
+
+async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result<RecordHolders> {
+ let mut record_holders = RecordHolders::default();
+
+ for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() {
+ let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?;
+
+ let records_response = rpc_client
+ .record_addresses(Request::new(RecordAddressesRequest {}))
+ .await?;
+
+ for bytes in records_response.get_ref().addresses.iter() {
+ let key = RecordKey::from(bytes.clone());
+ let holders = record_holders.entry(key).or_insert(HashSet::new());
+ holders.insert(node_index);
+ }
+ }
+ debug!("Obtained the current set of Record Key holders");
+ Ok(record_holders)
+}
+
+// Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey
+// It has a retry loop built in.
+async fn verify_location(all_peers: &Vec<PeerId>, node_rpc_addresses: &[SocketAddr]) -> Result<()> {
+ let mut failed = HashMap::new();
+
+ println!("*********************************************");
+ println!("Verifying data across all peers {all_peers:?}");
+ info!("*********************************************");
+ info!("Verifying data across all peers {all_peers:?}");
+
+ let mut verification_attempts = 0;
+ while verification_attempts < VERIFICATION_ATTEMPTS {
+ failed.clear();
+ let record_holders = get_records_and_holders(node_rpc_addresses).await?;
+ for (key, actual_holders_idx) in record_holders.iter() {
+ println!("Verifying {:?}", PrettyPrintRecordKey::from(key));
+ info!("Verifying {:?}", PrettyPrintRecordKey::from(key));
+ let record_key = KBucketKey::from(key.to_vec());
+ let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)?
+ .into_iter()
+ .cloned()
+ .collect::<BTreeSet<_>>();
+
+ let actual_holders = actual_holders_idx
+ .iter()
+ .map(|i| all_peers[*i])
+ .collect::<BTreeSet<_>>();
+
+ info!(
+ "Expected to be held by {:?} nodes: {expected_holders:?}",
+ expected_holders.len()
+ );
+ info!(
+ "Actually held by {:?} nodes : {actual_holders:?}",
+ actual_holders.len()
+ );
+
+ if actual_holders != expected_holders {
+ // print any expected holders that are not in actual holders
+ let mut missing_peers = Vec::new();
+ expected_holders
+ .iter()
+ .filter(|expected| !actual_holders.contains(expected))
+ .for_each(|expected| missing_peers.push(*expected));
+
+ if !missing_peers.is_empty() {
+ error!(
+ "Record {:?} is not stored by {missing_peers:?}",
+ PrettyPrintRecordKey::from(key),
+ );
+ println!(
+ "Record {:?} is not stored by {missing_peers:?}",
+ PrettyPrintRecordKey::from(key),
+ );
+ }
+ }
+
+ let mut failed_peers = Vec::new();
+ expected_holders
+ .iter()
+ .filter(|expected| !actual_holders.contains(expected))
+ .for_each(|expected| failed_peers.push(*expected));
+
+ if !failed_peers.is_empty() {
+ failed.insert(key.clone(), failed_peers);
+ }
+ }
+
+ if !failed.is_empty() {
+ error!("Verification failed for {:?} entries", failed.len());
+ println!("Verification failed for {:?} entries", failed.len());
+
+ failed.iter().for_each(|(key, failed_peers)| {
+ let key_addr = NetworkAddress::from_record_key(key);
+ let pretty_key = PrettyPrintRecordKey::from(key);
+ failed_peers.iter().for_each(|peer| {
+ let peer_addr = NetworkAddress::from_peer(*peer);
+ let ilog2_distance = peer_addr.distance(&key_addr).ilog2();
+ println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}");
+ error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}");
+ });
+ });
+ info!("State of each node:");
+ record_holders.iter().for_each(|(key, node_index)| {
+ info!(
+ "Record {:?} is currently held by node indices {node_index:?}",
+ PrettyPrintRecordKey::from(key)
+ );
+ });
+ info!("Node index map:");
+ all_peers
+ .iter()
+ .enumerate()
+ .for_each(|(idx, peer)| info!("{idx} : {peer:?}"));
+ verification_attempts += 1;
+ println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}");
+ info!("Sleeping before retrying verification.
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); + if verification_attempts < VERIFICATION_ATTEMPTS { + tokio::time::sleep(REVERIFICATION_DELAY).await; + } + } else { + // if successful, break out of the loop + break; + } + } + + if !failed.is_empty() { + println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); + error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); + Err(eyre!("Verification failed for: {failed:?}")) + } else { + println!("All the Records have been verified!"); + info!("All the Records have been verified!"); + Ok(()) + } +} + +// Generate random Chunks and store them to the Network +async fn store_chunks( + client: &Client, + chunk_count: usize, + wallet: &evmlib::wallet::Wallet, +) -> Result<()> { + let start = Instant::now(); + let mut rng = OsRng; + + let mut uploaded_chunks_count = 0; + loop { + if uploaded_chunks_count >= chunk_count { + break; + } + + let random_bytes: Vec = ::std::iter::repeat(()) + .map(|()| rng.gen::()) + .take(CHUNK_SIZE) + .collect(); + + let random_bytes = Bytes::from(random_bytes); + + client.put(random_bytes, wallet).await?; + + uploaded_chunks_count += 1; + + println!("Stored Chunk with len {CHUNK_SIZE}"); + info!("Stored Chunk with len {CHUNK_SIZE}"); + } + + println!( + "{chunk_count:?} Chunks were stored in {:?}", + start.elapsed() + ); + info!( + "{chunk_count:?} Chunks were stored in {:?}", + start.elapsed() + ); + + // to make sure the last chunk was stored + tokio::time::sleep(Duration::from_secs(10)).await; + + Ok(()) +} + +async fn store_registers( + client: &Client, + register_count: usize, + wallet: &evmlib::wallet::Wallet, +) -> Result<()> { + let start = Instant::now(); + + let mut uploaded_registers_count = 0; + loop { + if uploaded_registers_count >= register_count { + break; + } + // Owner key of the register. 
+ let key = bls::SecretKey::random(); + + // Create a register with the value [1, 2, 3, 4] + let register = client + .create_register( + vec![1, 2, 3, 4].into(), + XorName::random(&mut rand::thread_rng()), + key.clone(), + wallet, + ) + .await?; + + println!("Created Register at {:?}", register.address()); + debug!("Created Register at {:?}", register.address()); + sleep(Duration::from_secs(5)).await; + + // Update the register with the value [5, 6, 7, 8] + client + .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) + .await?; + + println!("Updated Register at {:?}", register.address()); + debug!("Updated Register at {:?}", register.address()); + + uploaded_registers_count += 1; + } + println!( + "{register_count:?} Registers were stored in {:?}", + start.elapsed() + ); + info!( + "{register_count:?} Registers were stored in {:?}", + start.elapsed() + ); + + // to make sure the last register was stored + sleep(Duration::from_secs(10)).await; + Ok(()) +} diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 292bc01d4a..2944073de5 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -9,9 +9,13 @@ readme = "README.md" repository = "https://github.com/maidsafe/safe_network" version = "0.4.6" + [dependencies] +autonomi = { path ="../autonomi", version = "0.1" } color-eyre = "~0.6.2" +const-hex = "1.12.0" dirs-next = "~2.0.0" +evmlib = { path = "../evmlib", version = "0.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } serde = { version = "1.0.133", features = [ "derive"]} serde_json = "1.0" diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs new file mode 100644 index 0000000000..4ec41ca5d2 --- /dev/null +++ b/test_utils/src/evm.rs @@ -0,0 +1,56 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use const_hex::ToHexExt; +use evmlib::CustomNetwork; +use std::env; + +fn get_var_or_panic(var: &str) -> String { + env::var(var).unwrap_or_else(|_| panic!("{var} environment variable needs to be set")) +} + +pub fn evm_network_from_env() -> evmlib::Network { + let evm_network = env::var("EVM_NETWORK").ok(); + let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one"); + + let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag { + ( + evmlib::Network::ArbitrumOne.rpc_url().to_string(), + evmlib::Network::ArbitrumOne + .payment_token_address() + .encode_hex_with_prefix(), + evmlib::Network::ArbitrumOne + .chunk_payments_address() + .encode_hex_with_prefix(), + ) + } else { + ( + get_var_or_panic("RPC_URL"), + get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), + get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), + ) + }; + + evmlib::Network::Custom(CustomNetwork::new( + &rpc_url, + &payment_token_address, + &chunk_payments_address, + )) +} + +pub fn get_funded_wallet() -> evmlib::wallet::Wallet { + let network = evm_network_from_env(); + // Default deployer wallet of the testnet. 
+ const DEFAULT_WALLET_PRIVATE_KEY: &str = + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + + let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); + + evmlib::wallet::Wallet::new_from_private_key(network, &private_key) + .expect("Invalid private key") +} diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index 3466e43bc4..e2ddf72f2f 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -6,4 +6,5 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +pub mod evm; pub mod testnet; From 57d0b68da60c17eb22acedccca6615d96a9a50ee Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 2 Oct 2024 14:07:07 +0530 Subject: [PATCH 101/255] feat: enable verify routing table test --- autonomi/tests/file.rs | 4 - autonomi/tests/register.rs | 6 - sn_node/tests/verify_routing_table.rs | 204 +++++++++++++------------- 3 files changed, 102 insertions(+), 112 deletions(-) diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 85cef358ed..6b2d58ad9d 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -2,13 +2,9 @@ mod common; -#[cfg(feature = "files")] use autonomi::Client; -#[cfg(feature = "files")] use std::time::Duration; -#[cfg(feature = "files")] use test_utils::evm::get_funded_wallet; -#[cfg(feature = "files")] use tokio::time::sleep; #[tokio::test] diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 8c47776208..3cee58d0d2 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -2,17 +2,11 @@ mod common; -#[cfg(feature = "registers")] use autonomi::Client; -#[cfg(feature = "registers")] use bytes::Bytes; -#[cfg(feature = "registers")] use std::time::Duration; -#[cfg(feature = "registers")] use test_utils::evm::get_funded_wallet; -#[cfg(feature = "registers")] use tokio::time::sleep; -#[cfg(feature = "registers")] use xor_name::XorName; #[tokio::test] diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index 8f01c1a24a..da19270b69 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -1,114 +1,114 @@ -// // Copyright 2024 MaidSafe.net limited. -// // -// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// // KIND, either express or implied. Please review the Licences for the specific language governing -// // permissions and limitations relating to use of the SAFE Network Software. +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
-// #![allow(clippy::mutable_key_type)]
-// mod common;
+#![allow(clippy::mutable_key_type)]
+mod common;

-// use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client};
-// use color_eyre::Result;
-// use libp2p::{
-// kad::{KBucketKey, K_VALUE},
-// PeerId,
-// };
-// use sn_logging::LogBuilder;
-// use sn_protocol::safenode_proto::KBucketsRequest;
-// use std::{
-// collections::{BTreeMap, HashSet},
-// time::Duration,
-// };
-// use tonic::Request;
-// use tracing::{error, info, trace};
+use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client};
+use color_eyre::Result;
+use libp2p::{
+ kad::{KBucketKey, K_VALUE},
+ PeerId,
+};
+use sn_logging::LogBuilder;
+use sn_protocol::safenode_proto::KBucketsRequest;
+use std::{
+ collections::{BTreeMap, HashSet},
+ time::Duration,
+};
+use tonic::Request;
+use tracing::{error, info, trace};

-// /// Sleep for some time for the nodes to discover each other before verification
-// /// It can also be set through the env variable of the same name.
-// const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);
+/// Sleep for some time for the nodes to discover each other before verification
+/// It can also be set through the env variable of the same name.
+const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);

-// #[tokio::test(flavor = "multi_thread")]
-// async fn verify_routing_table() -> Result<()> {
-// let _log_appender_guard =
-// LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false);
+#[tokio::test(flavor = "multi_thread")]
+async fn verify_routing_table() -> Result<()> {
+ let _log_appender_guard =
+ LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false);

-// let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION")
-// .map(|value| {
-// value
-// .parse::<u64>()
-// .expect("Failed to parse sleep value into u64")
-// })
-// .map(Duration::from_secs)
-// .unwrap_or(SLEEP_BEFORE_VERIFICATION);
-// info!("Sleeping for {sleep_duration:?} before verification");
-// tokio::time::sleep(sleep_duration).await;
+ let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION")
+ .map(|value| {
+ value
+ .parse::<u64>()
+ .expect("Failed to parse sleep value into u64")
+ })
+ .map(Duration::from_secs)
+ .unwrap_or(SLEEP_BEFORE_VERIFICATION);
+ info!("Sleeping for {sleep_duration:?} before verification");
+ tokio::time::sleep(sleep_duration).await;

-// let node_rpc_address = get_all_rpc_addresses(false)?;
+ let node_rpc_address = get_all_rpc_addresses(false)?;

-// let all_peers = get_all_peer_ids(&node_rpc_address).await?;
-// trace!("All peers: {all_peers:?}");
-// let mut all_failed_list = BTreeMap::new();
+ let all_peers = get_all_peer_ids(&node_rpc_address).await?;
+ trace!("All peers: {all_peers:?}");
+ let mut all_failed_list = BTreeMap::new();

-// for (node_index, rpc_address) in node_rpc_address.iter().enumerate() {
-// let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?;
+ for (node_index, rpc_address) in node_rpc_address.iter().enumerate() {
+ let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?;

-// let response = rpc_client
-// .k_buckets(Request::new(KBucketsRequest {}))
-// .await?;
+ let response = rpc_client
+ .k_buckets(Request::new(KBucketsRequest {}))
+ .await?;

-// let k_buckets = response.get_ref().kbuckets.clone();
-// let k_buckets = k_buckets
-// .into_iter()
-// .map(|(ilog2, peers)| {
-// let peers = peers
-// .peers
-// .into_iter()
-// .map(|peer_bytes|
PeerId::from_bytes(&peer_bytes).unwrap())
-// .collect::<HashSet<_>>();
-// (ilog2, peers)
-// })
-// .collect::<BTreeMap<_, _>>();
+ .map(|peer_bytes| PeerId::from_bytes(&peer_bytes).unwrap())
+ .collect::<HashSet<_>>();
+ (ilog2, peers)
+ })
+ .collect::<BTreeMap<_, _>>();

-// let current_peer = all_peers[node_index];
-// let current_peer_key = KBucketKey::from(current_peer);
-// trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}");
+ let current_peer = all_peers[node_index];
+ let current_peer_key = KBucketKey::from(current_peer);
+ trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}");

-// let mut failed_list = Vec::new();
-// for peer in all_peers.iter() {
-// let ilog2_distance = match KBucketKey::from(*peer).distance(&current_peer_key).ilog2() {
-// Some(distance) => distance,
-// // None if same key
-// None => continue,
-// };
-// match k_buckets.get(&ilog2_distance) {
-// Some(bucket) => {
-// if bucket.contains(peer) {
-// println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-// continue;
-// } else if bucket.len() == K_VALUE.get() {
-// println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
-// info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
-// continue;
-// } else {
-// println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-// error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
-// failed_list.push(*peer);
-// }
-// }
-// None => {
-// info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer.");
-// failed_list.push(*peer);
-// }
-// }
-// }
-// if !failed_list.is_empty() {
-// all_failed_list.insert(current_peer, failed_list);
-// }
-// }
-// if !all_failed_list.is_empty() {
-// error!("Failed to verify routing table:\n{all_failed_list:?}");
-// panic!("Failed to verify routing table.");
-// }
-// Ok(())
-// }
+ let mut failed_list = Vec::new();
+ for peer in all_peers.iter() {
+ let ilog2_distance = match KBucketKey::from(*peer).distance(&current_peer_key).ilog2() {
+ Some(distance) => distance,
+ // None if same key
+ None => continue,
+ };
+ match k_buckets.get(&ilog2_distance) {
+ Some(bucket) => {
+ if bucket.contains(peer) {
+ println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
+ continue;
+ } else if bucket.len() == K_VALUE.get() {
+ println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full");
+ info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}.
But skipped as the bucket is full"); + continue; + } else { + println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); + error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); + failed_list.push(*peer); + } + } + None => { + info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer."); + failed_list.push(*peer); + } + } + } + if !failed_list.is_empty() { + all_failed_list.insert(current_peer, failed_list); + } + } + if !all_failed_list.is_empty() { + error!("Failed to verify routing table:\n{all_failed_list:?}"); + panic!("Failed to verify routing table."); + } + Ok(()) +} From fd23e269ff388a3af1999cc555b173ceea23bbd4 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 2 Oct 2024 12:59:06 +0200 Subject: [PATCH 102/255] fix(launchpad): we change discord username to lowercase --- node-launchpad/src/components/popup/beta_programme.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 8f0a547fe9..615c20bcf4 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -58,14 +58,15 @@ impl BetaProgramme { fn capture_inputs(&mut self, key: KeyEvent) -> Vec { let send_back = match key.code { KeyCode::Enter => { - let username = self.discord_input_filed.value().to_string(); + let username = self.discord_input_filed.value().to_string().to_lowercase(); + self.discord_input_filed = username.clone().into(); debug!( "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", ); self.state = BetaProgrammeState::DiscordIdAlreadySet; vec![ - Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), + Action::StoreDiscordUserName(username.clone()), Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), Action::SwitchScene(Scene::Status), ] From c1c3be33da39ef56959f92859e590ff9df98bd35 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 2 Oct 2024 12:59:33 +0200 Subject: [PATCH 103/255] chore(launchpad): we reset but do not start nodes after username change --- node-launchpad/src/components/status.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 7d31f1fc92..43e0970782 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -299,7 +299,7 @@ impl Component for Status { self.lock_registry = Some(LockRegistryState::ResettingNodes); info!("Resetting safenode services because the Discord Username was reset."); let action_sender = self.get_actions_sender()?; - reset_nodes(action_sender, true); + reset_nodes(action_sender, false); } } Action::StoreStorageDrive(ref drive_mountpoint, ref _drive_name) => { From c6195310b5afa7b86861aa7d6bc9266123a247ec Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 3 Oct 2024 15:32:37 +0900 Subject: [PATCH 104/255] feat: remove path from Network --- autonomi/src/client/mod.rs | 4 +--- sn_networking/src/driver.rs | 14 +++++++------- sn_networking/src/lib.rs | 12 +----------- sn_node/src/lib.rs | 3 ++- sn_node/src/node.rs | 7 ++++--- sn_node_manager/src/local.rs | 11 ++++++++--- 6 files changed, 23 insertions(+), 28 deletions(-) diff --git 
a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index 11a9a38796..c0f097501f 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -91,9 +91,7 @@ impl Client {
}

fn build_client_and_run_swarm(local: bool) -> (Network, Receiver<NetworkEvent>) {
- // TODO: `root_dir` is only used for nodes. `NetworkBuilder` should not require it.
- let root_dir = std::env::temp_dir();
- let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir);
+ let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local);

// TODO: Re-export `Receiver` from `sn_networking`. Else users need to keep their `tokio` dependency in sync.
// TODO: Think about handling the mDNS error here.
diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index 64b96986ea..2a56235bde 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -251,7 +251,6 @@ pub struct NetworkBuilder {
is_behind_home_network: bool,
keypair: Keypair,
local: bool,
- root_dir: PathBuf,
listen_addr: Option<SocketAddr>,
request_timeout: Option<Duration>,
concurrency_limit: Option<usize>,
@@ -265,12 +264,11 @@ pub struct NetworkBuilder {
}

impl NetworkBuilder {
- pub fn new(keypair: Keypair, local: bool, root_dir: PathBuf) -> Self {
+ pub fn new(keypair: Keypair, local: bool) -> Self {
Self {
is_behind_home_network: false,
keypair,
local,
- root_dir,
listen_addr: None,
request_timeout: None,
concurrency_limit: None,
@@ -335,7 +333,10 @@ impl NetworkBuilder {
/// # Errors
///
/// Returns an error if there is a problem initializing the mDNS behaviour.
- pub fn build_node(self) -> Result<(Network, mpsc::Receiver<NetworkEvent>, SwarmDriver)> {
+ pub fn build_node(
+ self,
+ root_dir: PathBuf,
+ ) -> Result<(Network, mpsc::Receiver<NetworkEvent>, SwarmDriver)> {
let mut kad_cfg = kad::Config::new(KAD_STREAM_PROTOCOL_ID);
let _ = kad_cfg
.set_kbucket_inserts(libp2p::kad::BucketInserts::Manual)
@@ -363,7 +364,7 @@ impl NetworkBuilder {

let store_cfg = {
// Configures the disk_store to store records under the provided path and increase the max record size
- let storage_dir_path = self.root_dir.join("record_store");
+ let storage_dir_path = root_dir.join("record_store");
if let Err(error) = std::fs::create_dir_all(&storage_dir_path) {
return Err(NetworkError::FailedToCreateRecordStoreDir {
path: storage_dir_path,
@@ -373,7 +374,7 @@ impl NetworkBuilder {
NodeRecordStoreConfig {
max_value_bytes: MAX_PACKET_SIZE, // TODO, does this need to be _less_ than MAX_PACKET_SIZE
storage_dir: storage_dir_path,
- historic_quote_dir: self.root_dir.clone(),
+ historic_quote_dir: root_dir.clone(),
..Default::default()
}
};
@@ -700,7 +701,6 @@ impl NetworkBuilder {
network_swarm_cmd_sender,
local_swarm_cmd_sender,
peer_id,
- self.root_dir,
self.keypair,
);

diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index 79d7d0c380..856f0559a3 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -68,7 +68,6 @@ use sn_protocol::{
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
net::IpAddr,
- path::PathBuf,
sync::Arc,
};
use tokio::sync::{
@@ -160,7 +159,6 @@ struct NetworkInner {
network_swarm_cmd_sender: mpsc::Sender<NetworkSwarmCmd>,
local_swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
peer_id: PeerId,
- root_dir_path: PathBuf,
keypair: Keypair,
}

@@ -169,7 +167,6 @@ impl Network {
network_swarm_cmd_sender: mpsc::Sender<NetworkSwarmCmd>,
local_swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
peer_id: PeerId,
- root_dir_path: PathBuf,
keypair, }), } @@ -193,11 +189,6 @@ impl Network { &self.inner.keypair } - /// Returns the root directory path of the instance. - pub fn root_dir_path(&self) -> &PathBuf { - &self.inner.root_dir_path - } - /// Get the sender to send a `NetworkSwarmCmd` to the underlying `Swarm`. pub(crate) fn network_swarm_cmd_sender(&self) -> &mpsc::Sender { &self.inner.network_swarm_cmd_sender @@ -1179,8 +1170,7 @@ mod tests { #[test] fn test_network_sign_verify() -> eyre::Result<()> { let (network, _, _) = - NetworkBuilder::new(Keypair::generate_ed25519(), false, std::env::temp_dir()) - .build_client()?; + NetworkBuilder::new(Keypair::generate_ed25519(), false).build_client()?; let msg = b"test message"; let sig = network.sign(msg)?; assert!(network.verify(msg, &sig)); diff --git a/sn_node/src/lib.rs b/sn_node/src/lib.rs index 7dbd88ce5e..60f0222abf 100644 --- a/sn_node/src/lib.rs +++ b/sn_node/src/lib.rs @@ -59,6 +59,7 @@ use std::{ pub struct RunningNode { network: Network, node_events_channel: NodeEventsChannel, + root_dir_path: PathBuf, } impl RunningNode { @@ -76,7 +77,7 @@ impl RunningNode { /// - Windows: C:\Users\\AppData\Roaming\safe\node\ #[expect(rustdoc::invalid_html_tags)] pub fn root_dir_path(&self) -> PathBuf { - self.network.root_dir_path().clone() + self.root_dir_path.clone() } /// Returns a `SwarmLocalState` with some information obtained from swarm's local state. diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 36c2d586e5..83666ddd39 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -130,8 +130,7 @@ impl NodeBuilder { /// /// Returns an error if there is a problem initializing the `SwarmDriver`. pub fn build_and_run(self) -> Result { - let mut network_builder = - NetworkBuilder::new(self.identity_keypair, self.local, self.root_dir); + let mut network_builder = NetworkBuilder::new(self.identity_keypair, self.local); #[cfg(feature = "open-metrics")] let metrics_recorder = if self.metrics_server_port.is_some() { @@ -155,7 +154,8 @@ impl NodeBuilder { #[cfg(feature = "upnp")] network_builder.upnp(self.upnp); - let (network, network_event_receiver, swarm_driver) = network_builder.build_node()?; + let (network, network_event_receiver, swarm_driver) = + network_builder.build_node(self.root_dir.clone())?; let node_events_channel = NodeEventsChannel::default(); let node = NodeInner { @@ -173,6 +173,7 @@ impl NodeBuilder { let running_node = RunningNode { network, node_events_channel, + root_dir_path: self.root_dir, }; // Run the node diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index e729022d69..ed668c4b38 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,8 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, - increment_port_option, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; use color_eyre::{eyre::eyre, Result}; @@ -20,13 +19,19 @@ use mockall::automock; use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; -use sn_service_management::FaucetServiceData; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, }; + +#[cfg(feature = "faucet")] +use crate::helpers::get_username; +#[cfg(feature = "faucet")] +use sn_service_management::FaucetServiceData; +#[cfg(feature = "faucet")] use sn_transfers::get_faucet_data_dir; + use std::{ 
net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, From fe5bc367a3a5abdbe8cbdfcf831be84971270d82 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 2 Oct 2024 11:25:56 +0200 Subject: [PATCH 105/255] refactor(autonomi): fix up wasm test for put test --- Cargo.lock | 79 ++++++++++++++++--------------- Cargo.toml | 3 ++ autonomi/Cargo.toml | 8 +++- autonomi/README.md | 15 ++++++ autonomi/src/client/data.rs | 5 +- autonomi/src/client/files.rs | 21 ++++---- autonomi/src/client/mod.rs | 16 ++++--- autonomi/src/lib.rs | 2 + autonomi/tests/common.rs | 24 +++++----- autonomi/tests/file.rs | 2 +- autonomi/tests/wasm.rs | 25 ++++------ autonomi_cli/Cargo.toml | 2 +- autonomi_cli/src/commands/file.rs | 2 +- evmlib/Cargo.toml | 3 ++ sn_networking/src/bootstrap.rs | 13 ++++- sn_networking/src/event/kad.rs | 8 ++-- sn_networking/src/lib.rs | 27 +++++++---- sn_node/src/metrics.rs | 2 +- test_utils/Cargo.toml | 1 + test_utils/src/evm.rs | 19 ++++---- test_utils/src/lib.rs | 29 ++++++++++++ 21 files changed, 190 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47d0cdfb71..b1311750c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,8 +119,7 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f4a4aaae80afd4be443a6aecd92a6b255dcdd000f97996928efb33d8a71e100" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-consensus", "alloy-contract", @@ -152,8 +151,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c309895995eaa4bfcc345f5515a39c7df9447798645cc8bf462b6c5bf1dc96" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,8 +164,7 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f4e0ef72b0876ae3068b2ed7dfae9ae1779ce13cfaec2ee1f08f5bd0348dc57" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -215,8 +212,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9431c99a3b3fe606ede4b3d4043bdfbcb780c45b8d8d226c3804e2b75cfbe68" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -230,8 +226,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79614dfe86144328da11098edcc7bc1a3f25ad8d3134a9eb9e857e06f0d9840d" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "alloy-serde", @@ -253,8 +248,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e2865c4c3bb4cdad3f0d9ec1ab5c0c657ba69a375651bd35e32fb6c180ccc2" +source = 
"git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -267,8 +261,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e701fc87ef9a3139154b0b4ccb935b565d27ffd9de020fe541bf2dec5ae4ede" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-consensus", "alloy-eips", @@ -288,8 +281,7 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d5a0f9170b10988b6774498a022845e13eda94318440d17709d50687f67f9" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "alloy-serde", @@ -299,8 +291,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16faebb9ea31a244fd6ce3288d47df4be96797d9c3c020144b8f2c31543a4512" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -337,8 +328,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9c0ab10b93de601a6396fc7ff2ea10d3b28c46f079338fa562107ebf9857c8" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-chains", "alloy-consensus", @@ -368,6 +358,7 @@ dependencies = [ "tokio", "tracing", "url", + "wasmtimer", ] [[package]] @@ -395,8 +386,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b38e3ffdb285df5d9f60cb988d336d9b8e3505acb78750c3bc60336a7af41d3" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -411,13 +401,13 @@ dependencies = [ "tower", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c31a3750b8f5a350d17354e46a52b0f2f19ec5f2006d816935af599dedc521" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-rpc-types-anvil", "alloy-rpc-types-eth", @@ -428,8 +418,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ab6509cd38b2e8c8da726e0f61c1e314a81df06a38d37ddec8bced3f8d25ed" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "alloy-serde", @@ -439,8 +428,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e18424d962d7700a882fe423714bd5b9dde74c7a7589d4255ea64068773aef" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -458,8 +446,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33feda6a53e6079895aed1d08dcb98a1377b000d80d16370fbbdb8155d547ef" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "serde", @@ -469,8 +456,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740a25b92e849ed7b0fa013951fe2f64be9af1ad5abe805037b44fb7770c5c47" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-primitives", "async-trait", @@ -483,8 +469,7 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0707d4f63e4356a110b30ef3add8732ab6d181dd7be4607bf79b8777105cee" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-consensus", "alloy-network", @@ -572,8 +557,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0590afbdacf2f8cca49d025a2466f3b6584a016a8b28f532f29f8da1007bae" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -586,13 +570,13 @@ dependencies = [ "tower", "tracing", "url", + "wasm-bindgen-futures", ] [[package]] name = "alloy-transport-http" version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2437d145d80ea1aecde8574d2058cceb8b3c9cba05f6aea8e67907c660d46698" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-wasm-instant#f80f2ebf6ceed29f122614e072d404063b790231" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -1028,9 +1012,11 @@ dependencies = [ "bip39", "blsttc", "bytes", + "console_error_panic_hook", "const-hex", "evmlib", "eyre", + "futures", "hex 0.4.3", "instant", "libp2p 0.54.1", @@ -1051,6 +1037,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "tracing-web", "walkdir", "wasm-bindgen-test", "xor_name", @@ -8660,6 +8647,7 @@ dependencies = [ "libp2p 0.54.1", "serde", "serde_json", + "sn_peers_acquisition", ] [[package]] @@ -9205,6 +9193,19 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "tracing-web" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e6a141feebd51f8d91ebfd785af50fca223c570b86852166caa3b141defe7c" +dependencies = [ + "js-sys", + "tracing-core", + "tracing-subscriber", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -9749,7 +9750,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c34946d706..429515c93a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,3 +59,6 @@ pre-release-commit-message = "chore(release): release commit, tags, deps and cha publish = false push = false tag = false + +[patch.crates-io] +alloy = { git = 'https://github.com/b-zee/alloy.git', 
branch = "fix-wasm-instant" } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index d5990e8b53..8ac45f7c5b 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -14,8 +14,8 @@ default = ["data"] full = ["data", "registers", "vault"] data = [] vault = ["data"] -files = ["fs", "data"] -fs = ["tokio/fs"] +files = ["data"] +fs = ["tokio/fs", "files"] local = ["sn_networking/local-discovery"] registers = [] @@ -44,6 +44,7 @@ tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } walkdir = "2.5.0" xor_name = "5.0.0" +futures = "0.3.30" [dev-dependencies] eyre = "0.6.5" @@ -54,8 +55,11 @@ test_utils = { path = "../test_utils" } wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] +console_error_panic_hook = "0.1.7" +evmlib = { path = "../evmlib", version = "0.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = [ "wasm-bindgen", "inaccurate" ] } +tracing-web = "0.1.3" [lints] workspace = true diff --git a/autonomi/README.md b/autonomi/README.md index 2603768aea..eafb99718a 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -62,6 +62,21 @@ $ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=au $ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture ``` +### WebAssembly + +To run a WASM test +- Install `wasm-pack` +- Make sure your Rust supports the `wasm32-unknown-unknown` target. (If you have `rustup`: `rustup target add wasm32-unknown-unknown`.) +- Pass a bootstrap peer via `SAFE_PEERS`. This *has* to be the websocket address, e.g. `/ip4//tcp//ws/p2p/`. + - As well as the other environment variables needed for EVM payments (e.g. `RPC_URL`). +- Optionally specify the specific test, e.g. `-- put` to run `put()` in `wasm.rs` only. + +Example: +````sh +SAFE_PEERS=/ip4//tcp//ws/p2p/ wasm-pack test --release --features=data,files --firefox autonomi --test wasm -- put +``` + + ## Faucet (local) There is no faucet server, but instead you can use the `Deployer wallet private key` printed in the EVM node output to diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index e0650a2ca9..fea77ed15b 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -155,7 +155,7 @@ impl Client { /// Upload a piece of data to the network. This data will be self-encrypted, /// and the data map XOR address will be returned. 
pub async fn put(&self, data: Bytes, wallet: &Wallet) -> Result<XorName, PutError> {
- let now = std::time::Instant::now();
+ let now = sn_networking::target_arch::Instant::now();

let (data_map_chunk, chunks) = encrypt(data)?;
tracing::debug!("Encryption took: {:.2?}", now.elapsed());
@@ -186,8 +186,9 @@ impl Client {
Ok(map_xor_name)
}

+ #[cfg_attr(not(feature = "fs"), allow(dead_code, reason = "used only with `fs`"))]
pub(crate) async fn cost(&self, data: Bytes) -> Result<AttoTokens, PayError> {
- let now = std::time::Instant::now();
+ let now = sn_networking::target_arch::Instant::now();

let (data_map_chunk, chunks) = encrypt(data)?;
tracing::debug!("Encryption took: {:.2?}", now.elapsed());
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index c8317a2ed7..c791917799 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -1,12 +1,9 @@
use crate::client::data::{GetError, PutError};
use crate::client::Client;
-use crate::self_encryption::encrypt;
use bytes::Bytes;
use serde::{Deserialize, Serialize};
-use sn_evm::{Amount, AttoTokens};
use std::collections::HashMap;
use std::path::PathBuf;
-use walkdir::WalkDir;
use xor_name::XorName;

/// Directory-like structure containing file paths and their metadata.
@@ -61,25 +58,26 @@ pub enum UploadError {

impl Client {
/// Fetch a directory from the network.
- pub async fn fetch_root(&mut self, address: XorName) -> Result<Root, GetError> {
+ pub async fn fetch_root(&self, address: XorName) -> Result<Root, GetError> {
let data = self.get(address).await?;

Ok(Root::from_bytes(data)?)
}

/// Fetch the file pointed to by the given pointer.
- pub async fn fetch_file(&mut self, file: &FilePointer) -> Result<Bytes, GetError> {
+ pub async fn fetch_file(&self, file: &FilePointer) -> Result<Bytes, GetError> {
let data = self.get(file.data_map).await?;

Ok(data)
}

/// Get the cost to upload a file/dir to the network.
/// quick and dirty implementation, please refactor once files are cleanly implemented - pub async fn file_cost(&mut self, path: &PathBuf) -> Result { + #[cfg(feature = "fs")] + pub async fn file_cost(&self, path: &PathBuf) -> Result { let mut map = HashMap::new(); - let mut total_cost = Amount::ZERO; + let mut total_cost = sn_evm::Amount::ZERO; - for entry in WalkDir::new(path) { + for entry in walkdir::WalkDir::new(path) { let entry = entry?; if !entry.file_type().is_file() { @@ -97,8 +95,8 @@ impl Client { // re-do encryption to get the correct map xorname here // this code needs refactor - let now = std::time::Instant::now(); - let (data_map_chunk, _) = encrypt(file_bytes).expect("TODO"); + let now = sn_networking::target_arch::Instant::now(); + let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes).expect("TODO"); tracing::debug!("Encryption took: {:.2?}", now.elapsed()); let map_xor_name = *data_map_chunk.address().xorname(); let data_map_xorname = FilePointer { @@ -126,7 +124,7 @@ impl Client { ) -> Result<(Root, XorName), UploadError> { let mut map = HashMap::new(); - for entry in WalkDir::new(path) { + for entry in walkdir::WalkDir::new(path) { let entry = entry?; if !entry.file_type().is_file() { @@ -150,6 +148,7 @@ impl Client { } } +#[cfg(feature = "fs")] async fn upload_from_file( client: &mut Client, path: PathBuf, diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index c0f097501f..1086de0800 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -12,9 +12,9 @@ pub mod vault; use std::{collections::HashSet, time::Duration}; use libp2p::{identity::Keypair, Multiaddr}; -use sn_networking::{multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; +use sn_networking::{interval, multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; -use tokio::{sync::mpsc::Receiver, time::interval}; +use tokio::sync::mpsc::Receiver; /// Time before considering the connection timed out. pub const CONNECT_TIMEOUT_SECS: u64 = 20; @@ -73,7 +73,7 @@ impl Client { // Spawn task to dial to the given peers let network_clone = network.clone(); let peers = peers.to_vec(); - let _handle = tokio::spawn(async move { + let _handle = sn_networking::target_arch::spawn(async move { for addr in peers { if let Err(err) = network_clone.dial(addr.clone()).await { eprintln!("addr={addr} Failed to dial: {err:?}"); @@ -81,8 +81,8 @@ impl Client { } }); - let (sender, receiver) = tokio::sync::oneshot::channel(); - tokio::spawn(handle_event_receiver(event_receiver, sender)); + let (sender, receiver) = futures::channel::oneshot::channel(); + sn_networking::target_arch::spawn(handle_event_receiver(event_receiver, sender)); receiver.await.expect("sender should not close")?; @@ -98,20 +98,22 @@ fn build_client_and_run_swarm(local: bool) -> (Network, Receiver) let (network, event_receiver, swarm_driver) = network_builder.build_client().expect("mdns to succeed"); - let _swarm_driver = tokio::spawn(swarm_driver.run()); + let _swarm_driver = sn_networking::target_arch::spawn(swarm_driver.run()); (network, event_receiver) } async fn handle_event_receiver( mut event_receiver: Receiver, - sender: tokio::sync::oneshot::Sender>, + sender: futures::channel::oneshot::Sender>, ) { // We switch this to `None` when we've sent the oneshot 'connect' result. 
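The `sn_networking::target_arch::spawn` and `target_arch::Instant` calls introduced above carry the whole wasm32 port. Only the re-export list (`interval`, `sleep`, `spawn`, `Instant`, `Interval`) is confirmed by the `sn_networking/src/lib.rs` hunk further down; the module body below is a hedged sketch of how such an indirection is typically laid out, not the crate's actual code:

```rust
// Hypothetical sketch of a `target_arch` module: native builds re-export
// std/tokio primitives, while wasm32 builds substitute `wasmtimer` for timing
// (as the bootstrap.rs comments below state) and `wasm_bindgen_futures` for
// task spawning. Return types are simplified for illustration.
#[cfg(not(target_arch = "wasm32"))]
pub mod target_arch {
    pub use std::time::Instant;
    pub use tokio::time::{interval, sleep, Interval};

    /// Spawn a future on the tokio runtime, keeping its JoinHandle.
    pub fn spawn<F>(future: F) -> tokio::task::JoinHandle<F::Output>
    where
        F: std::future::Future + Send + 'static,
        F::Output: Send + 'static,
    {
        tokio::spawn(future)
    }
}

#[cfg(target_arch = "wasm32")]
pub mod target_arch {
    pub use wasmtimer::std::Instant;
    pub use wasmtimer::tokio::{interval, sleep, Interval};

    /// Spawn a future on the browser event loop; there is no JoinHandle.
    pub fn spawn<F>(future: F)
    where
        F: std::future::Future<Output = ()> + 'static,
    {
        wasm_bindgen_futures::spawn_local(future);
    }
}
```

Call sites such as `let _handle = sn_networking::target_arch::spawn(...)` above compile against either variant, since the handle is discarded on both targets. The `handle_event_receiver` hunk continues below.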
let mut sender = Some(sender); let mut unsupported_protocols = vec![]; let mut timeout_timer = interval(Duration::from_secs(CONNECT_TIMEOUT_SECS)); + + #[cfg(not(target_arch = "wasm32"))] timeout_timer.tick().await; loop { diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 0e8ff3f61d..0d445cc414 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -32,5 +32,7 @@ pub use sn_evm::EvmWallet as Wallet; pub use bytes::Bytes; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use libp2p::Multiaddr; +#[doc(no_inline)] // Place this under 'Re-exports' in the docs. +pub use xor_name::XorName; pub use client::Client; diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs index 77a057fde2..5277ea1d56 100644 --- a/autonomi/tests/common.rs +++ b/autonomi/tests/common.rs @@ -1,8 +1,5 @@ use bytes::Bytes; -use libp2p::Multiaddr; use rand::Rng; -use sn_peers_acquisition::parse_peer_addr; -use std::env; #[allow(dead_code)] pub fn gen_random_data(len: usize) -> Bytes { @@ -19,14 +16,19 @@ pub fn enable_logging() { .try_init(); } +#[cfg(target_arch = "wasm32")] #[allow(dead_code)] -/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. -/// -/// An empty `Vec` will be returned if the env var is not set. -pub fn peers_from_env() -> Result, libp2p::multiaddr::Error> { - let Ok(peers_str) = env::var("SAFE_PEERS") else { - return Ok(vec![]); - }; +pub fn enable_logging_wasm(directive: impl AsRef) { + use tracing_subscriber::prelude::*; - peers_str.split(',').map(parse_peer_addr).collect() + console_error_panic_hook::set_once(); + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) // Only partially supported across browsers + .without_time() // std::time is not available in browsers + .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console + tracing_subscriber::registry() + .with(fmt_layer) + .with(tracing_subscriber::EnvFilter::new(directive)) + .init(); } diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index 6b2d58ad9d..efac39eacb 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -1,4 +1,4 @@ -#![cfg(feature = "files")] +#![cfg(all(feature = "files", feature = "fs"))] mod common; diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index e1265d8e59..25f3643e68 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -1,30 +1,25 @@ +#![cfg(target_arch = "wasm32")] + use std::time::Duration; use autonomi::Client; -use test_utils::evm::get_funded_wallet; -use tokio::time::sleep; +use sn_networking::target_arch::sleep; use wasm_bindgen_test::*; mod common; wasm_bindgen_test_configure!(run_in_browser); -#[tokio::test] #[wasm_bindgen_test] -async fn file() -> Result<(), Box> { - common::enable_logging(); - - let peers = vec![ - "/ip4/127.0.0.1/tcp/35499/ws/p2p/12D3KooWGN5RqREZ4RYtsUc3DNCkrNSVXEzTYEbMb1AZx2rNddoW" - .try_into() - .expect("str to be valid multiaddr"), - ]; - - let client = Client::connect(&peers).await.unwrap(); - let wallet = get_funded_wallet(); +async fn put() -> Result<(), Box> { + common::enable_logging_wasm("sn_networking,autonomi,wasm"); - let data = common::gen_random_data(1024 * 1024 * 10); + let client = Client::connect(&test_utils::peers_from_env()?) 
+ .await + .unwrap(); + let wallet = test_utils::evm::get_funded_wallet(); + let data = common::gen_random_data(1024 * 1024 * 2); // 2MiB let addr = client.put(data.clone(), &wallet).await.unwrap(); sleep(Duration::from_secs(2)).await; diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml index e779493126..e51ddb0bd7 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi_cli/Cargo.toml @@ -10,7 +10,7 @@ metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] [dependencies] -autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files"] } +autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files", "fs"] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" dirs-next = "~2.0.0" diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs index 45b60a24df..3ff2aaa55a 100644 --- a/autonomi_cli/src/commands/file.rs +++ b/autonomi_cli/src/commands/file.rs @@ -14,7 +14,7 @@ use color_eyre::eyre::Result; use std::path::PathBuf; pub async fn cost(file: &str, peers: Vec) -> Result<()> { - let mut client = crate::actions::connect_to_network(peers).await?; + let client = crate::actions::connect_to_network(peers).await?; println!("Getting upload cost..."); let cost = client diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 79c2170270..a57f694510 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -8,6 +8,9 @@ name = "evmlib" repository = "https://github.com/maidsafe/safe_network" version = "0.1.0" +[features] +wasm-bindgen = ["alloy/wasm-bindgen"] + [dependencies] alloy = { version = "0.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } serde = "1.0" diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index 608f8116af..f8b7cf1e59 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -134,8 +134,12 @@ impl ContinuousBootstrap { "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" ); + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. + #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] let mut new_interval = interval(no_peer_added_slowdown_interval_duration); - new_interval.tick().await; // the first tick completes immediately + #[cfg(not(target_arch = "wasm32"))] + new_interval.tick().await; + return (should_bootstrap, Some(new_interval)); } @@ -145,8 +149,13 @@ impl ContinuousBootstrap { let new_interval = BOOTSTRAP_INTERVAL * step; let new_interval = if new_interval > current_interval { info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); + + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. 
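That first-tick mismatch generalises beyond bootstrapping. A minimal free-standing sketch (the helper name is assumed) of the same guard, built on the `interval` re-export that `sn_networking` exposes:

```rust
use std::time::Duration;
use sn_networking::target_arch::{interval, Interval};

/// Sketch: build an interval that behaves identically on native and wasm32.
/// Tokio's `Interval` completes its first tick immediately; `wasmtimer`'s
/// does not, so the immediate tick is consumed on native targets only.
async fn uniform_interval(period: Duration) -> Interval {
    #[cfg_attr(target_arch = "wasm32", allow(unused_mut))]
    let mut timer = interval(period);
    #[cfg(not(target_arch = "wasm32"))]
    timer.tick().await; // swallow tokio's instant first tick
    timer
}
```

The bootstrap hunk resumes below with exactly this shape.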
+ #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] let mut interval = interval(new_interval); - interval.tick().await; // the first tick completes immediately + #[cfg(not(target_arch = "wasm32"))] + interval.tick().await; + Some(interval) } else { None diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 30a999a46b..6551f6e5f0 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -8,7 +8,8 @@ use crate::{ driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record, - GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE, + target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, + CLOSE_GROUP_SIZE, }; use itertools::Itertools; use libp2p::kad::{ @@ -20,10 +21,7 @@ use sn_protocol::{ PrettyPrintRecordKey, }; use sn_transfers::SignedSpend; -use std::{ - collections::{hash_map::Entry, BTreeSet, HashSet}, - time::Instant, -}; +use std::collections::{hash_map::Entry, BTreeSet, HashSet}; use tokio::sync::oneshot; use xor_name::XorName; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 856f0559a3..1b0d225a7c 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -48,7 +48,6 @@ pub use metrics::service::MetricsRegistries; pub use target_arch::{interval, sleep, spawn, Instant, Interval}; use self::{cmd::NetworkSwarmCmd, error::Result}; -use backoff::{Error as BackoffError, ExponentialBackoff}; use futures::future::select_all; use libp2p::{ identity::Keypair, @@ -514,7 +513,7 @@ impl Network { ) -> Result { let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( - ExponentialBackoff { + backoff::ExponentialBackoff { // None sets a random duration, but we'll be terminating with a BackoffError::Permanent, so retry will // be disabled. max_elapsed_time: retry_duration, @@ -532,7 +531,7 @@ impl Network { let result = receiver.await.map_err(|e| { error!("When fetching record {pretty_key:?}, encountered a channel error {e:?}"); NetworkError::InternalMsgChannelDropped - }).map_err(|err| BackoffError::Transient { err, retry_after: None })?; + }).map_err(|err| backoff::Error::Transient { err, retry_after: None })?; // log the results match &result { @@ -562,13 +561,13 @@ impl Network { // if we don't want to retry, throw permanent error if cfg.retry_strategy.is_none() { if let Err(e) = result { - return Err(BackoffError::Permanent(NetworkError::from(e))); + return Err(backoff::Error::Permanent(NetworkError::from(e))); } } if result.is_err() { debug!("Getting record from network of {pretty_key:?} via backoff..."); } - result.map_err(|err| BackoffError::Transient { + result.map_err(|err| backoff::Error::Transient { err: NetworkError::from(err), retry_after: None, }) @@ -621,6 +620,18 @@ impl Network { /// Put `Record` to network /// Optionally verify the record is stored after putting it to network /// If verify is on, retry multiple times within MAX_PUT_RETRY_DURATION duration. 
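The `backoff` rewrites above keep restating one shape. A condensed sketch, with a hypothetical stand-in operation; the `Error::Transient` and `Error::Permanent` variants are exactly the ones the `sn_networking/src/lib.rs` hunks use:

```rust
use backoff::{future::retry, Error, ExponentialBackoff};

/// Hypothetical fallible operation standing in for a record PUT or GET.
async fn try_once() -> Result<(), String> {
    Err("transient network failure".to_string())
}

/// Sketch of the retry shape: wrap failures in `Error::Transient` to re-run
/// them with exponential backoff, or in `Error::Permanent` to abort at once
/// (how the code above disables retries when no strategy is configured).
async fn with_retry(retry_enabled: bool) -> Result<(), String> {
    retry(ExponentialBackoff::default(), || async {
        try_once().await.map_err(|err| {
            if retry_enabled {
                Error::Transient { err, retry_after: None }
            } else {
                Error::Permanent(err)
            }
        })
    })
    .await
}
```

The wasm32 `put_record` added below skips this wrapper entirely and attempts the PUT once.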
+ #[cfg(target_arch = "wasm32")] + pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { + let pretty_key = PrettyPrintRecordKey::from(&record.key); + + info!("Attempting to PUT record with key: {pretty_key:?} to network, with cfg {cfg:?}"); + self.put_record_once(record.clone(), cfg).await + } + + /// Put `Record` to network + /// Optionally verify the record is stored after putting it to network + /// If verify is on, retry multiple times within MAX_PUT_RETRY_DURATION duration. + #[cfg(not(target_arch = "wasm32"))] pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { let pretty_key = PrettyPrintRecordKey::from(&record.key); @@ -628,7 +639,7 @@ impl Network { // So a long validation time will limit the number of PUT retries we attempt here. let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( - ExponentialBackoff { + backoff::ExponentialBackoff { // None sets a random duration, but we'll be terminating with a BackoffError::Permanent, so retry will // be disabled. max_elapsed_time: retry_duration, @@ -643,9 +654,9 @@ impl Network { warn!("Failed to PUT record with key: {pretty_key:?} to network (retry via backoff) with error: {err:?}"); if cfg.retry_strategy.is_some() { - BackoffError::Transient { err, retry_after: None } + backoff::Error::Transient { err, retry_after: None } } else { - BackoffError::Permanent(err) + backoff::Error::Permanent(err) } }) diff --git a/sn_node/src/metrics.rs b/sn_node/src/metrics.rs index b018a432a1..83ae86e4d6 100644 --- a/sn_node/src/metrics.rs +++ b/sn_node/src/metrics.rs @@ -17,7 +17,7 @@ use prometheus_client::{ info::Info, }, }; -use sn_networking::Instant; +use sn_networking::target_arch::Instant; #[cfg(feature = "open-metrics")] use sn_networking::MetricsRegistries; diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 2944073de5..920ec049cd 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -19,3 +19,4 @@ evmlib = { path = "../evmlib", version = "0.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } serde = { version = "1.0.133", features = [ "derive"]} serde_json = "1.0" +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 4ec41ca5d2..9deff40510 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -10,9 +10,7 @@ use const_hex::ToHexExt; use evmlib::CustomNetwork; use std::env; -fn get_var_or_panic(var: &str) -> String { - env::var(var).unwrap_or_else(|_| panic!("{var} environment variable needs to be set")) -} +use crate::env_from_runtime_or_compiletime; pub fn evm_network_from_env() -> evmlib::Network { let evm_network = env::var("EVM_NETWORK").ok(); @@ -30,9 +28,11 @@ pub fn evm_network_from_env() -> evmlib::Network { ) } else { ( - get_var_or_panic("RPC_URL"), - get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), - get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), + env_from_runtime_or_compiletime!("RPC_URL").expect("`RPC_URL` not set"), + env_from_runtime_or_compiletime!("PAYMENT_TOKEN_ADDRESS") + .expect("`PAYMENT_TOKEN_ADDRESS` not set"), + env_from_runtime_or_compiletime!("CHUNK_PAYMENTS_ADDRESS") + .expect("`CHUNK_PAYMENTS_ADDRESS` not set"), ) }; @@ -45,11 +45,10 @@ pub fn evm_network_from_env() -> evmlib::Network { pub fn get_funded_wallet() -> evmlib::wallet::Wallet { let network = evm_network_from_env(); - // Default deployer wallet of the testnet. 
- const DEFAULT_WALLET_PRIVATE_KEY: &str = - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); + let private_key = env_from_runtime_or_compiletime!("EVM_PRIVATE_KEY").unwrap_or_else(|| { + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80".to_string() + }); evmlib::wallet::Wallet::new_from_private_key(network, &private_key) .expect("Invalid private key") diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index e2ddf72f2f..b75ac7149d 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -6,5 +6,34 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. +use sn_peers_acquisition::parse_peer_addr; + pub mod evm; pub mod testnet; + +// Get environment variable from runtime or build time, in that order. Returns `None` if not set. +macro_rules! env_from_runtime_or_compiletime { + ($var:literal) => {{ + if let Ok(val) = std::env::var($var) { + Some(val) + } else if let Some(val) = option_env!($var) { + Some(val.to_string()) + } else { + None + } + }}; +} + +pub(crate) use env_from_runtime_or_compiletime; +use libp2p::Multiaddr; + +/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. +/// +/// An empty `Vec` will be returned if the env var is not set. +pub fn peers_from_env() -> Result, libp2p::multiaddr::Error> { + let Some(peers_str) = env_from_runtime_or_compiletime!("SAFE_PEERS") else { + return Ok(vec![]); + }; + + peers_str.split(',').map(parse_peer_addr).collect() +} From 2686fb8e019de5a8013d96126421b66d1f4dc012 Mon Sep 17 00:00:00 2001 From: qima Date: Thu, 3 Oct 2024 22:10:53 +0800 Subject: [PATCH 106/255] fix(CI): fix bootstrap peer parsing error --- .github/workflows/memcheck.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml index b9965b64f3..99f5b93609 100644 --- a/.github/workflows/memcheck.yml +++ b/.github/workflows/memcheck.yml @@ -60,8 +60,9 @@ jobs: - name: Set SAFE_PEERS run: | - safe_peers=$(rg "Local node is listening .+ on \".+\"" $BOOTSTRAP_NODE_DATA_PATH -u | \ - rg '/ip4.*$' -m1 -o | rg '"' -r '') + safe_peers=$(rg "Local node is listening .+ on .+" $BOOTSTRAP_NODE_DATA_PATH -u | \ + rg '/ip4.*$' -m1 -o) + echo $safe_peers echo "SAFE_PEERS=$safe_peers" >> $GITHUB_ENV - name: Check SAFE_PEERS was set From cc6dcc0b1fc63739b40253e02c7cb29a1e192a69 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 30 Sep 2024 09:10:47 +0200 Subject: [PATCH 107/255] chore: fix typos --- README.md | 6 +++--- adr/libp2p/identify-interval.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 33bbd87661..856bce8bc5 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ The `websockets` feature is available for the `sn_networking` crate, and above, tcp over websockets. If building for `wasm32` then `websockets` are enabled by default as this is the only method -avilable to communicate with a network as things stand. (And that network must have `websockets` +available to communicate with a network as things stand. (And that network must have `websockets` enabled.) ##### Building for wasm32 @@ -129,7 +129,7 @@ YMMV until stabilised. 
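The `env_from_runtime_or_compiletime!` macro added to `test_utils` above exists because `std::env::var` has nothing to read at runtime on wasm32; values must instead be baked in via `option_env!` when the test binary is built. A hedged usage sketch (crate-internal, since the macro is `pub(crate)`; the fallback URL is hypothetical):

```rust
/// Sketch: resolve `RPC_URL` from the runtime environment first, then from
/// the build-time environment, then fall back to a made-up local default.
fn rpc_url() -> String {
    env_from_runtime_or_compiletime!("RPC_URL")
        .unwrap_or_else(|| "http://localhost:4343".to_string())
}
```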
- [Transfers](https://github.com/maidsafe/safe_network/blob/main/sn_transfers/README.md)
  The transfers crate, used to send and receive tokens on the network.
- [Peers Acquisition](https://github.com/maidsafe/safe_network/blob/main/sn_peers_acquisition/README.md)
-  The peers peers acqisition crate, or: how the network layer discovers bootstrap peers.
+  The peers acquisition crate, or: how the network layer discovers bootstrap peers.
- [Build Info](https://github.com/maidsafe/safe_network/blob/main/sn_build_info/README.md)
  Small helper used to get the build/commit versioning info for debug purposes.
@@ -210,7 +210,7 @@ Make sure you made a backup copy of the "recovery secret" generated by the above
 one you have provided when prompted.
 
 If any changes are now made to files or directories within this folder (at this point all files and
-folders are considered new since it has just been initalised for tracking), before trying to push
+folders are considered new since it has just been initialised for tracking), before trying to push
 those changes to the network, we can get a report of the changes that have been made locally:
 
 ```bash
diff --git a/adr/libp2p/identify-interval.md b/adr/libp2p/identify-interval.md
index 59dd9db4c6..1b068c1637 100644
--- a/adr/libp2p/identify-interval.md
+++ b/adr/libp2p/identify-interval.md
@@ -8,7 +8,7 @@ Accepted
 
 Idle nodes in a network of moderate data have a high ongoing bandwidth.
 
-This appears to be because of the identify polling of nodes, which occurs at the deafult libp2p rate, of once per 5 minutes.
+This appears to be because of the identify polling of nodes, which occurs at the default libp2p rate, of once per 5 minutes.
 
 We see ~1mb/s traffic on nodes in a moderate network.
 
From 71f93588b4420f0526074c918efbd7f7837e441e Mon Sep 17 00:00:00 2001
From: Benno Zeeman
Date: Fri, 4 Oct 2024 08:34:40 +0200
Subject: [PATCH 108/255] docs(autonomi): fix wasm-pack test cli in README

---
 autonomi/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autonomi/README.md b/autonomi/README.md
index eafb99718a..ad43040969 100644
--- a/autonomi/README.md
+++ b/autonomi/README.md
@@ -73,7 +73,7 @@ To run a WASM test
 
 Example:
 ```sh
-SAFE_PEERS=/ip4/<IP>/tcp/<PORT>/ws/p2p/<PEER_ID> wasm-pack test --release --features=data,files --firefox autonomi --test wasm -- put
+SAFE_PEERS=/ip4/<IP>/tcp/<PORT>/ws/p2p/<PEER_ID> wasm-pack test --release --firefox autonomi --features=data,files --test wasm -- put
 ```
 
From cc6dcc0b1fc63739b40253e02c7cb29a1e192a69 Mon Sep 17 00:00:00 2001
From: Warm Beer
Date: Thu, 3 Oct 2024 17:26:31 +0200
Subject: [PATCH 109/255] chore(evmlib): update alloy to `0.4.2`

---
 Cargo.lock                                  | 255 +++++++++++++-------
 evmlib/Cargo.toml                           |   2 +-
 evmlib/src/contract/chunk_payments/error.rs |   2 +
 evmlib/src/contract/chunk_payments/mod.rs   |  24 +-
 evmlib/src/contract/network_token.rs        |   2 +
 evmlib/src/testnet.rs                       |  28 ++-
 evmlib/src/wallet.rs                        |  20 +-
 evmlib/tests/chunk_payments.rs              |  40 ++-
 evmlib/tests/network_token.rs               |  17 +-
 9 files changed, 273 insertions(+), 117 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 47d0cdfb71..af208196a3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -118,9 +118,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
 
 [[package]]
 name = "alloy"
-version = "0.2.1"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f4a4aaae80afd4be443a6aecd92a6b255dcdd000f97996928efb33d8a71e100"
+checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2"
 dependencies = [
"alloy-consensus", "alloy-contract", @@ -151,23 +151,25 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c309895995eaa4bfcc345f5515a39c7df9447798645cc8bf462b6c5bf1dc96" +checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "auto_impl", "c-kzg", + "derive_more", "serde", ] [[package]] name = "alloy-contract" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f4e0ef72b0876ae3068b2ed7dfae9ae1779ce13cfaec2ee1f08f5bd0348dc57" +checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -185,21 +187,22 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "529fc6310dc1126c8de51c376cbc59c79c7f662bd742be7dc67055d5421a81b4" +checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", "alloy-primitives", + "alloy-rlp", "alloy-sol-types", ] [[package]] name = "alloy-dyn-abi" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413902aa18a97569e60f679c23f46a18db1656d87ab4d4e49d0e1e52042f66df" +checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -212,16 +215,41 @@ dependencies = [ "winnow", ] +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + [[package]] name = "alloy-eips" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9431c99a3b3fe606ede4b3d4043bdfbcb780c45b8d8d226c3804e2b75cfbe68" +checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" dependencies = [ + "alloy-eip2930", + "alloy-eip7702", "alloy-primitives", "alloy-rlp", "alloy-serde", "c-kzg", + "derive_more", "once_cell", "serde", "sha2 0.10.8", @@ -229,9 +257,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79614dfe86144328da11098edcc7bc1a3f25ad8d3134a9eb9e857e06f0d9840d" +checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" dependencies = [ "alloy-primitives", "alloy-serde", @@ -240,9 +268,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc05b04ac331a9f07e3a4036ef7926e49a8bf84a99a1ccfc7e2ab55a5fcbb372" +checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -252,9 +280,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" 
-version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e2865c4c3bb4cdad3f0d9ec1ab5c0c657ba69a375651bd35e32fb6c180ccc2" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -266,9 +294,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e701fc87ef9a3139154b0b4ccb935b565d27ffd9de020fe541bf2dec5ae4ede" +checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -287,10 +315,12 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d5a0f9170b10988b6774498a022845e13eda94318440d17709d50687f67f9" +checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-serde", "serde", @@ -298,13 +328,14 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16faebb9ea31a244fd6ce3288d47df4be96797d9c3c020144b8f2c31543a4512" +checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" dependencies = [ "alloy-genesis", "alloy-primitives", "k256", + "rand 0.8.5", "serde_json", "tempfile", "thiserror", @@ -314,31 +345,36 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", "derive_more", + "hashbrown 0.14.5", "hex-literal", + "indexmap 2.5.0", "itoa", "k256", "keccak-asm", + "paste", "proptest", "rand 0.8.5", "ruint", + "rustc-hash", "serde", + "sha3 0.10.8", "tiny-keccak", ] [[package]] name = "alloy-provider" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9c0ab10b93de601a6396fc7ff2ea10d3b28c46f079338fa562107ebf9857c8" +checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" dependencies = [ "alloy-chains", "alloy-consensus", @@ -365,6 +401,7 @@ dependencies = [ "reqwest 0.12.7", "serde", "serde_json", + "thiserror", "tokio", "tracing", "url", @@ -394,11 +431,12 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b38e3ffdb285df5d9f60cb988d336d9b8e3505acb78750c3bc60336a7af41d3" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" dependencies = [ "alloy-json-rpc", + "alloy-primitives", "alloy-transport", "alloy-transport-http", "futures", @@ -408,17 +446,18 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-rpc-types" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c31a3750b8f5a350d17354e46a52b0f2f19ec5f2006d816935af599dedc521" +checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" 
dependencies = [ + "alloy-primitives", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", "alloy-serde", @@ -427,9 +466,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ab6509cd38b2e8c8da726e0f61c1e314a81df06a38d37ddec8bced3f8d25ed" +checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" dependencies = [ "alloy-primitives", "alloy-serde", @@ -438,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e18424d962d7700a882fe423714bd5b9dde74c7a7589d4255ea64068773aef" +checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" dependencies = [ "alloy-consensus", "alloy-eips", @@ -449,17 +488,17 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", + "derive_more", "itertools 0.13.0", "serde", "serde_json", - "thiserror", ] [[package]] name = "alloy-serde" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33feda6a53e6079895aed1d08dcb98a1377b000d80d16370fbbdb8155d547ef" +checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" dependencies = [ "alloy-primitives", "serde", @@ -468,9 +507,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740a25b92e849ed7b0fa013951fe2f64be9af1ad5abe805037b44fb7770c5c47" +checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" dependencies = [ "alloy-primitives", "async-trait", @@ -482,9 +521,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0707d4f63e4356a110b30ef3add8732ab6d181dd7be4607bf79b8777105cee" +checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" dependencies = [ "alloy-consensus", "alloy-network", @@ -498,13 +537,13 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b40397ddcdcc266f59f959770f601ce1280e699a91fc1862f29cef91707cd09" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", "syn 2.0.77", @@ -512,16 +551,16 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "867a5469d61480fea08c7333ffeca52d5b621f5ca2e44f271b117ec1fc9a0525" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", "indexmap 2.5.0", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", "syn 2.0.77", @@ -531,9 +570,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e482dc33a32b6fadbc0f599adea520bd3aaa585c141a80b404d0a3e3fa72528" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" dependencies = [ 
"alloy-json-abi", "const-hex", @@ -548,9 +587,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" +checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" dependencies = [ "serde", "winnow", @@ -558,9 +597,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91ca40fa20793ae9c3841b83e74569d1cc9af29a2f5237314fd3452d51e38c7" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -571,9 +610,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0590afbdacf2f8cca49d025a2466f3b6584a016a8b28f532f29f8da1007bae" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -583,22 +622,22 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-transport-http" -version = "0.2.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2437d145d80ea1aecde8574d2058cceb8b3c9cba05f6aea8e67907c660d46698" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.7", "serde_json", - "tower", + "tower 0.5.1", "tracing", "url", ] @@ -1096,7 +1135,7 @@ dependencies = [ "rustversion", "serde", "sync_wrapper 0.1.2", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", ] @@ -1802,7 +1841,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" dependencies = [ "async-trait", - "convert_case 0.6.0", + "convert_case", "json5", "lazy_static", "nom", @@ -1883,12 +1922,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "convert_case" version = "0.6.0" @@ -2233,11 +2266,12 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", + "crossbeam-utils", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2346,15 +2380,23 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.18" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "convert_case 0.4.0", "proc-macro2", "quote", - "rustc_version 0.4.1", "syn 2.0.77", + "unicode-xid", ] [[package]] @@ -3763,6 +3805,7 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", + "serde", ] [[package]] @@ -4132,7 +4175,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -4294,6 +4337,7 @@ checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", + "serde", ] [[package]] @@ -6461,6 +6505,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -6782,6 +6848,7 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", + "serde", ] [[package]] @@ -7817,6 +7884,16 @@ dependencies = [ "opaque-debug 0.3.1", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + [[package]] name = "sha3-asm" version = "0.1.4" @@ -8020,7 +8097,7 @@ dependencies = [ "serde_derive", "sha2 0.8.2", "sha2 0.9.9", - "sha3", + "sha3 0.9.1", "thiserror", "typenum", "zeroize", @@ -8518,9 +8595,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c837dc8852cb7074e46b444afb81783140dab12c58867b49fb3898fbafedf7ea" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" dependencies = [ "paste", "proc-macro2", @@ -8971,7 +9048,7 @@ dependencies = [ "tokio-rustls 0.22.0", "tokio-stream", "tokio-util 0.6.10", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -9000,7 +9077,7 @@ dependencies = [ "prost 0.11.9", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -9038,6 +9115,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 79c2170270..14880f3455 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.1.0" [dependencies] -alloy = { version = "0.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +alloy = { 
version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } serde = "1.0" thiserror = "1.0" tokio = "1.38.0" diff --git a/evmlib/src/contract/chunk_payments/error.rs b/evmlib/src/contract/chunk_payments/error.rs index 9e0770a0a8..84bd2c6c9a 100644 --- a/evmlib/src/contract/chunk_payments/error.rs +++ b/evmlib/src/contract/chunk_payments/error.rs @@ -9,6 +9,8 @@ pub enum Error { RpcError(#[from] RpcError), #[error(transparent)] NetworkTokenError(#[from] network_token::Error), + #[error(transparent)] + PendingTransactionError(#[from] alloy::providers::PendingTransactionError), #[error("The transfer limit of 256 has been exceeded")] TransferLimitExceeded, } diff --git a/evmlib/src/contract/chunk_payments/mod.rs b/evmlib/src/contract/chunk_payments/mod.rs index 9a8378c4bd..1d92dd035f 100644 --- a/evmlib/src/contract/chunk_payments/mod.rs +++ b/evmlib/src/contract/chunk_payments/mod.rs @@ -3,7 +3,7 @@ pub mod error; use crate::common; use crate::common::{Address, TxHash}; use crate::contract::chunk_payments::error::Error; -use crate::contract::chunk_payments::ChunkPaymentsContract::ChunkPaymentsContractInstance; +use crate::contract::chunk_payments::DataPaymentsContract::DataPaymentsContractInstance; use alloy::providers::{Network, Provider}; use alloy::sol; use alloy::transports::Transport; @@ -15,15 +15,15 @@ sol!( #[allow(clippy::too_many_arguments)] #[allow(missing_docs)] #[sol(rpc)] - ChunkPaymentsContract, + DataPaymentsContract, "artifacts/ChunkPayments.json" ); -pub struct ChunkPayments, N: Network> { - pub contract: ChunkPaymentsContractInstance, +pub struct DataPayments, N: Network> { + pub contract: DataPaymentsContractInstance, } -impl ChunkPayments +impl DataPayments where T: Transport + Clone, P: Provider, @@ -31,23 +31,23 @@ where { /// Create a new ChunkPayments contract instance. pub fn new(contract_address: Address, provider: P) -> Self { - let contract = ChunkPaymentsContract::new(contract_address, provider); - ChunkPayments { contract } + let contract = DataPaymentsContract::new(contract_address, provider); + DataPayments { contract } } /// Deploys the ChunkPayments smart contract to the network of the provider. /// ONLY DO THIS IF YOU KNOW WHAT YOU ARE DOING! pub async fn deploy(provider: P, payment_token_address: Address) -> Self { - let contract = ChunkPaymentsContract::deploy(provider, payment_token_address) + let contract = DataPaymentsContract::deploy(provider, payment_token_address) .await .expect("Could not deploy contract"); - ChunkPayments { contract } + DataPayments { contract } } pub fn set_provider(&mut self, provider: P) { let address = *self.contract.address(); - self.contract = ChunkPaymentsContract::new(address, provider); + self.contract = DataPaymentsContract::new(address, provider); } /// Pay for quotes. 
@@ -56,9 +56,9 @@ where &self, chunk_payments: I, ) -> Result { - let chunk_payments: Vec = chunk_payments + let chunk_payments: Vec = chunk_payments .into_iter() - .map(|(hash, addr, amount)| ChunkPaymentsContract::ChunkPayment { + .map(|(hash, addr, amount)| ChunkPayments::ChunkPayment { rewardAddress: addr, amount, quoteHash: hash, diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index 588f5eb12d..0b02a7be8d 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -18,6 +18,8 @@ pub enum Error { ContractError(#[from] alloy::contract::Error), #[error(transparent)] RpcError(#[from] RpcError), + #[error(transparent)] + PendingTransactionError(#[from] alloy::providers::PendingTransactionError), } pub struct NetworkToken, N: Network> { diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 015ee035c1..06e5da1361 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -1,12 +1,14 @@ use crate::common::Address; -use crate::contract::chunk_payments::ChunkPayments; +use crate::contract::chunk_payments::DataPayments; use crate::contract::network_token::NetworkToken; use crate::{CustomNetwork, Network}; use alloy::hex::ToHexExt; use alloy::network::{Ethereum, EthereumWallet}; use alloy::node_bindings::{Anvil, AnvilInstance}; -use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller}; -use alloy::providers::{ProviderBuilder, ReqwestProvider}; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider}; use alloy::signers::local::PrivateKeySigner; use alloy::transports::http::{Client, Http}; @@ -67,7 +69,13 @@ pub async fn deploy_network_token_contract( ) -> NetworkToken< Http, FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, @@ -92,10 +100,16 @@ pub async fn deploy_network_token_contract( pub async fn deploy_chunk_payments_contract( anvil: &AnvilInstance, token_address: Address, -) -> ChunkPayments< +) -> DataPayments< Http, FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, @@ -114,5 +128,5 @@ pub async fn deploy_chunk_payments_contract( .on_http(rpc_url); // Deploy the contract. 
- ChunkPayments::deploy(provider, token_address).await + DataPayments::deploy(provider, token_address).await } diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 498eb3afc2..dfc79ff990 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -1,13 +1,13 @@ use std::collections::BTreeMap; use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256}; -use crate::contract::chunk_payments::{ChunkPayments, MAX_TRANSFERS_PER_TRANSACTION}; +use crate::contract::chunk_payments::{DataPayments, MAX_TRANSFERS_PER_TRANSACTION}; use crate::contract::network_token::NetworkToken; use crate::contract::{chunk_payments, network_token}; use crate::Network; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; use alloy::providers::fillers::{ - ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, RecommendedFiller, WalletFiller, + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, }; use alloy::providers::{Identity, Provider, ProviderBuilder, ReqwestProvider}; use alloy::rpc::types::TransactionRequest; @@ -123,7 +123,10 @@ fn from_private_key(private_key: &str) -> Result { fn http_provider( rpc_url: reqwest::Url, ) -> FillProvider< - JoinFill, NonceFiller>, ChainIdFiller>, + JoinFill< + Identity, + JoinFill>>, + >, ReqwestProvider, Http, Ethereum, @@ -133,11 +136,18 @@ fn http_provider( .on_http(rpc_url) } +#[allow(clippy::type_complexity)] fn http_provider_with_wallet( rpc_url: reqwest::Url, wallet: EthereumWallet, ) -> FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, @@ -241,7 +251,7 @@ pub async fn pay_for_quotes>( .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); - let chunk_payments = ChunkPayments::new(*network.chunk_payments_address(), provider); + let chunk_payments = DataPayments::new(*network.chunk_payments_address(), provider); // Divide transfers over multiple transactions if they exceed the max per transaction. 
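Isolated from the wallet plumbing, the batching that comment describes is plain slice chunking; a minimal sketch:

```rust
/// Sketch of the batching used above: the payments contract accepts at most
/// `MAX_TRANSFERS_PER_TRANSACTION` (256) transfers per call, so larger
/// payment sets are split into fixed-size batches, each of which becomes its
/// own on-chain transaction.
fn batch<T: Clone>(payments: &[T], max_per_tx: usize) -> Vec<Vec<T>> {
    payments
        .chunks(max_per_tx)
        .map(|chunk| chunk.to_vec())
        .collect()
}
```

This is why one `pay_for_quotes` call can produce several transaction hashes, which the wallet code tracks per quote in `tx_hashes_by_quote`.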
let chunks = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION); diff --git a/evmlib/tests/chunk_payments.rs b/evmlib/tests/chunk_payments.rs index 244cceab43..e101e57e15 100644 --- a/evmlib/tests/chunk_payments.rs +++ b/evmlib/tests/chunk_payments.rs @@ -5,12 +5,14 @@ use alloy::network::{Ethereum, EthereumWallet}; use alloy::node_bindings::AnvilInstance; use alloy::primitives::utils::parse_ether; use alloy::providers::ext::AnvilApi; -use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller}; -use alloy::providers::{ProviderBuilder, ReqwestProvider, WalletProvider}; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider, WalletProvider}; use alloy::signers::local::{LocalSigner, PrivateKeySigner}; use alloy::transports::http::{Client, Http}; use evmlib::common::U256; -use evmlib::contract::chunk_payments::{ChunkPayments, MAX_TRANSFERS_PER_TRANSACTION}; +use evmlib::contract::chunk_payments::{DataPayments, MAX_TRANSFERS_PER_TRANSACTION}; use evmlib::contract::network_token::NetworkToken; use evmlib::testnet::{deploy_chunk_payments_contract, deploy_network_token_contract, start_node}; use evmlib::wallet::wallet_address; @@ -20,17 +22,35 @@ async fn setup() -> ( NetworkToken< Http, FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, >, Ethereum, >, - ChunkPayments< + DataPayments< Http, FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, @@ -54,7 +74,13 @@ async fn setup() -> ( async fn provider_with_gas_funded_wallet( anvil: &AnvilInstance, ) -> FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill>>, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, diff --git a/evmlib/tests/network_token.rs b/evmlib/tests/network_token.rs index 4f7a521abd..40ea9ba041 100644 --- a/evmlib/tests/network_token.rs +++ b/evmlib/tests/network_token.rs @@ -3,8 +3,10 @@ mod common; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet}; use alloy::node_bindings::AnvilInstance; use alloy::primitives::U256; -use alloy::providers::fillers::{FillProvider, JoinFill, RecommendedFiller, WalletFiller}; -use alloy::providers::{ReqwestProvider, WalletProvider}; +use alloy::providers::fillers::{ + BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller, +}; +use alloy::providers::{Identity, ReqwestProvider, WalletProvider}; use alloy::signers::local::PrivateKeySigner; use alloy::transports::http::{Client, Http}; use evmlib::contract::network_token::NetworkToken; @@ -17,7 +19,16 @@ async fn setup() -> ( NetworkToken< Http, FillProvider< - JoinFill>, + JoinFill< + JoinFill< + Identity, + JoinFill< + GasFiller, + JoinFill>, + >, + >, + WalletFiller, + >, ReqwestProvider, Http, Ethereum, From fa5c064b2f6b61b41d09f6bf46c87336cb294629 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 2 Oct 2024 15:07:10 +0530 Subject: [PATCH 110/255] chore: use port 4343 for the local anvil node --- evmlib/src/testnet.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 06e5da1361..e320c52795 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -60,6 +60,7 @@ pub fn start_node() -> AnvilInstance { // Spin up a local 
Anvil node.
     // Requires you to have Foundry installed: https://book.getfoundry.sh/getting-started/installation
     Anvil::new()
+        .port(4343_u16)
         .try_spawn()
         .expect("Could not spawn Anvil node")
 }

From 9615c0f3b4de3d00b6fab3dcc292442297fa6b34 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Wed, 2 Oct 2024 16:23:13 +0530
Subject: [PATCH 111/255] feat: make use of local EVM_NETWORK variable to fetch
 local network details

---
 Cargo.lock                     |   5 +-
 autonomi/tests/wallet.rs       |   9 ++-
 evm_testnet/Cargo.toml         |   1 +
 evm_testnet/src/main.rs        | 103 ++++++++++++++++++++++---------
 evmlib/Cargo.toml              |   2 +
 evmlib/src/lib.rs              |   3 +
 evmlib/src/utils.rs            |  66 ++++++++++++++++++---
 sn_node/tests/common/client.rs |  15 +----
 sn_node_manager/src/local.rs   |  10 ++--
 test_utils/Cargo.toml          |   2 -
 test_utils/src/evm.rs          |  39 +------------
 11 files changed, 160 insertions(+), 95 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index af208196a3..6110e42b18 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2736,6 +2736,7 @@ name = "evm_testnet"
 version = "0.1.0"
 dependencies = [
  "clap",
+ "dirs-next",
  "evmlib",
  "tokio",
 ]
@@ -2745,11 +2746,13 @@ name = "evmlib"
 version = "0.1.0"
 dependencies = [
  "alloy",
+ "dirs-next",
  "getrandom 0.2.15",
  "rand 0.8.5",
  "serde",
  "thiserror",
  "tokio",
+ "tracing",
 ]
@@ -8729,9 +8732,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
 name = "test_utils"
 version = "0.4.6"
 dependencies = [
- "autonomi",
  "color-eyre",
- "const-hex",
  "dirs-next",
  "evmlib",
  "libp2p 0.54.1",
diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs
index faf24109c7..d8245c7457 100644
--- a/autonomi/tests/wallet.rs
+++ b/autonomi/tests/wallet.rs
@@ -2,13 +2,15 @@ mod common;
 
 use const_hex::traits::FromHex;
 use evmlib::common::{Address, Amount};
+use evmlib::utils::evm_network_from_env;
 use evmlib::wallet::Wallet;
-use test_utils::evm::{evm_network_from_env, get_funded_wallet};
+use test_utils::evm::get_funded_wallet;
 
 #[tokio::test]
 async fn from_private_key() {
     let private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa";
-    let network = evm_network_from_env();
+    let network =
+        evm_network_from_env().expect("Could not get EVM network from environment variables");
     let wallet = Wallet::new_from_private_key(network, private_key).unwrap();
 
     assert_eq!(
@@ -19,7 +21,8 @@ async fn from_private_key() {
 
 #[tokio::test]
 async fn send_tokens() {
-    let network = evm_network_from_env();
+    let network =
+        evm_network_from_env().expect("Could not get EVM network from environment variables");
     let wallet = get_funded_wallet();
     let receiving_wallet = Wallet::new_with_random_wallet(network);
diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml
index 0bea2cd685..0828dfe6dd 100644
--- a/evm_testnet/Cargo.toml
+++ b/evm_testnet/Cargo.toml
@@ -10,6 +10,7 @@ version = "0.1.0"
 
 [dependencies]
 clap = { version = "4.5", features = ["derive"] }
+dirs-next = "~2.0.0"
 evmlib = { path = "../evmlib" }
 tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] }
 
diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs
index 8cef9ceb98..b0f50226cc 100644
--- a/evm_testnet/src/main.rs
+++ b/evm_testnet/src/main.rs
@@ -39,9 +39,12 @@ async fn start_node(genesis_wallet: Option<Address>
) { transfer_funds(&testnet, genesis).await; } - print_testnet_details(&testnet, genesis_wallet).await; + let testnet_data = TestnetData::new(&testnet, genesis_wallet).await; + testnet_data.save_csv(); + testnet_data.print(); keep_alive(testnet).await; + TestnetData::remove_csv(); println!("Ethereum node stopped."); } @@ -71,35 +74,83 @@ async fn transfer_funds(testnet: &Testnet, genesis_wallet: Address) { .await; } -async fn print_testnet_details(testnet: &Testnet, genesis_wallet: Option
) { - let network = testnet.to_network(); +async fn keep_alive(variable: T) { + let _ = tokio::signal::ctrl_c().await; + println!("Received Ctrl-C, stopping..."); + drop(variable); +} - println!("RPC URL: {}", network.rpc_url()); - println!("Payment token address: {}", network.payment_token_address()); - println!( - "Chunk payments address: {}", - network.chunk_payments_address() - ); - println!( - "Deployer wallet private key: {}", - testnet.default_wallet_private_key() - ); +#[derive(Debug)] +struct TestnetData { + rpc_url: String, + payment_token_address: String, + chunk_payments_address: String, + deployer_wallet_private_key: String, + tokens_and_gas: Option<(Amount, Amount)>, +} - if let Some(genesis) = genesis_wallet { - let tokens = balance_of_tokens(genesis, &network) - .await - .unwrap_or(Amount::MIN); +impl TestnetData { + async fn new(testnet: &Testnet, genesis_wallet: Option
) -> Self { + let network = testnet.to_network(); + + let tokens_and_gas = if let Some(genesis) = genesis_wallet { + let tokens = balance_of_tokens(genesis, &network) + .await + .unwrap_or(Amount::MIN); + + let gas = balance_of_gas_tokens(genesis, &network) + .await + .unwrap_or(Amount::MIN); + Some((tokens, gas)) + } else { + None + }; + Self { + rpc_url: network.rpc_url().to_string(), + payment_token_address: network.payment_token_address().to_string(), + chunk_payments_address: network.chunk_payments_address().to_string(), + deployer_wallet_private_key: testnet.default_wallet_private_key(), + tokens_and_gas, + } + } - let gas = balance_of_gas_tokens(genesis, &network) - .await - .unwrap_or(Amount::MIN); + fn print(&self) { + println!("RPC URL: {}", self.rpc_url); + println!("Payment token address: {}", self.payment_token_address); + println!("Chunk payments address: {}", self.chunk_payments_address); + println!( + "Deployer wallet private key: {}", + self.deployer_wallet_private_key + ); + if let Some((tokens, gas)) = self.tokens_and_gas { + println!("Genesis wallet balance (atto): (tokens: {tokens}, gas: {gas})"); + } + } - println!("Genesis wallet balance (atto): (tokens: {tokens}, gas: {gas})"); + fn save_csv(&self) { + let path = dirs_next::data_dir() + .expect("Could not get data_dir to save evm testnet data") + .join("safe"); + if !path.exists() { + std::fs::create_dir_all(&path).expect("Could not create safe directory"); + } + let path = path.join("evm_testnet_data.csv"); + + let csv = format!( + "{},{},{}", + self.rpc_url, self.payment_token_address, self.chunk_payments_address + ); + std::fs::write(&path, csv).expect("Could not write to evm_testnet_data.csv file"); + println!("EVM testnet data saved to: {path:?}"); } -} -async fn keep_alive(variable: T) { - let _ = tokio::signal::ctrl_c().await; - println!("Received Ctrl-C, stopping..."); - drop(variable); + fn remove_csv() { + let path = dirs_next::data_dir() + .expect("Could not get data_dir to remove evm testnet data") + .join("safe") + .join("evm_testnet_data.csv"); + if path.exists() { + std::fs::remove_file(&path).expect("Could not remove evm_testnet_data.csv file"); + } + } } diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 14880f3455..a6563e6ddd 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -10,8 +10,10 @@ version = "0.1.0" [dependencies] alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } +dirs-next = "~2.0.0" serde = "1.0" thiserror = "1.0" +tracing = { version = "~0.1.26" } tokio = "1.38.0" rand = "0.8.5" diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index cd853bbb96..9f29044916 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -5,6 +5,9 @@ use alloy::transports::http::reqwest; use std::str::FromStr; use std::sync::LazyLock; +#[macro_use] +extern crate tracing; + pub mod common; pub mod contract; pub mod cryptography; diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 8fde529508..6af4fd61e6 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -1,8 +1,16 @@ use crate::common::{Address, Hash}; use crate::{CustomNetwork, Network}; +use dirs_next::data_dir; use rand::Rng; use std::env; -use std::env::VarError; + +pub const EVM_TESTNET_CSV_FILENAME: &str = "evm_testnet_data.csv"; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("Failed to get EVM network")] + FailedToGetEvmNetwork, +} /// Generate a random Address. 
 pub fn dummy_address() -> Address {
@@ -15,17 +23,59 @@ pub fn dummy_hash() -> Hash {
 }

 /// Get the `Network` from environment variables
-pub fn evm_network_from_env() -> Result<Network, VarError> {
-    const EVM_VARS: [&str; 3] = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "CHUNK_PAYMENTS_ADDRESS"];
-    let custom_vars_exist = EVM_VARS.iter().all(|var| env::var(var).is_ok());
+pub fn evm_network_from_env() -> Result<Network, Error> {
+    let evm_vars = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "CHUNK_PAYMENTS_ADDRESS"]
+        .iter()
+        .map(|var| env::var(var).map_err(|_| Error::FailedToGetEvmNetwork))
+        .collect::<Result<Vec<String>, Error>>();

-    if custom_vars_exist {
+    let use_local_evm = std::env::var("EVM_NETWORK")
+        .map(|v| v == "local")
+        .unwrap_or(false);
+    let use_arbitrum_one = std::env::var("EVM_NETWORK")
+        .map(|v| v == "arbitrum-one")
+        .unwrap_or(false);
+
+    if use_arbitrum_one {
+        Ok(Network::ArbitrumOne)
+    } else if use_local_evm {
+        local_evm_network_from_csv()
+    } else if let Ok(evm_vars) = evm_vars {
         Ok(Network::Custom(CustomNetwork::new(
-            &env::var(EVM_VARS[0])?,
-            &env::var(EVM_VARS[1])?,
-            &env::var(EVM_VARS[2])?,
+            &evm_vars[0],
+            &evm_vars[1],
+            &evm_vars[2],
         )))
     } else {
         Ok(Network::ArbitrumOne)
     }
 }
+
+/// Get the `Network::Custom` from the local EVM testnet CSV file
+pub fn local_evm_network_from_csv() -> Result<Network, Error> {
+    // load the csv
+    let csv_path = data_dir()
+        .ok_or(Error::FailedToGetEvmNetwork)
+        .inspect_err(|_| error!("Failed to get data dir when fetching evm testnet CSV file"))?
+        .join("safe")
+        .join(EVM_TESTNET_CSV_FILENAME);
+
+    if !csv_path.exists() {
+        error!("evm data csv path does not exist {:?}", csv_path);
+        return Err(Error::FailedToGetEvmNetwork);
+    }
+
+    let csv = std::fs::read_to_string(&csv_path)
+        .map_err(|_| Error::FailedToGetEvmNetwork)
+        .inspect_err(|_| error!("Failed to read evm testnet CSV file"))?;
+    let parts: Vec<&str> = csv.split(',').collect();
+    match parts.as_slice() {
+        [rpc_url, payment_token_address, chunk_payments_address] => Ok(Network::Custom(
+            CustomNetwork::new(rpc_url, payment_token_address, chunk_payments_address),
+        )),
+        _ => {
+            error!("Invalid data in evm testnet CSV file");
+            Err(Error::FailedToGetEvmNetwork)
+        }
+    }
+}
diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs
index 9e9336f492..849da14332 100644
--- a/sn_node/tests/common/client.rs
+++ b/sn_node/tests/common/client.rs
@@ -11,9 +11,9 @@ use eyre::Result;
 use sn_peers_acquisition::parse_peer_addr;
 use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest};
 use sn_service_management::{get_local_node_registry_path, NodeRegistry};
-use std::env;
 use std::{net::SocketAddr, path::Path};
-use test_utils::{evm::evm_network_from_env, testnet::DeploymentInventory};
+use test_utils::evm::get_funded_wallet;
+use test_utils::testnet::DeploymentInventory;
 use tokio::sync::Mutex;
 use tonic::Request;
 use tracing::{debug, info};
@@ -147,16 +147,7 @@ impl LocalNetwork {
     }

     fn get_funded_wallet() -> evmlib::wallet::Wallet {
-        let network = evm_network_from_env();
-        // Default deployer wallet of the testnet.
-        const DEFAULT_WALLET_PRIVATE_KEY: &str =
-            "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
-
-        let private_key =
-            env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string());
-
-        evmlib::wallet::Wallet::new_from_private_key(network, &private_key)
-            .expect("Invalid private key")
+        get_funded_wallet()
     }

     // Restart a local node by sending in the SafenodeRpcCmd::Restart to the node's RPC endpoint.
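
A minimal sketch, not part of the patch, of resolving the EVM network with the precedence implemented in `evm_network_from_env` above. It assumes the `evmlib` crate from this workspace and that `Network` derives `Debug`:

```rust
// Sketch only: network resolution order as implemented above.
//   EVM_NETWORK=arbitrum-one -> Network::ArbitrumOne
//   EVM_NETWORK=local        -> Network::Custom, loaded from the testnet CSV
//   RPC_URL + PAYMENT_TOKEN_ADDRESS + CHUNK_PAYMENTS_ADDRESS -> Network::Custom
//   anything else            -> Network::ArbitrumOne (the default)
use evmlib::utils::evm_network_from_env;

fn main() {
    std::env::set_var("EVM_NETWORK", "local");
    match evm_network_from_env() {
        // `Debug` on `Network` is assumed here for illustration.
        Ok(network) => println!("resolved EVM network: {network:?}"),
        Err(err) => eprintln!("failed to resolve EVM network: {err:?}"),
    }
}
```
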
diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index ed668c4b38..32442dd37d 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -7,6 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::add_services::config::PortRange; +#[cfg(feature = "faucet")] +use crate::helpers::get_username; use crate::helpers::{ check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; @@ -16,19 +18,15 @@ use colored::Colorize; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; #[cfg(test)] use mockall::automock; - use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; +#[cfg(feature = "faucet")] +use sn_service_management::FaucetServiceData; use sn_service_management::{ control::ServiceControl, rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, }; - -#[cfg(feature = "faucet")] -use crate::helpers::get_username; -#[cfg(feature = "faucet")] -use sn_service_management::FaucetServiceData; #[cfg(feature = "faucet")] use sn_transfers::get_faucet_data_dir; diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 2944073de5..b84708a395 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -11,9 +11,7 @@ version = "0.4.6" [dependencies] -autonomi = { path ="../autonomi", version = "0.1" } color-eyre = "~0.6.2" -const-hex = "1.12.0" dirs-next = "~2.0.0" evmlib = { path = "../evmlib", version = "0.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 4ec41ca5d2..f9025a8cf3 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -6,45 +6,12 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use const_hex::ToHexExt; -use evmlib::CustomNetwork; +use evmlib::utils::evm_network_from_env; use std::env; -fn get_var_or_panic(var: &str) -> String { - env::var(var).unwrap_or_else(|_| panic!("{var} environment variable needs to be set")) -} - -pub fn evm_network_from_env() -> evmlib::Network { - let evm_network = env::var("EVM_NETWORK").ok(); - let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one"); - - let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag { - ( - evmlib::Network::ArbitrumOne.rpc_url().to_string(), - evmlib::Network::ArbitrumOne - .payment_token_address() - .encode_hex_with_prefix(), - evmlib::Network::ArbitrumOne - .chunk_payments_address() - .encode_hex_with_prefix(), - ) - } else { - ( - get_var_or_panic("RPC_URL"), - get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), - get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), - ) - }; - - evmlib::Network::Custom(CustomNetwork::new( - &rpc_url, - &payment_token_address, - &chunk_payments_address, - )) -} - pub fn get_funded_wallet() -> evmlib::wallet::Wallet { - let network = evm_network_from_env(); + let network = + evm_network_from_env().expect("Failed to get EVM network from environment variables"); // Default deployer wallet of the testnet. 
const DEFAULT_WALLET_PRIVATE_KEY: &str = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; From ecef90fc7fa07ce424b2abcfd0c2201ac5758c03 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 2 Oct 2024 17:15:02 +0530 Subject: [PATCH 112/255] feat: implement evm-local flag for safenodeman --- autonomi/README.md | 10 +++--- sn_logging/src/layers.rs | 6 +++- sn_node_manager/src/bin/cli/main.rs | 16 ++++++++-- .../src/bin/cli/subcommands/evm_network.rs | 32 +++++++++++++++---- .../src/bin/cli/subcommands/mod.rs | 8 +++++ 5 files changed, 56 insertions(+), 16 deletions(-) diff --git a/autonomi/README.md b/autonomi/README.md index 2603768aea..c67b19001e 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -26,20 +26,18 @@ autonomi = { path = "../autonomi", version = "0.1.0" } cargo run --bin evm_testnet ``` -Take note of the console output for the next step (`RPC URL`, `Payment token address` & `Chunk payments address`). - -3. Run a local network with the `local-discovery` feature and pass the EVM params: +3. Run a local network with the `local-discovery` feature and use the local evm node. ```sh -cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-custom --rpc-url --payment-token-address --chunk-payments-address +cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-local ``` 4. Then run the tests with the `local` feature and pass the EVM params again: ```sh -$ RPC_URL= PAYMENT_TOKEN_ADDRESS= CHUNK_PAYMENTS_ADDRESS= cargo test --package=autonomi --features=local +$ EVM_NETWORK=local cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi RPC_URL= PAYMENT_TOKEN_ADDRESS= CHUNK_PAYMENTS_ADDRESS= cargo test --package=autonomi --features=local -- --nocapture +$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture ``` ### Using a live testnet or mainnet diff --git a/sn_logging/src/layers.rs b/sn_logging/src/layers.rs index 4fbd3c07ea..91f771e6b9 100644 --- a/sn_logging/src/layers.rs +++ b/sn_logging/src/layers.rs @@ -266,6 +266,8 @@ fn get_logging_targets(logging_env_value: &str) -> Result> if contains_keyword_all_sn_logs || contains_keyword_verbose_sn_logs { let mut t = BTreeMap::from_iter(vec![ // bins + ("autonomi_cli".to_string(), Level::TRACE), + ("evm_testnet".to_string(), Level::TRACE), ("faucet".to_string(), Level::TRACE), ("safenode".to_string(), Level::TRACE), ("safenode_rpc_client".to_string(), Level::TRACE), @@ -273,8 +275,10 @@ fn get_logging_targets(logging_env_value: &str) -> Result> ("safenode_manager".to_string(), Level::TRACE), ("safenodemand".to_string(), Level::TRACE), // libs - ("sn_build_info".to_string(), Level::TRACE), ("autonomi".to_string(), Level::TRACE), + ("evmlib".to_string(), Level::TRACE), + ("sn_evm".to_string(), Level::TRACE), + ("sn_build_info".to_string(), Level::TRACE), ("sn_client".to_string(), Level::TRACE), ("sn_faucet".to_string(), Level::TRACE), ("sn_logging".to_string(), Level::TRACE), diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index e1cf5faf6c..3e2598e676 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -1223,6 +1223,11 @@ async fn main() -> Result<()> { evm_network, skip_validation: _, } => { + let evm_network = if let Some(evm_network) = evm_network { + Some(evm_network.try_into()?) 
+ } else { + None + }; cmd::local::join( build, count, @@ -1240,7 +1245,7 @@ async fn main() -> Result<()> { peers, rpc_port, rewards_address, - evm_network.map(|v| v.into()), + evm_network, true, verbosity, ) @@ -1267,6 +1272,11 @@ async fn main() -> Result<()> { evm_network, skip_validation: _, } => { + let evm_network = if let Some(evm_network) = evm_network { + Some(evm_network.try_into()?) + } else { + None + }; cmd::local::run( build, clean, @@ -1284,7 +1294,7 @@ async fn main() -> Result<()> { owner_prefix, rpc_port, rewards_address, - evm_network.map(|v| v.into()), + evm_network, true, verbosity, ) @@ -1368,6 +1378,8 @@ async fn main() -> Result<()> { fn get_log_builder(level: Level) -> Result { let logging_targets = vec![ + ("evmlib".to_string(), level), + ("evm_testnet".to_string(), level), ("sn_peers_acquisition".to_string(), level), ("sn_node_manager".to_string(), level), ("safenode_manager".to_string(), level), diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs index 89c39a16f6..81e3535ab5 100644 --- a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs +++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs @@ -1,7 +1,17 @@ +// Copyright (C) 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use clap::Subcommand; -use sn_evm::{EvmNetwork, EvmNetworkCustom}; +use color_eyre::eyre::Result; +use sn_evm::{utils::local_evm_network_from_csv, EvmNetwork, EvmNetworkCustom}; #[derive(Subcommand, Clone, Debug)] +#[allow(clippy::enum_variant_names)] pub enum EvmNetworkCommand { /// Use the Arbitrum One network EvmArbitrumOne, @@ -20,22 +30,30 @@ pub enum EvmNetworkCommand { #[arg(long, short)] chunk_payments_address: String, }, + + /// Use the local EVM testnet, loaded from a CSV file. + EvmLocal, } -#[allow(clippy::from_over_into)] -impl Into for EvmNetworkCommand { - fn into(self) -> EvmNetwork { +impl TryInto for EvmNetworkCommand { + type Error = color_eyre::eyre::Error; + + fn try_into(self) -> Result { match self { - Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmArbitrumOne => Ok(EvmNetwork::ArbitrumOne), + Self::EvmLocal => { + let network = local_evm_network_from_csv()?; + Ok(network) + } Self::EvmCustom { rpc_url, payment_token_address, chunk_payments_address, - } => EvmNetwork::Custom(EvmNetworkCustom::new( + } => Ok(EvmNetwork::Custom(EvmNetworkCustom::new( &rpc_url, &payment_token_address, &chunk_payments_address, - )), + ))), } } } diff --git a/sn_node_manager/src/bin/cli/subcommands/mod.rs b/sn_node_manager/src/bin/cli/subcommands/mod.rs index 80b95f1ea5..7bc6eae583 100644 --- a/sn_node_manager/src/bin/cli/subcommands/mod.rs +++ b/sn_node_manager/src/bin/cli/subcommands/mod.rs @@ -1 +1,9 @@ +// Copyright (C) 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + pub mod evm_network; From 1ffcfc151a016cf7c8e02e3d0135db68b4b466e7 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 1 Oct 2024 16:29:31 +0530 Subject: [PATCH 113/255] feat(metrics): add close group shunned metrics --- sn_networking/src/driver.rs | 11 ++ sn_networking/src/event/mod.rs | 36 ++++- sn_networking/src/metrics/bad_node.rs | 212 ++++++++++++++++++++++---- sn_networking/src/metrics/mod.rs | 68 +++++++-- 4 files changed, 278 insertions(+), 49 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 823db7845e..9952ea671a 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -665,6 +665,8 @@ impl NetworkBuilder { local: self.local, is_client, is_behind_home_network: self.is_behind_home_network, + #[cfg(feature = "open-metrics")] + close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, bootstrap, relay_manager, @@ -715,6 +717,8 @@ pub struct SwarmDriver { pub(crate) local: bool, pub(crate) is_client: bool, pub(crate) is_behind_home_network: bool, + #[cfg(feature = "open-metrics")] + pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, pub(crate) bootstrap: ContinuousBootstrap, pub(crate) external_address_manager: ExternalAddressManager, @@ -991,6 +995,13 @@ impl SwarmDriver { metrics_recorder.record_from_marker(marker) } } + #[cfg(feature = "open-metrics")] + /// Updates metrics that rely on our current close group. + pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec) { + if let Some(metrics_recorder) = self.metrics_recorder.as_ref() { + metrics_recorder.record_change_in_close_group(new_close_group); + } + } /// Listen on the provided address. Also records it within RelayManager pub(crate) fn listen_on(&mut self, addr: Multiaddr) -> Result<()> { diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index ede545ae9e..7ad5db07c7 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -23,11 +23,11 @@ use libp2p::{ use sn_protocol::{ messages::{Query, Request, Response}, - NetworkAddress, PrettyPrintRecordKey, + NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use sn_transfers::PaymentQuote; use std::{ - collections::BTreeSet, + collections::{BTreeSet, HashSet}, fmt::{Debug, Formatter}, }; use tokio::sync::oneshot; @@ -216,6 +216,28 @@ impl Debug for NetworkEvent { } impl SwarmDriver { + /// Check for changes in our close group + #[cfg(feature = "open-metrics")] + pub(crate) fn check_for_change_in_our_close_group(&mut self) { + // this includes self + let closest_k_peers = self.get_closest_k_value_local_peers(); + + let new_closest_peers: Vec<_> = + closest_k_peers.into_iter().take(CLOSE_GROUP_SIZE).collect(); + + let old = self.close_group.iter().cloned().collect::>(); + let new_members: Vec<_> = new_closest_peers + .iter() + .filter(|p| !old.contains(p)) + .collect(); + if !new_members.is_empty() { + debug!("The close group has been updated. 
The new members are {new_members:?}"); + debug!("New close group: {new_closest_peers:?}"); + self.close_group = new_closest_peers.clone(); + self.record_change_in_close_group(new_closest_peers); + } + } + /// Update state on addition of a peer to the routing table. pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId) { self.peers_in_rt = self.peers_in_rt.saturating_add(1); @@ -226,6 +248,11 @@ impl SwarmDriver { self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); + #[cfg(feature = "open-metrics")] + if self.metrics_recorder.is_some() { + self.check_for_change_in_our_close_group(); + } + #[cfg(feature = "open-metrics")] if let Some(metrics_recorder) = &self.metrics_recorder { metrics_recorder @@ -244,6 +271,11 @@ impl SwarmDriver { self.log_kbuckets(&removed_peer); self.send_event(NetworkEvent::PeerRemoved(removed_peer, self.peers_in_rt)); + #[cfg(feature = "open-metrics")] + if self.metrics_recorder.is_some() { + self.check_for_change_in_our_close_group(); + } + #[cfg(feature = "open-metrics")] if let Some(metrics_recorder) = &self.metrics_recorder { metrics_recorder diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs index 578ba25cce..2499c1862e 100644 --- a/sn_networking/src/metrics/bad_node.rs +++ b/sn_networking/src/metrics/bad_node.rs @@ -7,20 +7,45 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::target_arch::interval; -use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; -use prometheus_client::metrics::{family::Family, gauge::Gauge}; -use std::time::{Duration, Instant}; +use libp2p::PeerId; +use prometheus_client::{ + encoding::{EncodeLabelSet, EncodeLabelValue}, + metrics::{family::Family, gauge::Gauge}, +}; +use sn_protocol::CLOSE_GROUP_SIZE; +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; use strum::IntoEnumIterator; const UPDATE_INTERVAL: Duration = Duration::from_secs(20); +pub struct BadNodeMetrics { + shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames, + shunned_by_close_group: Gauge, + shunned_by_old_close_group: Gauge, + + // trackers + close_group_peers: Vec, + old_close_group_peers: Vec<(PeerId, Instant)>, + // The close group peer that shunned us + close_group_peers_that_have_shunned_us: HashSet, + old_close_group_peers_that_have_shunned_us: HashSet, +} + +pub enum BadNodeMetricsMsg { + ShunnedByPeer(PeerId), + CloseGroupUpdated(Vec), +} + /// A struct to record the the number of reports against our node across different time frames. 
-pub struct ShunnedCountAcrossTimeFrames { +struct ShunnedCountAcrossTimeFrames { metric: Family, - tracked_values: Vec, + shunned_report_tracker: Vec, } -struct TrackedValue { +struct ShunnedReportTracker { time: Instant, least_bucket_it_fits_in: TimeFrameType, } @@ -77,28 +102,74 @@ impl TimeFrameType { } } -impl ShunnedCountAcrossTimeFrames { +impl BadNodeMetrics { pub fn spawn_background_task( time_based_shunned_count: Family, - ) -> tokio::sync::mpsc::Sender<()> { - let (tx, mut rx) = tokio::sync::mpsc::channel(10); + shunned_by_close_group: Gauge, + shunned_by_old_close_group: Gauge, + ) -> tokio::sync::mpsc::Sender { + let mut bad_node_metrics = BadNodeMetrics { + shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames { + metric: time_based_shunned_count, + shunned_report_tracker: Vec::new(), + }, + shunned_by_close_group, + shunned_by_old_close_group, + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + }; + let (tx, mut rx) = tokio::sync::mpsc::channel(10); tokio::spawn(async move { - let mut shunned_metrics = ShunnedCountAcrossTimeFrames { - metric: time_based_shunned_count, - tracked_values: Vec::new(), - }; let mut update_interval = interval(UPDATE_INTERVAL); update_interval.tick().await; loop { tokio::select! { - _ = rx.recv() => { - shunned_metrics.record_shunned_metric(); + msg = rx.recv() => { + match msg { + Some(BadNodeMetricsMsg::ShunnedByPeer(peer)) => { + bad_node_metrics.shunned_count_across_time_frames.record_shunned_metric(); + + // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before + if bad_node_metrics.close_group_peers.contains(&peer) { + if !bad_node_metrics + .close_group_peers_that_have_shunned_us + .contains(&peer) + { + bad_node_metrics.shunned_by_close_group.inc(); + bad_node_metrics + .close_group_peers_that_have_shunned_us + .insert(peer); + } + } else if bad_node_metrics + .old_close_group_peers + .iter() + .any(|(p, _)| p == &peer) + && !bad_node_metrics + .old_close_group_peers_that_have_shunned_us + .contains(&peer) + { + bad_node_metrics.shunned_by_old_close_group.inc(); + bad_node_metrics + .old_close_group_peers_that_have_shunned_us + .insert(peer); + } + + } + Some(BadNodeMetricsMsg::CloseGroupUpdated(new_closest_peers)) => { + bad_node_metrics.update_close_group_peers(new_closest_peers); + } + None => break, + } + } _ = update_interval.tick() => { - shunned_metrics.update(); + bad_node_metrics.shunned_count_across_time_frames.try_update(); } } } @@ -106,9 +177,84 @@ impl ShunnedCountAcrossTimeFrames { tx } - pub fn record_shunned_metric(&mut self) { + pub(crate) fn update_close_group_peers(&mut self, new_closest_peers: Vec) { + let new_members: Vec = new_closest_peers + .iter() + .filter(|p| !self.close_group_peers.contains(p)) + .cloned() + .collect(); + let evicted_members: Vec = self + .close_group_peers + .iter() + .filter(|p| !new_closest_peers.contains(p)) + .cloned() + .collect(); + for new_member in &new_members { + // if it has shunned us before, update the metrics. 
+ if self + .old_close_group_peers_that_have_shunned_us + .contains(new_member) + { + self.shunned_by_old_close_group.dec(); + self.old_close_group_peers_that_have_shunned_us + .remove(new_member); + + self.shunned_by_close_group.inc(); + self.close_group_peers_that_have_shunned_us + .insert(*new_member); + } + } + + for evicted_member in &evicted_members { + self.old_close_group_peers + .push((*evicted_member, Instant::now())); + + // if it has shunned us before, update the metrics. + if self + .close_group_peers_that_have_shunned_us + .contains(evicted_member) + { + self.shunned_by_close_group.dec(); + self.close_group_peers_that_have_shunned_us + .remove(evicted_member); + + self.shunned_by_old_close_group.inc(); + self.old_close_group_peers_that_have_shunned_us + .insert(*evicted_member); + } + } + + if !new_members.is_empty() { + debug!("The close group has been updated. The new members are {new_members:?}. The evicted members are {evicted_members:?}"); + self.close_group_peers = new_closest_peers; + + if self.old_close_group_peers.len() > 5 * CLOSE_GROUP_SIZE { + // clean the oldest Instant ones + self.old_close_group_peers + .sort_by_key(|(_, instant)| *instant); + // get the list of the peers that are about to be truncated + let truncated_peers = self.old_close_group_peers.split_off(5 * CLOSE_GROUP_SIZE); + // remove tracking for the truncated peers + for (peer, _) in truncated_peers { + if self + .old_close_group_peers_that_have_shunned_us + .remove(&peer) + { + self.shunned_by_old_close_group.dec(); + } + if self.close_group_peers_that_have_shunned_us.remove(&peer) { + self.shunned_by_close_group.dec(); + } + } + } + } + } +} + +impl ShunnedCountAcrossTimeFrames { + fn record_shunned_metric(&mut self) { let now = Instant::now(); - self.tracked_values.push(TrackedValue { + self.shunned_report_tracker.push(ShunnedReportTracker { time: now, least_bucket_it_fits_in: TimeFrameType::LastTenMinutes, }); @@ -121,11 +267,11 @@ impl ShunnedCountAcrossTimeFrames { } } - pub fn update(&mut self) { + fn try_update(&mut self) { let now = Instant::now(); let mut idx_to_remove = Vec::new(); - for (idx, tracked_value) in self.tracked_values.iter_mut().enumerate() { + for (idx, tracked_value) in self.shunned_report_tracker.iter_mut().enumerate() { let time_elapsed_since_adding = now.duration_since(tracked_value.time).as_secs(); if time_elapsed_since_adding > tracked_value.least_bucket_it_fits_in.get_duration_sec() @@ -145,7 +291,7 @@ impl ShunnedCountAcrossTimeFrames { } // remove the ones that are now indefinite for idx in idx_to_remove { - self.tracked_values.remove(idx); + self.shunned_report_tracker.remove(idx); } } } @@ -158,11 +304,11 @@ mod tests { fn update_should_move_to_next_state() -> eyre::Result<()> { let mut shunned_metrics = ShunnedCountAcrossTimeFrames { metric: Family::default(), - tracked_values: Vec::new(), + shunned_report_tracker: Vec::new(), }; shunned_metrics.record_shunned_metric(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastTenMinutes)); // all the counters should be 1 for variant in TimeFrameType::iter() { @@ -179,8 +325,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update(); + let current_state = 
shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastHour)); // all the counters except LastTenMinutes should be 1 for variant in TimeFrameType::iter() { @@ -201,8 +347,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastSixHours)); // all the counters except LastTenMinutes and LastHour should be 1 for variant in TimeFrameType::iter() { @@ -223,8 +369,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastDay)); // all the counters except LastTenMinutes, LastHour and LastSixHours should be 1 for variant in TimeFrameType::iter() { @@ -248,8 +394,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastWeek)); // all the counters except LastTenMinutes, LastHour, LastSixHours and LastDay should be 1 for variant in TimeFrameType::iter() { @@ -274,8 +420,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - assert_eq!(shunned_metrics.tracked_values.len(), 0); + shunned_metrics.try_update(); + assert_eq!(shunned_metrics.shunned_report_tracker.len(), 0); // all the counters except Indefinite should be 0 for variant in TimeFrameType::iter() { let time_frame = TimeFrame { diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index 6e8fa60812..b2a701b576 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -12,11 +12,13 @@ pub mod service; #[cfg(feature = "upnp")] mod upnp; -#[cfg(feature = "open-metrics")] use crate::MetricsRegistries; use crate::{log_markers::Marker, target_arch::sleep}; -use bad_node::{ShunnedCountAcrossTimeFrames, TimeFrame}; -use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; +use bad_node::{BadNodeMetrics, BadNodeMetricsMsg, TimeFrame}; +use libp2p::{ + metrics::{Metrics as Libp2pMetrics, Recorder}, + PeerId, +}; use prometheus_client::{ metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, @@ -52,16 +54,20 @@ pub(crate) struct NetworkMetricsRecorder { // bad node metrics bad_peers_count: Counter, - #[allow(dead_code)] // This is updated by the background task - shunned_across_time_frames: Family, shunned_count: Counter, + #[allow(dead_code)] // updated by background task + shunned_count_across_time_frames: Family, + #[allow(dead_code)] + shunned_by_close_group: Gauge, + #[allow(dead_code)] + shunned_by_old_close_group: Gauge, // system info process_memory_used_mb: Gauge, process_cpu_usage_percentage: Gauge, // helpers - shunned_report_notifier: 
tokio::sync::mpsc::Sender<()>, + bad_nodes_notifier: tokio::sync::mpsc::Sender, } impl NetworkMetricsRecorder { @@ -181,13 +187,29 @@ impl NetworkMetricsRecorder { .extended_metrics .sub_registry_with_prefix("sn_networking"); let shunned_count_across_time_frames = Family::default(); - let shunned_report_notifier = ShunnedCountAcrossTimeFrames::spawn_background_task( + extended_metrics_sub_registry.register( + "shunned_count_across_time_frames", + "The number of times our node has been shunned by other nodes across different time frames", shunned_count_across_time_frames.clone(), ); + + let shunned_by_close_group = Gauge::default(); extended_metrics_sub_registry.register( - "shunned_count_across_time_frames", - "The number of peers that have been shunned across different time frames", + "shunned_by_close_group", + "The number of close group peers that have shunned our node", + shunned_by_close_group.clone(), + ); + + let shunned_by_old_close_group = Gauge::default(); + extended_metrics_sub_registry.register( + "shunned_by_old_close_group", + "The number of close group peers that have shunned our node. This contains the peers that were once in our close group but have since been evicted.", + shunned_by_old_close_group.clone(), + ); + let bad_nodes_notifier = BadNodeMetrics::spawn_background_task( shunned_count_across_time_frames.clone(), + shunned_by_close_group.clone(), + shunned_by_old_close_group.clone(), ); let network_metrics = Self { @@ -207,13 +229,15 @@ impl NetworkMetricsRecorder { live_time, bad_peers_count, - shunned_across_time_frames: shunned_count_across_time_frames, + shunned_count_across_time_frames, shunned_count, + shunned_by_close_group, + shunned_by_old_close_group, process_memory_used_mb, process_cpu_usage_percentage, - shunned_report_notifier, + bad_nodes_notifier, }; network_metrics.system_metrics_recorder_task(); @@ -255,11 +279,15 @@ impl NetworkMetricsRecorder { Marker::PeerConsideredAsBad { .. } => { let _ = self.bad_peers_count.inc(); } - Marker::FlaggedAsBadNode { .. } => { + Marker::FlaggedAsBadNode { flagged_by } => { let _ = self.shunned_count.inc(); - let shunned_report_notifier = self.shunned_report_notifier.clone(); + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + let flagged_by = *flagged_by; crate::target_arch::spawn(async move { - if let Err(err) = shunned_report_notifier.send(()).await { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::ShunnedByPeer(flagged_by)) + .await + { error!("Failed to send shunned report via notifier: {err:?}"); } }); @@ -281,6 +309,18 @@ impl NetworkMetricsRecorder { _ => {} } } + + pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec) { + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + crate::target_arch::spawn(async move { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::CloseGroupUpdated(new_close_group)) + .await + { + error!("Failed to send shunned report via notifier: {err:?}"); + } + }); + } } /// Impl the Recorder traits again for our struct. 
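
To make the wiring above concrete, here is an illustrative crate-internal sketch (not part of the patch) of how the recorder feeds the background task. `BadNodeMetrics`, `BadNodeMetricsMsg` and `TimeFrame` come from the diff above; the peer ids and error handling are placeholders:

```rust
// Sketch only: feeding the bad-node metrics task added in this patch.
use libp2p::PeerId;
use prometheus_client::metrics::{family::Family, gauge::Gauge};

async fn feed_bad_node_metrics() {
    let shunned_across_time_frames: Family<TimeFrame, Gauge> = Family::default();
    let shunned_by_close_group = Gauge::default();
    let shunned_by_old_close_group = Gauge::default();

    // Returns an mpsc Sender<BadNodeMetricsMsg>; the spawned task owns the trackers.
    let tx = BadNodeMetrics::spawn_background_task(
        shunned_across_time_frames,
        shunned_by_close_group,
        shunned_by_old_close_group,
    );

    // On a Marker::FlaggedAsBadNode { flagged_by } event:
    let _ = tx.send(BadNodeMetricsMsg::ShunnedByPeer(PeerId::random())).await;
    // Whenever the close group changes:
    let _ = tx
        .send(BadNodeMetricsMsg::CloseGroupUpdated(vec![PeerId::random()]))
        .await;
}
```
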
From a507bd61baa3d75663964da59bd119b1945fb3d6 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 1 Oct 2024 18:22:52 +0530 Subject: [PATCH 114/255] test(metrics): add tests for metrics based on close group shunning --- sn_networking/src/metrics/bad_node.rs | 341 +++++++++++++++++++++----- 1 file changed, 284 insertions(+), 57 deletions(-) diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs index 2499c1862e..b9a6ab2877 100644 --- a/sn_networking/src/metrics/bad_node.rs +++ b/sn_networking/src/metrics/bad_node.rs @@ -21,10 +21,24 @@ use strum::IntoEnumIterator; const UPDATE_INTERVAL: Duration = Duration::from_secs(20); +#[cfg(not(test))] +const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = 5 * CLOSE_GROUP_SIZE; +#[cfg(test)] +const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = CLOSE_GROUP_SIZE + 2; + pub struct BadNodeMetrics { shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames, - shunned_by_close_group: Gauge, - shunned_by_old_close_group: Gauge, + shunned_by_close_group: ShunnedByCloseGroup, +} + +pub enum BadNodeMetricsMsg { + ShunnedByPeer(PeerId), + CloseGroupUpdated(Vec), +} + +struct ShunnedByCloseGroup { + metric_current_group: Gauge, + metric_old_group: Gauge, // trackers close_group_peers: Vec, @@ -34,11 +48,6 @@ pub struct BadNodeMetrics { old_close_group_peers_that_have_shunned_us: HashSet, } -pub enum BadNodeMetricsMsg { - ShunnedByPeer(PeerId), - CloseGroupUpdated(Vec), -} - /// A struct to record the the number of reports against our node across different time frames. struct ShunnedCountAcrossTimeFrames { metric: Family, @@ -113,13 +122,15 @@ impl BadNodeMetrics { metric: time_based_shunned_count, shunned_report_tracker: Vec::new(), }, - shunned_by_close_group, - shunned_by_old_close_group, - - close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), + shunned_by_close_group: ShunnedByCloseGroup { + metric_current_group: shunned_by_close_group, + metric_old_group: shunned_by_old_close_group, + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + }, }; let (tx, mut rx) = tokio::sync::mpsc::channel(10); @@ -133,35 +144,11 @@ impl BadNodeMetrics { match msg { Some(BadNodeMetricsMsg::ShunnedByPeer(peer)) => { bad_node_metrics.shunned_count_across_time_frames.record_shunned_metric(); - - // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before - if bad_node_metrics.close_group_peers.contains(&peer) { - if !bad_node_metrics - .close_group_peers_that_have_shunned_us - .contains(&peer) - { - bad_node_metrics.shunned_by_close_group.inc(); - bad_node_metrics - .close_group_peers_that_have_shunned_us - .insert(peer); - } - } else if bad_node_metrics - .old_close_group_peers - .iter() - .any(|(p, _)| p == &peer) - && !bad_node_metrics - .old_close_group_peers_that_have_shunned_us - .contains(&peer) - { - bad_node_metrics.shunned_by_old_close_group.inc(); - bad_node_metrics - .old_close_group_peers_that_have_shunned_us - .insert(peer); - } + bad_node_metrics.shunned_by_close_group.record_shunned_metric(peer); } Some(BadNodeMetricsMsg::CloseGroupUpdated(new_closest_peers)) => { - bad_node_metrics.update_close_group_peers(new_closest_peers); + bad_node_metrics.shunned_by_close_group.update_close_group_peers(new_closest_peers); } None => 
break, } @@ -169,13 +156,32 @@ impl BadNodeMetrics { } _ = update_interval.tick() => { - bad_node_metrics.shunned_count_across_time_frames.try_update(); + bad_node_metrics.shunned_count_across_time_frames.try_update_state(); } } } }); tx } +} + +impl ShunnedByCloseGroup { + pub(crate) fn record_shunned_metric(&mut self, peer: PeerId) { + // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before + if self.close_group_peers.contains(&peer) { + if !self.close_group_peers_that_have_shunned_us.contains(&peer) { + self.metric_current_group.inc(); + self.close_group_peers_that_have_shunned_us.insert(peer); + } + } else if self.old_close_group_peers.iter().any(|(p, _)| p == &peer) + && !self + .old_close_group_peers_that_have_shunned_us + .contains(&peer) + { + self.metric_old_group.inc(); + self.old_close_group_peers_that_have_shunned_us.insert(peer); + } + } pub(crate) fn update_close_group_peers(&mut self, new_closest_peers: Vec) { let new_members: Vec = new_closest_peers @@ -195,11 +201,11 @@ impl BadNodeMetrics { .old_close_group_peers_that_have_shunned_us .contains(new_member) { - self.shunned_by_old_close_group.dec(); + self.metric_old_group.dec(); self.old_close_group_peers_that_have_shunned_us .remove(new_member); - self.shunned_by_close_group.inc(); + self.metric_current_group.inc(); self.close_group_peers_that_have_shunned_us .insert(*new_member); } @@ -214,11 +220,11 @@ impl BadNodeMetrics { .close_group_peers_that_have_shunned_us .contains(evicted_member) { - self.shunned_by_close_group.dec(); + self.metric_current_group.dec(); self.close_group_peers_that_have_shunned_us .remove(evicted_member); - self.shunned_by_old_close_group.inc(); + self.metric_old_group.inc(); self.old_close_group_peers_that_have_shunned_us .insert(*evicted_member); } @@ -228,22 +234,24 @@ impl BadNodeMetrics { debug!("The close group has been updated. The new members are {new_members:?}. 
The evicted members are {evicted_members:?}"); self.close_group_peers = new_closest_peers; - if self.old_close_group_peers.len() > 5 * CLOSE_GROUP_SIZE { + if self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { // clean the oldest Instant ones self.old_close_group_peers - .sort_by_key(|(_, instant)| *instant); + .sort_by_key(|(_, instant)| std::cmp::Reverse(*instant)); // get the list of the peers that are about to be truncated - let truncated_peers = self.old_close_group_peers.split_off(5 * CLOSE_GROUP_SIZE); + let truncated_peers = self + .old_close_group_peers + .split_off(MAX_EVICTED_CLOSE_GROUP_PEERS); // remove tracking for the truncated peers for (peer, _) in truncated_peers { if self .old_close_group_peers_that_have_shunned_us .remove(&peer) { - self.shunned_by_old_close_group.dec(); + self.metric_old_group.dec(); } if self.close_group_peers_that_have_shunned_us.remove(&peer) { - self.shunned_by_close_group.dec(); + self.metric_current_group.dec(); } } } @@ -267,7 +275,7 @@ impl ShunnedCountAcrossTimeFrames { } } - fn try_update(&mut self) { + fn try_update_state(&mut self) { let now = Instant::now(); let mut idx_to_remove = Vec::new(); @@ -299,9 +307,10 @@ impl ShunnedCountAcrossTimeFrames { #[cfg(test)] mod tests { use super::*; + use eyre::Result; #[test] - fn update_should_move_to_next_state() -> eyre::Result<()> { + fn update_should_move_to_next_timeframe() -> Result<()> { let mut shunned_metrics = ShunnedCountAcrossTimeFrames { metric: Family::default(), shunned_report_tracker: Vec::new(), @@ -325,7 +334,7 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.try_update(); + shunned_metrics.try_update_state(); let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastHour)); // all the counters except LastTenMinutes should be 1 @@ -347,7 +356,7 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.try_update(); + shunned_metrics.try_update_state(); let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastSixHours)); // all the counters except LastTenMinutes and LastHour should be 1 @@ -369,7 +378,7 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.try_update(); + shunned_metrics.try_update_state(); let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastDay)); // all the counters except LastTenMinutes, LastHour and LastSixHours should be 1 @@ -394,7 +403,7 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.try_update(); + shunned_metrics.try_update_state(); let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastWeek)); // all the counters except LastTenMinutes, LastHour, LastSixHours and LastDay should be 1 @@ -420,7 +429,7 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.try_update(); + shunned_metrics.try_update_state(); assert_eq!(shunned_metrics.shunned_report_tracker.len(), 0); // all the counters except Indefinite should be 0 for variant in TimeFrameType::iter() { @@ 
-436,4 +445,222 @@ mod tests { Ok(()) } + + #[test] + fn metrics_should_not_be_updated_if_close_group_is_not_set() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + }; + + close_group_shunned.record_shunned_metric(PeerId::random()); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn close_group_shunned_metric_should_be_updated_on_new_report() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + // report by a peer in the close group should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by same peer should not increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a different peer should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[1]); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a peer that is not in the close group should not increment the metric + close_group_shunned.record_shunned_metric(PeerId::random()); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn change_in_close_group_should_update_the_metrics() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + let old_member = close_group_shunned.close_group_peers[0]; + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // update close group + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the peer that shunned us before should now be in the old group + 
assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // report by the old member should not increment the metric + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // update close group with old member + close_group_shunned.update_close_group_peers(vec![ + old_member, + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the metrics of current_group and old_group should be updated + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn update_close_group_metrics_on_reaching_max_evicted_peer_count() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: Vec::new(), + close_group_peers_that_have_shunned_us: HashSet::new(), + old_close_group_peers_that_have_shunned_us: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // evict 1 members + let old_member_1 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // evict 1 members + let old_member_2 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // report by the evicted members should increment the old group metric + close_group_shunned.record_shunned_metric(old_member_1); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + close_group_shunned.record_shunned_metric(old_member_2); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict all the members + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // the metrics should still remain the same + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict 1 more members to cross the threshold + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_1 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + assert!(!close_group_shunned + .old_close_group_peers + .iter() + .any(|(p, _)| p == &old_member_1)); + 
assert!(close_group_shunned + .old_close_group_peers + .iter() + .any(|(p, _)| p == &old_member_2)); + + // evict 1 more member + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_2 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + assert!(!close_group_shunned + .old_close_group_peers + .iter() + .any(|(p, _)| p == &old_member_1)); + + Ok(()) + } } From fa372c54d7e488462f6248907b7e50b0392c6568 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 4 Oct 2024 18:06:49 +0530 Subject: [PATCH 115/255] chore(metrics): use vecdeque instead of sorting by time --- sn_networking/src/metrics/bad_node.rs | 47 +++++++++++---------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs index b9a6ab2877..006801d300 100644 --- a/sn_networking/src/metrics/bad_node.rs +++ b/sn_networking/src/metrics/bad_node.rs @@ -14,7 +14,7 @@ use prometheus_client::{ }; use sn_protocol::CLOSE_GROUP_SIZE; use std::{ - collections::HashSet, + collections::{HashSet, VecDeque}, time::{Duration, Instant}, }; use strum::IntoEnumIterator; @@ -42,7 +42,7 @@ struct ShunnedByCloseGroup { // trackers close_group_peers: Vec, - old_close_group_peers: Vec<(PeerId, Instant)>, + old_close_group_peers: VecDeque, // The close group peer that shunned us close_group_peers_that_have_shunned_us: HashSet, old_close_group_peers_that_have_shunned_us: HashSet, @@ -127,7 +127,7 @@ impl BadNodeMetrics { metric_old_group: shunned_by_old_close_group, close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), old_close_group_peers_that_have_shunned_us: HashSet::new(), close_group_peers_that_have_shunned_us: HashSet::new(), }, @@ -173,7 +173,7 @@ impl ShunnedByCloseGroup { self.metric_current_group.inc(); self.close_group_peers_that_have_shunned_us.insert(peer); } - } else if self.old_close_group_peers.iter().any(|(p, _)| p == &peer) + } else if self.old_close_group_peers.contains(&peer) && !self .old_close_group_peers_that_have_shunned_us .contains(&peer) @@ -212,8 +212,7 @@ impl ShunnedByCloseGroup { } for evicted_member in &evicted_members { - self.old_close_group_peers - .push((*evicted_member, Instant::now())); + self.old_close_group_peers.push_back(*evicted_member); // if it has shunned us before, update the metrics. if self @@ -234,23 +233,18 @@ impl ShunnedByCloseGroup { debug!("The close group has been updated. The new members are {new_members:?}. 
The evicted members are {evicted_members:?}"); self.close_group_peers = new_closest_peers; - if self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { - // clean the oldest Instant ones - self.old_close_group_peers - .sort_by_key(|(_, instant)| std::cmp::Reverse(*instant)); - // get the list of the peers that are about to be truncated - let truncated_peers = self - .old_close_group_peers - .split_off(MAX_EVICTED_CLOSE_GROUP_PEERS); - // remove tracking for the truncated peers - for (peer, _) in truncated_peers { + while self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { + if let Some(removed_peer) = self.old_close_group_peers.pop_front() { if self .old_close_group_peers_that_have_shunned_us - .remove(&peer) + .remove(&removed_peer) { self.metric_old_group.dec(); } - if self.close_group_peers_that_have_shunned_us.remove(&peer) { + if self + .close_group_peers_that_have_shunned_us + .remove(&removed_peer) + { self.metric_current_group.dec(); } } @@ -453,7 +447,7 @@ mod tests { metric_old_group: Gauge::default(), close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), close_group_peers_that_have_shunned_us: HashSet::new(), old_close_group_peers_that_have_shunned_us: HashSet::new(), }; @@ -472,7 +466,7 @@ mod tests { metric_old_group: Gauge::default(), close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), close_group_peers_that_have_shunned_us: HashSet::new(), old_close_group_peers_that_have_shunned_us: HashSet::new(), }; @@ -513,7 +507,7 @@ mod tests { metric_old_group: Gauge::default(), close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), close_group_peers_that_have_shunned_us: HashSet::new(), old_close_group_peers_that_have_shunned_us: HashSet::new(), }; @@ -570,7 +564,7 @@ mod tests { metric_old_group: Gauge::default(), close_group_peers: Vec::new(), - old_close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), close_group_peers_that_have_shunned_us: HashSet::new(), old_close_group_peers_that_have_shunned_us: HashSet::new(), }; @@ -637,12 +631,10 @@ mod tests { assert_eq!(close_group_shunned.metric_old_group.get(), 1); assert!(!close_group_shunned .old_close_group_peers - .iter() - .any(|(p, _)| p == &old_member_1)); + .contains(&old_member_1)); assert!(close_group_shunned .old_close_group_peers - .iter() - .any(|(p, _)| p == &old_member_2)); + .contains(&old_member_2)); // evict 1 more member close_group_shunned.update_close_group_peers(vec![ @@ -658,8 +650,7 @@ mod tests { assert_eq!(close_group_shunned.metric_old_group.get(), 0); assert!(!close_group_shunned .old_close_group_peers - .iter() - .any(|(p, _)| p == &old_member_1)); + .contains(&old_member_1)); Ok(()) } From 72600f482f19c289e8fe4a6ce9df5e0a73e62696 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 4 Oct 2024 14:55:54 +0200 Subject: [PATCH 116/255] refactor: update chunk payments contract naming to data payments --- README.md | 2 +- autonomi/README.md | 10 ++--- evm_testnet/src/main.rs | 5 +-- .../{ChunkPayments.json => DataPayments.json} | 20 ++++----- .../error.rs | 0 .../{chunk_payments => data_payments}/mod.rs | 28 ++++++------ evmlib/src/contract/mod.rs | 2 +- evmlib/src/event.rs | 12 ++--- evmlib/src/lib.rs | 22 +++++----- evmlib/src/testnet.rs | 18 ++++---- evmlib/src/transaction.rs | 44 +++++++++---------- evmlib/src/utils.rs | 2 +- evmlib/src/wallet.rs | 16 +++---- .../{chunk_payments.rs => 
data_payments.rs}                            |  20 ++++-----
 evmlib/tests/wallet.rs                       |  14 +++---
 sn_node/src/bin/safenode/subcommands.rs      |   6 +--
 sn_node/src/put_validation.rs                |   2 +-
 .../src/bin/cli/subcommands/evm_network.rs   |   6 +--
 sn_node_manager/src/local.rs                 |   4 +-
 test_utils/src/evm.rs                        |   8 ++--
 20 files changed, 119 insertions(+), 122 deletions(-)
 rename evmlib/artifacts/{ChunkPayments.json => DataPayments.json} (80%)
 rename evmlib/src/contract/{chunk_payments => data_payments}/error.rs (100%)
 rename evmlib/src/contract/{chunk_payments => data_payments}/mod.rs (69%)
 rename evmlib/tests/{chunk_payments.rs => data_payments.rs} (84%)

diff --git a/README.md b/README.md
index 52a485c16a..54f69bd62a 100644
--- a/README.md
+++ b/README.md
@@ -165,7 +165,7 @@ Take note of the console output for the next step (`RPC URL`, `Payment token add
 `--rewards-address` _is the address where you will receive your node earnings on._

 ```bash
-cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address <YOUR_ETHEREUM_ADDRESS> evm-custom --rpc-url <RPC_URL> --payment-token-address <PAYMENT_TOKEN_ADDRESS> --chunk-payments-address <CHUNK_PAYMENTS_ADDRESS>
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address <YOUR_ETHEREUM_ADDRESS> evm-custom --rpc-url <RPC_URL> --payment-token-address <PAYMENT_TOKEN_ADDRESS> --data-payments-address <DATA_PAYMENTS_ADDRESS>
 ```

 4. Verify node status:
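
The same rename shows up in the Rust API. A sketch of the renamed constructor argument, using the local evm-testnet example values quoted in the autonomi README diff that follows (these are throwaway local-testnet addresses, not mainnet values):

```rust
// Sketch: constructing a custom network after the chunk -> data payments rename.
use evmlib::{CustomNetwork, Network};

fn local_testnet_network() -> Network {
    Network::Custom(CustomNetwork::new(
        "http://localhost:54370/",                    // RPC URL
        "0x5FbDB2315678afecb367f032d93F642f64180aa3", // payment token
        "0x8464135c8F25Da09e49BC8782676a84730C318bC", // data payments (was: chunk payments)
    ))
}
```
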
diff --git a/autonomi/README.md b/autonomi/README.md
index 2603768aea..275f973331 100644
--- a/autonomi/README.md
+++ b/autonomi/README.md
@@ -31,15 +31,15 @@ Take note of the console output for the next step (`RPC URL`, `Payment token add
 3. Run a local network with the `local-discovery` feature and pass the EVM params:

 ```sh
-cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address <YOUR_ETHEREUM_ADDRESS> evm-custom --rpc-url <RPC_URL> --payment-token-address <PAYMENT_TOKEN_ADDRESS> --chunk-payments-address <CHUNK_PAYMENTS_ADDRESS>
+cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address <YOUR_ETHEREUM_ADDRESS> evm-custom --rpc-url <RPC_URL> --payment-token-address <PAYMENT_TOKEN_ADDRESS> --data-payments-address <DATA_PAYMENTS_ADDRESS>
 ```

 4. Then run the tests with the `local` feature and pass the EVM params again:

 ```sh
-$ RPC_URL=<RPC_URL> PAYMENT_TOKEN_ADDRESS=<PAYMENT_TOKEN_ADDRESS> CHUNK_PAYMENTS_ADDRESS=<CHUNK_PAYMENTS_ADDRESS> cargo test --package=autonomi --features=local
+$ RPC_URL=<RPC_URL> PAYMENT_TOKEN_ADDRESS=<PAYMENT_TOKEN_ADDRESS> DATA_PAYMENTS_ADDRESS=<DATA_PAYMENTS_ADDRESS> cargo test --package=autonomi --features=local
 # Or with logs
-$ RUST_LOG=autonomi RPC_URL=<RPC_URL> PAYMENT_TOKEN_ADDRESS=<PAYMENT_TOKEN_ADDRESS> CHUNK_PAYMENTS_ADDRESS=<CHUNK_PAYMENTS_ADDRESS> cargo test --package=autonomi --features=local -- --nocapture
+$ RUST_LOG=autonomi RPC_URL=<RPC_URL> PAYMENT_TOKEN_ADDRESS=<PAYMENT_TOKEN_ADDRESS> DATA_PAYMENTS_ADDRESS=<DATA_PAYMENTS_ADDRESS> cargo test --package=autonomi --features=local -- --nocapture
 ```

 ### Using a live testnet or mainnet
@@ -70,13 +70,13 @@ initialise a wallet from with almost infinite gas and payment tokens. Example:

 ```rust
 let rpc_url = "http://localhost:54370/";
 let payment_token_address = "0x5FbDB2315678afecb367f032d93F642f64180aa3";
-let chunk_payments_address = "0x8464135c8F25Da09e49BC8782676a84730C318bC";
+let data_payments_address = "0x8464135c8F25Da09e49BC8782676a84730C318bC";
 let private_key = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";

 let network = Network::Custom(CustomNetwork::new(
     rpc_url,
     payment_token_address,
-    chunk_payments_address,
+    data_payments_address,
 ));

 let deployer_wallet = Wallet::new_from_private_key(network, private_key).unwrap();
diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs
index 8cef9ceb98..807bc0c40f 100644
--- a/evm_testnet/src/main.rs
+++ b/evm_testnet/src/main.rs
@@ -76,10 +76,7 @@ async fn print_testnet_details(testnet: &Testnet, genesis_wallet: Option<&Wallet>
, N: Network> { +pub struct DataPaymentsHandler, N: Network> { pub contract: DataPaymentsContractInstance, } -impl DataPayments +impl DataPaymentsHandler where T: Transport + Clone, P: Provider, @@ -32,7 +32,7 @@ where /// Create a new ChunkPayments contract instance. pub fn new(contract_address: Address, provider: P) -> Self { let contract = DataPaymentsContract::new(contract_address, provider); - DataPayments { contract } + DataPaymentsHandler { contract } } /// Deploys the ChunkPayments smart contract to the network of the provider. @@ -42,7 +42,7 @@ where .await .expect("Could not deploy contract"); - DataPayments { contract } + DataPaymentsHandler { contract } } pub fn set_provider(&mut self, provider: P) { @@ -54,24 +54,24 @@ where /// Input: (quote_id, reward_address, amount). pub async fn pay_for_quotes>( &self, - chunk_payments: I, + data_payments: I, ) -> Result { - let chunk_payments: Vec = chunk_payments + let data_payments: Vec = data_payments .into_iter() - .map(|(hash, addr, amount)| ChunkPayments::ChunkPayment { - rewardAddress: addr, + .map(|(hash, addr, amount)| DataPayments::DataPayment { + rewardsAddress: addr, amount, quoteHash: hash, }) .collect(); - if chunk_payments.len() > MAX_TRANSFERS_PER_TRANSACTION { + if data_payments.len() > MAX_TRANSFERS_PER_TRANSACTION { return Err(Error::TransferLimitExceeded); } let tx_hash = self .contract - .submitChunkPayments(chunk_payments) + .submitDataPayments(data_payments) .send() .await? .watch() diff --git a/evmlib/src/contract/mod.rs b/evmlib/src/contract/mod.rs index 5afb41f09b..1a4e070efd 100644 --- a/evmlib/src/contract/mod.rs +++ b/evmlib/src/contract/mod.rs @@ -1,2 +1,2 @@ -pub mod chunk_payments; +pub mod data_payments; pub mod network_token; diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs index 9327eb98cd..65d58db0a9 100644 --- a/evmlib/src/event.rs +++ b/evmlib/src/event.rs @@ -3,8 +3,8 @@ use alloy::primitives::{b256, FixedBytes}; use alloy::rpc::types::Log; // Should be updated when the smart contract changes! -pub(crate) const CHUNK_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> = - b256!("a6df5ca64d2adbcdd26949b97238efc4e97dc7e5d23012ea53f92a24f005f958"); // DevSkim: ignore DS173237 +pub(crate) const DATA_PAYMENT_EVENT_SIGNATURE: FixedBytes<32> = + b256!("f998960b1c6f0e0e89b7bbe6b6fbf3e03e6f08eee5b8430877d8adb8e149d580"); // DevSkim: ignore DS173237 #[derive(thiserror::Error, Debug)] pub enum Error { @@ -19,7 +19,7 @@ pub enum Error { /// Struct for the ChunkPaymentEvent emitted by the ChunkPayments smart contract. 
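 /// Topic layout, as decoded in `TryFrom<Log>` below (inferred from that
 /// decoding rather than from the contract ABI): topic0 is the event
 /// signature, topics[1] carries the rewards address in its last 20 bytes,
 /// topics[2] the amount, and topics[3] the quote hash.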
#[derive(Debug)] pub(crate) struct ChunkPaymentEvent { - pub reward_address: Address, + pub rewards_address: Address, pub amount: U256, pub quote_hash: Hash, } @@ -36,17 +36,17 @@ impl TryFrom for ChunkPaymentEvent { let topic0 = log.topics().first().ok_or(Error::EventSignatureMissing)?; // Verify the event signature - if topic0 != &CHUNK_PAYMENT_EVENT_SIGNATURE { + if topic0 != &DATA_PAYMENT_EVENT_SIGNATURE { return Err(Error::EventSignatureDoesNotMatch); } // Extract the data - let reward_address = Address::from_slice(&log.topics()[1][12..]); + let rewards_address = Address::from_slice(&log.topics()[1][12..]); let amount = U256::from_be_slice(&log.topics()[2][12..]); let quote_hash = Hash::from_slice(log.topics()[3].as_slice()); Ok(Self { - reward_address, + rewards_address, amount, quote_hash, }) diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index cd853bbb96..3a9d279943 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -1,5 +1,5 @@ use crate::common::{Address, QuoteHash, TxHash, U256}; -use crate::transaction::verify_chunk_payment; +use crate::transaction::verify_data_payment; use alloy::primitives::address; use alloy::transports::http::reqwest; use std::str::FromStr; @@ -24,23 +24,23 @@ const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address = address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C"); // Should be updated when the smart contract changes! -const ARBITRUM_ONE_CHUNK_PAYMENTS_ADDRESS: Address = - address!("708353783756C62818aCdbce914d90E0245F7319"); +const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address = + address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe"); #[derive(Clone, Debug, PartialEq)] pub struct CustomNetwork { pub rpc_url_http: reqwest::Url, pub payment_token_address: Address, - pub chunk_payments_address: Address, + pub data_payments_address: Address, } impl CustomNetwork { - pub fn new(rpc_url: &str, payment_token_addr: &str, chunk_payments_addr: &str) -> Self { + pub fn new(rpc_url: &str, payment_token_addr: &str, data_payments_addr: &str) -> Self { Self { rpc_url_http: reqwest::Url::parse(rpc_url).expect("Invalid RPC URL"), payment_token_address: Address::from_str(payment_token_addr) .expect("Invalid payment token address"), - chunk_payments_address: Address::from_str(chunk_payments_addr) + data_payments_address: Address::from_str(data_payments_addr) .expect("Invalid chunk payments address"), } } @@ -74,14 +74,14 @@ impl Network { } } - pub fn chunk_payments_address(&self) -> &Address { + pub fn data_payments_address(&self) -> &Address { match self { - Network::ArbitrumOne => &ARBITRUM_ONE_CHUNK_PAYMENTS_ADDRESS, - Network::Custom(custom) => &custom.chunk_payments_address, + Network::ArbitrumOne => &ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS, + Network::Custom(custom) => &custom.data_payments_address, } } - pub async fn verify_chunk_payment( + pub async fn verify_data_payment( &self, tx_hash: TxHash, quote_hash: QuoteHash, @@ -89,7 +89,7 @@ impl Network { amount: U256, quote_expiration_timestamp_in_secs: u64, ) -> Result<(), transaction::Error> { - verify_chunk_payment( + verify_data_payment( self, tx_hash, quote_hash, diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 06e5da1361..3eedfe2a40 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -1,5 +1,5 @@ use crate::common::Address; -use crate::contract::chunk_payments::DataPayments; +use crate::contract::data_payments::DataPaymentsHandler; use crate::contract::network_token::NetworkToken; use crate::{CustomNetwork, Network}; use alloy::hex::ToHexExt; @@ -15,7 +15,7 @@ use 
alloy::transports::http::{Client, Http}; pub struct Testnet { anvil: AnvilInstance, network_token_address: Address, - chunk_payments_address: Address, + data_payments_address: Address, } impl Testnet { @@ -24,13 +24,13 @@ impl Testnet { let anvil = start_node(); let network_token = deploy_network_token_contract(&anvil).await; - let chunk_payments = - deploy_chunk_payments_contract(&anvil, *network_token.contract.address()).await; + let data_payments = + deploy_data_payments_contract(&anvil, *network_token.contract.address()).await; Testnet { anvil, network_token_address: *network_token.contract.address(), - chunk_payments_address: *chunk_payments.contract.address(), + data_payments_address: *data_payments.contract.address(), } } @@ -44,7 +44,7 @@ impl Testnet { Network::Custom(CustomNetwork { rpc_url_http: rpc_url, payment_token_address: self.network_token_address, - chunk_payments_address: self.chunk_payments_address, + data_payments_address: self.data_payments_address, }) } @@ -97,10 +97,10 @@ pub async fn deploy_network_token_contract( NetworkToken::deploy(provider).await } -pub async fn deploy_chunk_payments_contract( +pub async fn deploy_data_payments_contract( anvil: &AnvilInstance, token_address: Address, -) -> DataPayments< +) -> DataPaymentsHandler< Http, FillProvider< JoinFill< @@ -128,5 +128,5 @@ pub async fn deploy_chunk_payments_contract( .on_http(rpc_url); // Deploy the contract. - DataPayments::deploy(provider, token_address).await + DataPaymentsHandler::deploy(provider, token_address).await } diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs index b74c268450..2b5c929d95 100644 --- a/evmlib/src/transaction.rs +++ b/evmlib/src/transaction.rs @@ -1,5 +1,5 @@ use crate::common::{Address, QuoteHash, TxHash, U256}; -use crate::event::{ChunkPaymentEvent, CHUNK_PAYMENT_EVENT_SIGNATURE}; +use crate::event::{ChunkPaymentEvent, DATA_PAYMENT_EVENT_SIGNATURE}; use crate::Network; use alloy::eips::BlockNumberOrTag; use alloy::primitives::FixedBytes; @@ -57,9 +57,9 @@ async fn get_transaction_logs(network: &Network, filter: Filter) -> Result = FixedBytes::left_padding_from(reward_addr.as_slice()); let filter = Filter::new() - .event_signature(CHUNK_PAYMENT_EVENT_SIGNATURE) + .event_signature(DATA_PAYMENT_EVENT_SIGNATURE) .topic1(topic1) .topic2(amount) .topic3(quote_hash) @@ -79,8 +79,8 @@ async fn get_chunk_payment_event( get_transaction_logs(network, filter).await } -/// Verify if a chunk payment is confirmed. -pub async fn verify_chunk_payment( +/// Verify if a data payment is confirmed. +pub async fn verify_data_payment( network: &Network, tx_hash: TxHash, quote_hash: QuoteHash, @@ -111,7 +111,7 @@ pub async fn verify_chunk_payment( } let logs = - get_chunk_payment_event(network, block_number, quote_hash, reward_addr, amount).await?; + get_data_payment_event(network, block_number, quote_hash, reward_addr, amount).await?; for log in logs { if log.transaction_hash != Some(tx_hash) { @@ -122,7 +122,7 @@ pub async fn verify_chunk_payment( if let Ok(event) = ChunkPaymentEvent::try_from(log) { // Check if the event matches what we expect. 
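            // Acceptance predicate: the quote hash and rewards address must
            // match exactly, while the paid amount only needs to meet or
            // exceed the quoted amount, so overpaying still verifies.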
if event.quote_hash == quote_hash - && event.reward_address == reward_addr + && event.rewards_address == reward_addr && event.amount >= amount { return Ok(()); @@ -137,7 +137,7 @@ pub async fn verify_chunk_payment( mod tests { use crate::common::{Address, U256}; use crate::transaction::{ - get_chunk_payment_event, get_transaction_receipt_by_hash, verify_chunk_payment, + get_data_payment_event, get_transaction_receipt_by_hash, verify_data_payment, }; use crate::Network; use alloy::hex::FromHex; @@ -147,7 +147,7 @@ mod tests { async fn test_get_transaction_receipt_by_hash() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 + let tx_hash = b256!("3304465f38fa0bd9670a426108dd1ddd193e059dcb7c13982d31424646217a36"); // DevSkim: ignore DS173237 assert!(get_transaction_receipt_by_hash(&network, tx_hash) .await @@ -156,16 +156,16 @@ mod tests { } #[tokio::test] - async fn test_get_chunk_payment_event() { + async fn test_get_data_payment_event() { let network = Network::ArbitrumOne; - let block_number: u64 = 250043261; - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 - let amount = U256::from(200); - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 + let block_number: u64 = 260246302; + let reward_address = Address::from_hex("8AB15A43305854e4AE4E6FBEa0CD1CC0AB4ecB2A").unwrap(); // DevSkim: ignore DS173237 + let amount = U256::from(1); + let quote_hash = b256!("EBD943C38C0422901D4CF22E677DD95F2591CA8D6EBFEA8BAF1BFE9FF5506ECE"); // DevSkim: ignore DS173237 let logs = - get_chunk_payment_event(&network, block_number, quote_hash, reward_address, amount) + get_data_payment_event(&network, block_number, quote_hash, reward_address, amount) .await .unwrap(); @@ -173,15 +173,15 @@ mod tests { } #[tokio::test] - async fn test_verify_chunk_payment() { + async fn test_verify_data_payment() { let network = Network::ArbitrumOne; - let tx_hash = b256!("462ff33b01d7930b05dc87826b485f6f19884f1cf1c15694477be68ff7dda066"); // DevSkim: ignore DS173237 - let quote_hash = b256!("477a32ca129183ebaa7e0a082813f8f9b121a1f9ba5dd83104bae44b6e32658c"); // DevSkim: ignore DS173237 - let reward_address = Address::from_hex("fdd33ec6f2325b742c1f32ed5b1da19547cb2f30").unwrap(); // DevSkim: ignore DS173237 - let amount = U256::from(200); + let tx_hash = b256!("3304465f38fa0bd9670a426108dd1ddd193e059dcb7c13982d31424646217a36"); // DevSkim: ignore DS173237 + let quote_hash = b256!("EBD943C38C0422901D4CF22E677DD95F2591CA8D6EBFEA8BAF1BFE9FF5506ECE"); // DevSkim: ignore DS173237 + let reward_address = Address::from_hex("8AB15A43305854e4AE4E6FBEa0CD1CC0AB4ecB2A").unwrap(); // DevSkim: ignore DS173237 + let amount = U256::from(1); - let result = verify_chunk_payment( + let result = verify_data_payment( &network, tx_hash, quote_hash, diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 8fde529508..f49f609c20 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -16,7 +16,7 @@ pub fn dummy_hash() -> Hash { /// Get the `Network` from environment variables pub fn evm_network_from_env() -> Result { - const EVM_VARS: [&str; 3] = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "CHUNK_PAYMENTS_ADDRESS"]; + const EVM_VARS: [&str; 3] = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "DATA_PAYMENTS_ADDRESS"]; let custom_vars_exist = EVM_VARS.iter().all(|var| env::var(var).is_ok()); if custom_vars_exist { diff 
--git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index dfc79ff990..5982de0f0c 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256}; -use crate::contract::chunk_payments::{DataPayments, MAX_TRANSFERS_PER_TRANSACTION}; +use crate::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; use crate::contract::network_token::NetworkToken; -use crate::contract::{chunk_payments, network_token}; +use crate::contract::{data_payments, network_token}; use crate::Network; use alloy::network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; use alloy::providers::fillers::{ @@ -24,7 +24,7 @@ pub enum Error { #[error("Network token contract error: {0}")] NetworkTokenContract(#[from] network_token::Error), #[error("Chunk payments contract error: {0}")] - ChunkPaymentsContract(#[from] chunk_payments::error::Error), + ChunkPaymentsContract(#[from] data_payments::error::Error), } pub struct Wallet { @@ -99,9 +99,9 @@ impl Wallet { /// transaction hashes of the payments by quotes. pub async fn pay_for_quotes>( &self, - chunk_payments: I, + data_payments: I, ) -> Result, PayForQuotesError> { - pay_for_quotes(self.wallet.clone(), &self.network, chunk_payments).await + pay_for_quotes(self.wallet.clone(), &self.network, data_payments).await } } @@ -244,14 +244,14 @@ pub async fn pay_for_quotes>( approve_to_spend_tokens( wallet.clone(), network, - *network.chunk_payments_address(), + *network.data_payments_address(), total_amount, ) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); - let chunk_payments = DataPayments::new(*network.chunk_payments_address(), provider); + let data_payments = DataPaymentsHandler::new(*network.data_payments_address(), provider); // Divide transfers over multiple transactions if they exceed the max per transaction. 
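    // Each batch below becomes one on-chain transaction, and every quote in a
    // batch is recorded against that transaction's hash in tx_hashes_by_quote.
    // Illustratively, if the cap were 256, the 600 payments exercised by the
    // wallet test further down would settle as three transactions (256/256/88).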
let chunks = payments.chunks(MAX_TRANSFERS_PER_TRANSACTION); @@ -259,7 +259,7 @@ pub async fn pay_for_quotes>( for batch in chunks { let batch: Vec = batch.to_vec(); - let tx_hash = chunk_payments + let tx_hash = data_payments .pay_for_quotes(batch.clone()) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; diff --git a/evmlib/tests/chunk_payments.rs b/evmlib/tests/data_payments.rs similarity index 84% rename from evmlib/tests/chunk_payments.rs rename to evmlib/tests/data_payments.rs index e101e57e15..129e06c62f 100644 --- a/evmlib/tests/chunk_payments.rs +++ b/evmlib/tests/data_payments.rs @@ -12,9 +12,9 @@ use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider, WalletProvide use alloy::signers::local::{LocalSigner, PrivateKeySigner}; use alloy::transports::http::{Client, Http}; use evmlib::common::U256; -use evmlib::contract::chunk_payments::{DataPayments, MAX_TRANSFERS_PER_TRANSACTION}; +use evmlib::contract::data_payments{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; use evmlib::contract::network_token::NetworkToken; -use evmlib::testnet::{deploy_chunk_payments_contract, deploy_network_token_contract, start_node}; +use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, start_node}; use evmlib::wallet::wallet_address; async fn setup() -> ( @@ -38,7 +38,7 @@ async fn setup() -> ( >, Ethereum, >, - DataPayments< + DataPaymentsHandler< Http, FillProvider< JoinFill< @@ -62,10 +62,10 @@ async fn setup() -> ( let network_token = deploy_network_token_contract(&anvil).await; - let chunk_payments = - deploy_chunk_payments_contract(&anvil, *network_token.contract.address()).await; + let data_payments = + deploy_data_payments_contract(&anvil, *network_token.contract.address()).await; - (anvil, network_token, chunk_payments) + (anvil, network_token, data_payments) } #[allow(clippy::unwrap_used)] @@ -113,7 +113,7 @@ async fn test_deploy() { #[tokio::test] async fn test_pay_for_quotes() { - let (_anvil, network_token, mut chunk_payments) = setup().await; + let (_anvil, network_token, mut data_payments) = setup().await; let mut quote_payments = vec![]; @@ -123,15 +123,15 @@ async fn test_pay_for_quotes() { } let _ = network_token - .approve(*chunk_payments.contract.address(), U256::MAX) + .approve(*data_payments.contract.address(), U256::MAX) .await .unwrap(); // Contract provider has a different account coupled to it, // so we set it to the same as the network token contract - chunk_payments.set_provider(network_token.contract.provider().clone()); + data_payments.set_provider(network_token.contract.provider().clone()); - let result = chunk_payments.pay_for_quotes(quote_payments).await; + let result = data_payments.pay_for_quotes(quote_payments).await; assert!(result.is_ok(), "Failed with error: {:?}", result.err()); } diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs index 97531859b6..a95ee34eca 100644 --- a/evmlib/tests/wallet.rs +++ b/evmlib/tests/wallet.rs @@ -8,9 +8,9 @@ use alloy::providers::ext::AnvilApi; use alloy::providers::{ProviderBuilder, WalletProvider}; use alloy::signers::local::{LocalSigner, PrivateKeySigner}; use evmlib::common::{Amount, TxHash}; -use evmlib::contract::chunk_payments::MAX_TRANSFERS_PER_TRANSACTION; -use evmlib::testnet::{deploy_chunk_payments_contract, deploy_network_token_contract, start_node}; -use evmlib::transaction::verify_chunk_payment; +use evmlib::contract::data_payments::MAX_TRANSFERS_PER_TRANSACTION; +use evmlib::testnet::{deploy_data_payments_contract, 
deploy_network_token_contract, start_node};
+use evmlib::transaction::verify_data_payment;
 use evmlib::wallet::{transfer_tokens, wallet_address, Wallet};
 use evmlib::{CustomNetwork, Network};
 use std::collections::HashSet;
@@ -21,14 +21,14 @@ async fn local_testnet() -> (AnvilInstance, Network, EthereumWallet) {
     let rpc_url = anvil.endpoint().parse().unwrap();
     let network_token = deploy_network_token_contract(&anvil).await;
     let payment_token_address = *network_token.contract.address();
-    let chunk_payments = deploy_chunk_payments_contract(&anvil, payment_token_address).await;
+    let data_payments = deploy_data_payments_contract(&anvil, payment_token_address).await;

     (
         anvil,
         Network::Custom(CustomNetwork {
             rpc_url_http: rpc_url,
             payment_token_address,
-            chunk_payments_address: *chunk_payments.contract.address(),
+            data_payments_address: *data_payments.contract.address(),
         }),
         network_token.contract.provider().wallet().clone(),
     )
@@ -65,7 +65,7 @@ async fn funded_wallet(network: &Network, genesis_wallet: EthereumWallet) -> Wal
 }

 #[tokio::test]
-async fn test_pay_for_quotes_and_chunk_payment_verification() {
+async fn test_pay_for_quotes_and_data_payment_verification() {
     const TRANSFERS: usize = 600;
     const EXPIRATION_TIMESTAMP_IN_SECS: u64 = 4102441200; // The year 2100

@@ -91,7 +91,7 @@ async fn test_pay_for_quotes_and_chunk_payment_verification() {
     for quote_payment in quote_payments.iter() {
         let tx_hash = *tx_hashes.get(&quote_payment.0).unwrap();

-        let result = verify_chunk_payment(
+        let result = verify_data_payment(
             &network,
             tx_hash,
             quote_payment.0,
diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs
index 3faada3562..8c3c87bf77 100644
--- a/sn_node/src/bin/safenode/subcommands.rs
+++ b/sn_node/src/bin/safenode/subcommands.rs
@@ -18,7 +18,7 @@ pub(crate) enum EvmNetworkCommand {
         /// The chunk payments contract address
         #[arg(long, short)]
-        chunk_payments_address: String,
+        data_payments_address: String,
     },
 }

@@ -30,11 +30,11 @@ impl Into<EvmNetwork> for EvmNetworkCommand {
             Self::EvmCustom {
                 rpc_url,
                 payment_token_address,
-                chunk_payments_address,
+                data_payments_address,
             } => EvmNetwork::Custom(EvmNetworkCustom::new(
                 &rpc_url,
                 &payment_token_address,
-                &chunk_payments_address,
+                &data_payments_address,
             )),
         }
     }
diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs
index ed668c4b38..0165688d1c 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -155,8 +155,8 @@ impl Launcher for LocalSafeLauncher { args.push(custom.rpc_url_http.to_string()); args.push("--payment-token-address".to_string()); args.push(custom.payment_token_address.to_string()); - args.push("--chunk-payments-address".to_string()); - args.push(custom.chunk_payments_address.to_string()); + args.push("--data-payments-address".to_string()); + args.push(custom.data_payments_address.to_string()); } } diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 4ec41ca5d2..2c1d85eb4a 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -18,28 +18,28 @@ pub fn evm_network_from_env() -> evmlib::Network { let evm_network = env::var("EVM_NETWORK").ok(); let arbitrum_flag = evm_network.as_deref() == Some("arbitrum-one"); - let (rpc_url, payment_token_address, chunk_payments_address) = if arbitrum_flag { + let (rpc_url, payment_token_address, data_payments_address) = if arbitrum_flag { ( evmlib::Network::ArbitrumOne.rpc_url().to_string(), evmlib::Network::ArbitrumOne .payment_token_address() .encode_hex_with_prefix(), evmlib::Network::ArbitrumOne - .chunk_payments_address() + .data_payments_address() .encode_hex_with_prefix(), ) } else { ( get_var_or_panic("RPC_URL"), get_var_or_panic("PAYMENT_TOKEN_ADDRESS"), - get_var_or_panic("CHUNK_PAYMENTS_ADDRESS"), + get_var_or_panic("DATA_PAYMENTS_ADDRESS"), ) }; evmlib::Network::Custom(CustomNetwork::new( &rpc_url, &payment_token_address, - &chunk_payments_address, + &data_payments_address, )) } From f050a0eff5cde77de4a1895f0c61f2b68891d942 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 4 Oct 2024 19:00:27 +0530 Subject: [PATCH 117/255] chore(metrics): use a single field to track peers that have shunned us --- sn_networking/src/metrics/bad_node.rs | 68 +++++++-------------------- 1 file changed, 16 insertions(+), 52 deletions(-) diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs index 006801d300..7b64e248ec 100644 --- a/sn_networking/src/metrics/bad_node.rs +++ b/sn_networking/src/metrics/bad_node.rs @@ -43,9 +43,7 @@ struct ShunnedByCloseGroup { // trackers close_group_peers: Vec, old_close_group_peers: VecDeque, - // The close group peer that shunned us - close_group_peers_that_have_shunned_us: HashSet, - old_close_group_peers_that_have_shunned_us: HashSet, + old_new_group_shunned_list: HashSet, } /// A struct to record the the number of reports against our node across different time frames. 
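The hunk above collapses the two per-group shun sets into a single `old_new_group_shunned_list`; the remaining hunks rewire the metric updates around it. A compact sketch of the resulting bookkeeping, simplified so it stands alone (plain integer counters stand in for the Prometheus gauges, `u64` ids for `PeerId`, and only the eviction path is shown):

```rust
use std::collections::{HashSet, VecDeque};

// Illustrative cap; the real code uses MAX_EVICTED_CLOSE_GROUP_PEERS.
const MAX_OLD: usize = 5;

#[derive(Default)]
struct ShunTracker {
    current: Vec<u64>,     // stand-in for close_group_peers
    old: VecDeque<u64>,    // stand-in for old_close_group_peers
    shunned: HashSet<u64>, // the single old_new_group_shunned_list
    metric_current: i64,
    metric_old: i64,
}

impl ShunTracker {
    fn record_shunned(&mut self, peer: u64) {
        // Count a peer at most once, against whichever group it sits in now.
        if !self.shunned.contains(&peer) {
            if self.current.contains(&peer) {
                self.metric_current += 1;
                self.shunned.insert(peer);
            } else if self.old.contains(&peer) {
                self.metric_old += 1;
                self.shunned.insert(peer);
            }
        }
    }

    fn evict(&mut self, peer: u64) {
        self.current.retain(|p| *p != peer);
        self.old.push_back(peer);
        // A shunner moving between groups only shifts the gauges; its single
        // entry in `shunned` stays put, which is why one set suffices.
        if self.shunned.contains(&peer) {
            self.metric_current -= 1;
            self.metric_old += 1;
        }
        // Bounded history: dropping the oldest evictee also forgets its record.
        while self.old.len() > MAX_OLD {
            if let Some(gone) = self.old.pop_front() {
                if self.shunned.remove(&gone) {
                    self.metric_old -= 1;
                }
            }
        }
    }
}
```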
@@ -128,8 +126,8 @@ impl BadNodeMetrics { close_group_peers: Vec::new(), old_close_group_peers: VecDeque::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), + // Shunned by old or new close group + old_new_group_shunned_list: HashSet::new(), }, }; @@ -168,18 +166,14 @@ impl BadNodeMetrics { impl ShunnedByCloseGroup { pub(crate) fn record_shunned_metric(&mut self, peer: PeerId) { // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before - if self.close_group_peers.contains(&peer) { - if !self.close_group_peers_that_have_shunned_us.contains(&peer) { + if !self.old_new_group_shunned_list.contains(&peer) { + if self.close_group_peers.contains(&peer) { self.metric_current_group.inc(); - self.close_group_peers_that_have_shunned_us.insert(peer); + self.old_new_group_shunned_list.insert(peer); + } else if self.old_close_group_peers.contains(&peer) { + self.metric_old_group.inc(); + self.old_new_group_shunned_list.insert(peer); } - } else if self.old_close_group_peers.contains(&peer) - && !self - .old_close_group_peers_that_have_shunned_us - .contains(&peer) - { - self.metric_old_group.inc(); - self.old_close_group_peers_that_have_shunned_us.insert(peer); } } @@ -197,35 +191,18 @@ impl ShunnedByCloseGroup { .collect(); for new_member in &new_members { // if it has shunned us before, update the metrics. - if self - .old_close_group_peers_that_have_shunned_us - .contains(new_member) - { + if self.old_new_group_shunned_list.contains(new_member) { self.metric_old_group.dec(); - self.old_close_group_peers_that_have_shunned_us - .remove(new_member); - self.metric_current_group.inc(); - self.close_group_peers_that_have_shunned_us - .insert(*new_member); } } for evicted_member in &evicted_members { self.old_close_group_peers.push_back(*evicted_member); - // if it has shunned us before, update the metrics. 
- if self - .close_group_peers_that_have_shunned_us - .contains(evicted_member) - { + if self.old_new_group_shunned_list.contains(evicted_member) { self.metric_current_group.dec(); - self.close_group_peers_that_have_shunned_us - .remove(evicted_member); - self.metric_old_group.inc(); - self.old_close_group_peers_that_have_shunned_us - .insert(*evicted_member); } } @@ -235,18 +212,9 @@ impl ShunnedByCloseGroup { while self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { if let Some(removed_peer) = self.old_close_group_peers.pop_front() { - if self - .old_close_group_peers_that_have_shunned_us - .remove(&removed_peer) - { + if self.old_new_group_shunned_list.remove(&removed_peer) { self.metric_old_group.dec(); } - if self - .close_group_peers_that_have_shunned_us - .remove(&removed_peer) - { - self.metric_current_group.dec(); - } } } } @@ -448,8 +416,7 @@ mod tests { close_group_peers: Vec::new(), old_close_group_peers: VecDeque::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), + old_new_group_shunned_list: HashSet::new(), }; close_group_shunned.record_shunned_metric(PeerId::random()); @@ -467,8 +434,7 @@ mod tests { close_group_peers: Vec::new(), old_close_group_peers: VecDeque::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), + old_new_group_shunned_list: HashSet::new(), }; close_group_shunned.update_close_group_peers(vec![ PeerId::random(), @@ -508,8 +474,7 @@ mod tests { close_group_peers: Vec::new(), old_close_group_peers: VecDeque::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), + old_new_group_shunned_list: HashSet::new(), }; close_group_shunned.update_close_group_peers(vec![ PeerId::random(), @@ -565,8 +530,7 @@ mod tests { close_group_peers: Vec::new(), old_close_group_peers: VecDeque::new(), - close_group_peers_that_have_shunned_us: HashSet::new(), - old_close_group_peers_that_have_shunned_us: HashSet::new(), + old_new_group_shunned_list: HashSet::new(), }; close_group_shunned.update_close_group_peers(vec![ PeerId::random(), From 1f98efad3075cef184a18ed604d57376cea1ec2a Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Fri, 4 Oct 2024 16:37:16 +0200 Subject: [PATCH 118/255] fix: cargo fmt --- evmlib/tests/data_payments.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/evmlib/tests/data_payments.rs b/evmlib/tests/data_payments.rs index 129e06c62f..ed9e2ac413 100644 --- a/evmlib/tests/data_payments.rs +++ b/evmlib/tests/data_payments.rs @@ -12,7 +12,7 @@ use alloy::providers::{Identity, ProviderBuilder, ReqwestProvider, WalletProvide use alloy::signers::local::{LocalSigner, PrivateKeySigner}; use alloy::transports::http::{Client, Http}; use evmlib::common::U256; -use evmlib::contract::data_payments{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; +use evmlib::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION}; use evmlib::contract::network_token::NetworkToken; use evmlib::testnet::{deploy_data_payments_contract, deploy_network_token_contract, start_node}; use evmlib::wallet::wallet_address; From 134872f20cff3f94f1f4c305ea0e4159f4ded122 Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 7 Oct 2024 14:40:30 +0900 Subject: [PATCH 119/255] feat: registers in cli --- autonomi/src/client/data.rs | 2 +- autonomi/src/client/registers.rs | 75 ++++++++--- autonomi/tests/register.rs | 20 +-- 
autonomi_cli/Cargo.toml | 2 +- autonomi_cli/src/access/data_dir.rs | 19 +++ autonomi_cli/src/access/keys.rs | 103 +++++++++++++++ autonomi_cli/src/access/mod.rs | 11 ++ autonomi_cli/src/access/network.rs | 29 +++++ autonomi_cli/src/commands.rs | 61 ++++++--- autonomi_cli/src/commands/file.rs | 8 +- autonomi_cli/src/commands/register.rs | 119 +++++++++++++++--- autonomi_cli/src/main.rs | 6 +- autonomi_cli/src/utils.rs | 89 ------------- .../reactivate_examples/register_inspect.rs | 2 +- sn_node/tests/verify_data_location.rs | 15 ++- sn_registers/src/address.rs | 3 +- 16 files changed, 400 insertions(+), 164 deletions(-) create mode 100644 autonomi_cli/src/access/data_dir.rs create mode 100644 autonomi_cli/src/access/keys.rs create mode 100644 autonomi_cli/src/access/mod.rs create mode 100644 autonomi_cli/src/access/network.rs delete mode 100644 autonomi_cli/src/utils.rs diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index e0650a2ca9..99e1c23e88 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -236,7 +236,7 @@ impl Client { Ok((proofs, skipped_chunks)) } - async fn get_store_quotes( + pub(crate) async fn get_store_quotes( &self, content_addrs: impl Iterator, ) -> Result, PayError> { diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index e5e3f24866..a94e198218 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -1,8 +1,19 @@ -use std::collections::BTreeSet; +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +/// Register Secret Key +pub use bls::SecretKey as RegisterSecretKey; +use sn_evm::Amount; +use sn_evm::AttoTokens; +pub use sn_registers::RegisterAddress; use crate::client::data::PayError; use crate::client::Client; -use bls::SecretKey; use bytes::Bytes; use evmlib::wallet::Wallet; use libp2p::kad::{Quorum, Record}; @@ -12,11 +23,11 @@ use sn_networking::PutRecordCfg; use sn_protocol::storage::try_deserialize_record; use sn_protocol::storage::try_serialize_record; use sn_protocol::storage::RecordKind; -use sn_protocol::storage::RegisterAddress; use sn_protocol::NetworkAddress; use sn_registers::Register as ClientRegister; use sn_registers::SignedRegister; use sn_registers::{EntryHash, Permissions}; +use std::collections::BTreeSet; use xor_name::XorName; #[derive(Debug, thiserror::Error)] @@ -63,11 +74,13 @@ impl Register { } impl Client { + /// Generate a new register key + pub fn register_generate_key(&self) -> RegisterSecretKey { + RegisterSecretKey::random() + } + /// Fetches a Register from the network. - pub async fn fetch_register( - &self, - address: RegisterAddress, - ) -> Result { + pub async fn register_get(&self, address: RegisterAddress) -> Result { let network_address = NetworkAddress::from_register_address(address); let key = network_address.to_record_key(); @@ -93,11 +106,11 @@ impl Client { } /// Updates a Register on the network with a new value. This will overwrite existing value(s). 
- pub async fn update_register( + pub async fn register_update( &self, register: Register, new_value: Bytes, - owner: SecretKey, + owner: RegisterSecretKey, ) -> Result<(), RegisterError> { // Fetch the current register let mut signed_register = register.inner; @@ -112,7 +125,7 @@ impl Client { // Write the new value to all branches let (_, op) = register - .write(new_value.to_vec(), &children, &owner) + .write(new_value.into(), &children, &owner) .map_err(RegisterError::Write)?; // Apply the operation to the register @@ -143,15 +156,49 @@ impl Client { Ok(()) } - /// Creates a new Register with an initial value and uploads it to the network. - pub async fn create_register( + /// Get the cost to create a register + pub async fn register_cost( + &self, + name: String, + owner: RegisterSecretKey, + ) -> Result { + // get register address + let pk = owner.public_key(); + let name = XorName::from_content_parts(&[name.as_bytes()]); + let permissions = Permissions::new_with([pk]); + let register = ClientRegister::new(pk, name, permissions); + let reg_xor = register.address().xorname(); + + // get cost to store register + // NB TODO: register should be priced differently from other data + let cost_map = self.get_store_quotes(std::iter::once(reg_xor)).await?; + let total_cost = AttoTokens::from_atto( + cost_map + .values() + .map(|quote| quote.2.cost.as_atto()) + .sum::(), + ); + + Ok(total_cost) + } + + /// Get the address of a register from its name and owner + pub fn register_address(&self, name: &str, owner: &RegisterSecretKey) -> RegisterAddress { + let pk = owner.public_key(); + let name = XorName::from_content_parts(&[name.as_bytes()]); + RegisterAddress::new(name, pk) + } + + /// Creates a new Register with a name and an initial value and uploads it to the network. + pub async fn register_create( &self, value: Bytes, - name: XorName, - owner: SecretKey, + name: &str, + owner: RegisterSecretKey, wallet: &Wallet, ) -> Result { let pk = owner.public_key(); + let name = XorName::from_content_parts(&[name.as_bytes()]); // Owner can write to the register. 
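        // (Note: `Permissions::new_with([pk])` below grants write access to
        // the owner's key alone, while reads need no key at all; `register_get`
        // above takes none, so a register is publicly readable but owner-writable.)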
let permissions = Permissions::new_with([pk]); diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 3cee58d0d2..f03cf34a4c 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -4,10 +4,10 @@ mod common; use autonomi::Client; use bytes::Bytes; +use rand::Rng; use std::time::Duration; use test_utils::evm::get_funded_wallet; use tokio::time::sleep; -use xor_name::XorName; #[tokio::test] async fn register() { @@ -20,30 +20,30 @@ async fn register() { let key = bls::SecretKey::random(); // Create a register with the value [1, 2, 3, 4] + let rand_name: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(10) + .map(char::from) + .collect(); let register = client - .create_register( - vec![1, 2, 3, 4].into(), - XorName::random(&mut rand::thread_rng()), - key.clone(), - &wallet, - ) + .register_create(vec![1, 2, 3, 4].into(), &rand_name, key.clone(), &wallet) .await .unwrap(); sleep(Duration::from_secs(10)).await; // Fetch the register again - let register = client.fetch_register(*register.address()).await.unwrap(); + let register = client.register_get(*register.address()).await.unwrap(); // Update the register with the value [5, 6, 7, 8] client - .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) + .register_update(register.clone(), vec![5, 6, 7, 8].into(), key) .await .unwrap(); sleep(Duration::from_secs(2)).await; // Fetch and verify the register contains the updated value - let register = client.fetch_register(*register.address()).await.unwrap(); + let register = client.register_get(*register.address()).await.unwrap(); assert_eq!(register.values(), vec![Bytes::from(vec![5, 6, 7, 8])]); } diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml index e779493126..b6a0678ee5 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi_cli/Cargo.toml @@ -10,7 +10,7 @@ metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] [dependencies] -autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files"] } +autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files", "registers"] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" dirs-next = "~2.0.0" diff --git a/autonomi_cli/src/access/data_dir.rs b/autonomi_cli/src/access/data_dir.rs new file mode 100644 index 0000000000..af0db16c2c --- /dev/null +++ b/autonomi_cli/src/access/data_dir.rs @@ -0,0 +1,19 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+
+use color_eyre::eyre::{eyre, Context, Result};
+use std::path::PathBuf;
+
+pub fn get_client_data_dir_path() -> Result<PathBuf> {
+    let mut home_dirs = dirs_next::data_dir()
+        .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?;
+    home_dirs.push("safe");
+    home_dirs.push("client");
+    std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?;
+    Ok(home_dirs)
+}
diff --git a/autonomi_cli/src/access/keys.rs b/autonomi_cli/src/access/keys.rs
new file mode 100644
index 0000000000..a11de06b7a
--- /dev/null
+++ b/autonomi_cli/src/access/keys.rs
@@ -0,0 +1,103 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use autonomi::client::registers::RegisterSecretKey;
+use autonomi::Wallet;
+use color_eyre::eyre::{Context, Result};
+use color_eyre::Section;
+use std::env;
+use std::fs;
+use std::path::PathBuf;
+
+const SECRET_KEY_ENV: &str = "SECRET_KEY";
+const REGISTER_SIGNING_KEY_ENV: &str = "REGISTER_SIGNING_KEY";
+
+const SECRET_KEY_FILE: &str = "secret_key";
+const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key";
+
+/// EVM wallet
+pub fn load_evm_wallet() -> Result<Wallet> {
+    let secret_key =
+        get_secret_key().wrap_err("The secret key is required to perform this action")?;
+    let network = crate::network::get_evm_network_from_environment()
+        .wrap_err("Failed to load EVM network")?;
+    let wallet = Wallet::new_from_private_key(network, &secret_key)
+        .wrap_err("Failed to load EVM wallet from key")?;
+    Ok(wallet)
+}
+
+/// EVM wallet private key
+pub fn get_secret_key() -> Result<String> {
+    // try env var first
+    let why_env_failed = match env::var(SECRET_KEY_ENV) {
+        Ok(key) => return Ok(key),
+        Err(e) => e,
+    };
+
+    // try from data dir
+    let dir = super::data_dir::get_client_data_dir_path()
+        .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir"))
+        .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var"))?;
+
+    // load the key from file
+    let key_path = dir.join(SECRET_KEY_FILE);
+    fs::read_to_string(&key_path)
+        .wrap_err("Failed to read secret key from file")
+        .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY_ENV} env var or have the key in a file at {key_path:?}"))
+        .with_suggestion(|| "the secret key should be a hex encoded string of your evm wallet private key")
+}
+
+pub fn create_register_signing_key_file(key: RegisterSecretKey) -> Result<PathBuf> {
+    let dir = super::data_dir::get_client_data_dir_path()
+        .wrap_err("Could not access directory to write key to")?;
+    let file_path = dir.join(REGISTER_SIGNING_KEY_FILE);
+    fs::write(&file_path, key.to_hex()).wrap_err("Could not write key to file")?;
+    Ok(file_path)
+}
+
+fn parse_register_signing_key(key_hex: &str) -> Result<RegisterSecretKey> {
+    RegisterSecretKey::from_hex(key_hex)
+        .wrap_err("Failed to parse register signing key")
+        .with_suggestion(|| {
+            "the register signing key should be a hex encoded string of a bls secret key"
+        })
+        .with_suggestion(|| {
+            "you can generate a new secret key with the `register generate-key` subcommand"
+        })
+}
+
+pub fn get_register_signing_key() -> Result<RegisterSecretKey> {
+    // try env var first
+    let why_env_failed = match env::var(REGISTER_SIGNING_KEY_ENV) {
+        Ok(key) => return parse_register_signing_key(&key),
+        Err(e) => e,
+    };
+
+    // try from data dir
+    let dir = super::data_dir::get_client_data_dir_path()
+        .wrap_err(format!("Failed to obtain register signing key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir"))
+        .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY_ENV} env var"))
+        .with_suggestion(|| "you can generate a new secret key with the `register generate-key` subcommand")?;
+
+    // load the key from file
+    let key_path = dir.join(REGISTER_SIGNING_KEY_FILE);
+    let key_hex = fs::read_to_string(&key_path)
+        .wrap_err("Failed to read secret key from file")
+        .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY_ENV} env var or have the key in a file at {key_path:?}"))
+        .with_suggestion(|| "you can generate a new secret key with the `register generate-key` subcommand")?;
+
+    // parse the key
+    parse_register_signing_key(&key_hex)
+}
+
+pub fn get_register_signing_key_path() -> Result<PathBuf> {
+    let dir = super::data_dir::get_client_data_dir_path()
+        .wrap_err("Could not access directory for register signing key")?;
+    let file_path = dir.join(REGISTER_SIGNING_KEY_FILE);
+    Ok(file_path)
+}
diff --git a/autonomi_cli/src/access/mod.rs b/autonomi_cli/src/access/mod.rs
new file mode 100644
index 0000000000..ac80eeca88
--- /dev/null
+++ b/autonomi_cli/src/access/mod.rs
@@ -0,0 +1,11 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+pub mod data_dir;
+pub mod keys;
+pub mod network;
diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs
new file mode 100644
index 0000000000..b611161bcd
--- /dev/null
+++ b/autonomi_cli/src/access/network.rs
@@ -0,0 +1,29 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use autonomi::Multiaddr;
+use autonomi::Network;
+use color_eyre::eyre::eyre;
+use color_eyre::eyre::Context;
+use color_eyre::Result;
+use color_eyre::Section;
+use sn_peers_acquisition::PeersArgs;
+
+use sn_peers_acquisition::SAFE_PEERS_ENV;
+
+pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> {
+    peers.get_peers().await
+        .wrap_err("Please provide valid Network peers to connect to")
+        .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var"))
+        .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere")
+}
+
+pub(crate) fn get_evm_network_from_environment() -> Result<Network> {
+    evmlib::utils::evm_network_from_env()
+        .map_err(|err| eyre!("Failed to get EVM network from environment: {err}"))
+}
diff --git a/autonomi_cli/src/commands.rs b/autonomi_cli/src/commands.rs
index 12d4af26f1..a3bd5064a9 100644
--- a/autonomi_cli/src/commands.rs
+++ b/autonomi_cli/src/commands.rs
@@ -64,6 +64,14 @@ pub enum FileCmd {

 #[derive(Subcommand, Debug)]
 pub enum RegisterCmd {
+    /// Generate a new register key.
+    GenerateKey {
+        /// Overwrite existing key if it exists
+        /// Warning: overwriting the existing key will result in loss of access to any existing registers created using that key
+        #[arg(short, long)]
+        overwrite: bool,
+    },
+
     /// Estimate cost to register a name.
     Cost {
         /// The name to register.
@@ -80,16 +88,26 @@ pub enum RegisterCmd {

     /// Edit an existing register.
     Edit {
-        /// The name of the register.
-        name: String,
+        /// Use the name of the register instead of the address
+        /// Note that only the owner of the register can use this shorthand as the address can be generated from the name and register key.
+        #[arg(short, long)]
+        name: bool,
+        /// The address of the register
+        /// With the name option on the address will be used as a name
+        address: String,
         /// The new value to store in the register.
         value: String,
     },

     /// Get the value of a register.
     Get {
-        /// The name of the register.
-        name: String,
+        /// Use the name of the register instead of the address
+        /// Note that only the owner of the register can use this shorthand as the address can be generated from the name and register key.
+        #[arg(short, long)]
+        name: bool,
+        /// The address of the register
+        /// With the name option on the address will be used as a name
+        address: String,
     },

     /// List previous registers
@@ -109,27 +127,36 @@ pub enum VaultCmd {
 }

 pub async fn handle_subcommand(opt: Opt) -> Result<()> {
-    let peers = crate::utils::get_peers(opt.peers).await?;
+    let peers = crate::access::network::get_peers(opt.peers);
     let cmd = opt.command;

     match cmd {
         SubCmd::File { command } => match command {
-            FileCmd::Cost { file } => file::cost(&file, peers).await,
-            FileCmd::Upload { file } => file::upload(&file, peers).await,
-            FileCmd::Download { addr, dest_file } => file::download(&addr, &dest_file, peers).await,
-            FileCmd::List => file::list(peers),
+            FileCmd::Cost { file } => file::cost(&file, peers.await?).await,
+            FileCmd::Upload { file } => file::upload(&file, peers.await?).await,
+            FileCmd::Download { addr, dest_file } => {
+                file::download(&addr, &dest_file, peers.await?).await
+            }
+            FileCmd::List => file::list(peers.await?),
         },
         SubCmd::Register { command } => match command {
-            RegisterCmd::Cost { name } => register::cost(&name, peers),
-            RegisterCmd::Create { name, value } => register::create(&name, &value, peers),
-            RegisterCmd::Edit { name, value } => register::edit(&name, &value, peers),
-            RegisterCmd::Get { name } => register::get(&name, peers),
-            RegisterCmd::List => register::list(peers),
+            RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite),
+            RegisterCmd::Cost { name } => register::cost(&name, peers.await?).await,
+            RegisterCmd::Create { name, value } => {
+                register::create(&name, &value, peers.await?).await
+            }
+            RegisterCmd::Edit {
+                address,
+                name,
+                value,
+            } => register::edit(address, name, &value, peers.await?).await,
+            RegisterCmd::Get { address, name } => register::get(address, name, peers.await?).await,
+            RegisterCmd::List => register::list(peers.await?),
         },
         SubCmd::Vault { command } => match command {
-            VaultCmd::Cost => vault::cost(peers),
-            VaultCmd::Create => vault::create(peers),
-            VaultCmd::Sync => vault::sync(peers),
+            VaultCmd::Cost => vault::cost(peers.await?),
+            VaultCmd::Create => vault::create(peers.await?),
+            VaultCmd::Sync => vault::sync(peers.await?),
         },
     }
 }
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
index 45b60a24df..672c779f89 100644
--- a/autonomi_cli/src/commands/file.rs
+++ b/autonomi_cli/src/commands/file.rs
@@ -8,7 +8,6 @@

 use autonomi::client::address::xorname_to_str;
 use autonomi::Multiaddr;
-use autonomi::Wallet;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
 use std::path::PathBuf;
@@ -28,12 +27,7 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
 }

 pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
-    let secret_key = crate::utils::get_secret_key()
-        .wrap_err("The secret key is required to perform this action")?;
-    let network = crate::utils::get_evm_network_from_environment()?;
-    let wallet =
-        Wallet::new_from_private_key(network, &secret_key).wrap_err("Failed to load wallet")?;
-
+    let wallet = crate::keys::load_evm_wallet()?;
     let mut client = crate::actions::connect_to_network(peers).await?;

     println!("Uploading data to network...");
diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs
index 6afa26c755..3f73f6d650 100644
--- a/autonomi_cli/src/commands/register.rs
+++ b/autonomi_cli/src/commands/register.rs
@@ -6,37 +6,128 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.

+use autonomi::client::registers::RegisterAddress;
+use autonomi::client::registers::RegisterSecretKey;
 use autonomi::Multiaddr;
+use color_eyre::eyre::eyre;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
+use color_eyre::Section;

-pub fn cost(_name: &str, _peers: Vec<Multiaddr>) -> Result<()> {
-    let _register_key = crate::utils::get_register_signing_key()
+pub fn generate_key(overwrite: bool) -> Result<()> {
+    // check if the key already exists
+    let key_path = crate::keys::get_register_signing_key_path()?;
+    if key_path.exists() && !overwrite {
+        return Err(eyre!("Register key already exists at: {}", key_path.display()))
+            .with_suggestion(|| "if you want to overwrite the existing key, run the command with the --overwrite flag")
+            .with_warning(|| "overwriting the existing key might result in loss of access to any existing registers created using that key");
+    }
+
+    // generate and write a new key to file
+    let key = RegisterSecretKey::random();
+    let path = crate::keys::create_register_signing_key_file(key)
+        .wrap_err("Failed to create new register key")?;
+    println!("✅ Created new register key at: {}", path.display());
+    Ok(())
+}
+
+pub async fn cost(name: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::keys::get_register_signing_key()
         .wrap_err("The register key is required to perform this action")?;
-    println!("The register feature is coming soon!");
+    let client = crate::actions::connect_to_network(peers).await?;
+
+    let cost = client
+        .register_cost(name.to_string(), register_key)
+        .await
+        .wrap_err("Failed to get cost for register")?;
+    println!("✅ The estimated cost to create a register with name {name} is: {cost}");
     Ok(())
 }

-pub fn create(_name: &str, _value: &str, _peers: Vec<Multiaddr>) -> Result<()> {
-    let _secret_key = crate::utils::get_secret_key()
-        .wrap_err("The secret key is required to perform this action")?;
-    let _register_key = crate::utils::get_register_signing_key()
+pub async fn create(name: &str, value: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let wallet = crate::keys::load_evm_wallet()?;
+    let register_key = crate::keys::get_register_signing_key()
         .wrap_err("The register key is required to perform this action")?;
-    println!("The register feature is coming soon!");
+    let client = crate::actions::connect_to_network(peers).await?;
+
+    println!("Creating register with name: {name}");
+    let register = client
+        .register_create(
+            value.as_bytes().to_vec().into(),
+            name,
+            register_key,
+            &wallet,
+        )
+        .await
+        .wrap_err("Failed to create register")?;
+    let address = register.address();
+
+    println!("✅ Register created at address: {address}");
+    println!("With name: {name}");
+    println!("And initial value: [{value}]");
     Ok(())
 }

-pub fn edit(_name: &str, _value: &str, _peers: Vec<Multiaddr>) -> Result<()> {
-    let _register_key = crate::utils::get_register_signing_key()
+pub async fn edit(address: String, name: bool, value: &str, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::keys::get_register_signing_key()
         .wrap_err("The register key is required to perform this action")?;
-    println!("The register feature is coming soon!");
+    let client = crate::actions::connect_to_network(peers).await?;
+
+    let address = if name {
+        client.register_address(&address, &register_key)
+    } else {
+        RegisterAddress::from_hex(&address)
+            .wrap_err(format!("Failed to parse register address: {address}"))?
+    };
+
+    println!("Getting register at address: {address}");
+    let register = client
+        .register_get(address)
+        .await
+        .wrap_err(format!("Failed to get register at address: {address}"))?;
+    println!("Found register at address: {address}");
+
+    println!("Updating register with new value: {value}");
+    client
+        .register_update(register, value.as_bytes().to_vec().into(), register_key)
+        .await
+        .wrap_err(format!("Failed to update register at address: {address}"))?;
+
+    println!("✅ Successfully updated register");
+    println!("With value: [{value}]");
+
     Ok(())
 }

-pub fn get(_name: &str, _peers: Vec<Multiaddr>) -> Result<()> {
-    let _register_key = crate::utils::get_register_signing_key()
+pub async fn get(address: String, name: bool, peers: Vec<Multiaddr>) -> Result<()> {
+    let register_key = crate::keys::get_register_signing_key()
         .wrap_err("The register key is required to perform this action")?;
-    println!("The register feature is coming soon!");
+    let client = crate::actions::connect_to_network(peers).await?;
+
+    let address = if name {
+        client.register_address(&address, &register_key)
+    } else {
+        RegisterAddress::from_hex(&address)
+            .wrap_err(format!("Failed to parse register address: {address}"))?
+    };
+
+    println!("Getting register at address: {address}");
+    let register = client
+        .register_get(address)
+        .await
+        .wrap_err(format!("Failed to get register at address: {address}"))?;
+    let values = register.values();
+
+    println!("✅ Register found at address: {address}");
+    match values.as_slice() {
+        [one] => println!("With value: [{:?}]", String::from_utf8_lossy(one)),
+        _ => {
+            println!("With multiple concurrent values:");
+            for value in values.iter() {
+                println!("[{:?}]", String::from_utf8_lossy(value));
+            }
+        }
+    }
     Ok(())
 }
diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs
index 6aaa446582..8e7a9e1a5b 100644
--- a/autonomi_cli/src/main.rs
+++ b/autonomi_cli/src/main.rs
@@ -9,11 +9,15 @@
 #[macro_use]
 extern crate tracing;

+mod access;
 mod actions;
 mod commands;
 mod log_metrics;
 mod opt;
-mod utils;
+
+pub use access::data_dir;
+pub use access::keys;
+pub use access::network;

 use clap::Parser;
 use color_eyre::Result;
diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs
deleted file mode 100644
index 2d7cce6d19..0000000000
--- a/autonomi_cli/src/utils.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
- -use autonomi::Multiaddr; -use autonomi::Network; -use color_eyre::eyre::eyre; -use color_eyre::eyre::Context; -use color_eyre::Result; -use color_eyre::Section; -use sn_peers_acquisition::PeersArgs; -use std::env; -use std::fs; -use std::path::PathBuf; - -use sn_peers_acquisition::SAFE_PEERS_ENV; - -// NB TODO: use those as return values for the functions below -// use autonomi::register::RegisterKey; -// use autonomi::wallet::WalletKey; - -const SECRET_KEY: &str = "SECRET_KEY"; -const REGISTER_SIGNING_KEY: &str = "REGISTER_SIGNING_KEY"; - -const SECRET_KEY_FILE: &str = "secret_key"; -const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key"; - -pub fn get_secret_key() -> Result<String> { - // try env var first - let why_env_failed = match env::var(SECRET_KEY) { - Ok(key) => return Ok(key), - Err(e) => e, - }; - - // try from data dir - let dir = get_client_data_dir_path() - .wrap_err(format!("Failed to obtain secret key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY} env var"))?; - - // load the key from file - let key_path = dir.join(SECRET_KEY_FILE); - fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file") - .with_suggestion(|| format!("make sure you've provided the {SECRET_KEY} env var or have the key in a file at {key_path:?}")) -} - -pub fn get_register_signing_key() -> Result<String> { - // try env var first - let why_env_failed = match env::var(REGISTER_SIGNING_KEY) { - Ok(key) => return Ok(key), - Err(e) => e, - }; - - // try from data dir - let dir = get_client_data_dir_path() - .wrap_err(format!("Failed to obtain register signing key from env var: {why_env_failed}, reading from disk also failed as couldn't access data dir")) - .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY} env var"))?; - - // load the key from file - let key_path = dir.join(REGISTER_SIGNING_KEY_FILE); - fs::read_to_string(&key_path) - .wrap_err("Failed to read secret key from file") - .with_suggestion(|| format!("make sure you've provided the {REGISTER_SIGNING_KEY} env var or have the key in a file at {key_path:?}")) -} - -pub fn get_client_data_dir_path() -> Result<PathBuf> { - let mut home_dirs = dirs_next::data_dir() - .ok_or_else(|| eyre!("Failed to obtain data dir, your OS might not be supported."))?; - home_dirs.push("safe"); - home_dirs.push("client"); - std::fs::create_dir_all(home_dirs.as_path()).wrap_err("Failed to create data dir")?; - Ok(home_dirs) -} - -pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> { - peers.get_peers().await - .wrap_err("Please provide valid Network peers to connect to") - .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var")) - .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere") -} - -pub(crate) fn get_evm_network_from_environment() -> Result<Network> { - evmlib::utils::evm_network_from_env() - .map_err(|err| eyre!("Failed to get EVM network from environment: {err}")) -} diff --git a/sn_node/reactivate_examples/register_inspect.rs b/sn_node/reactivate_examples/register_inspect.rs index 2873aa1139..03f35ffa6e 100644 --- a/sn_node/reactivate_examples/register_inspect.rs +++ b/sn_node/reactivate_examples/register_inspect.rs @@ -73,7 +73,7 @@ // .join("client"); // let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) -// .wrap_err("Unable to read
wallet file in {root_dir:?}") +// .wrap_err(format!("Unable to read wallet file in {root_dir:?}")) // .suggestion( // "If you have an old wallet file, it may no longer be compatible. Try removing it", // )?; diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 3a1c091dc1..d387bd76b6 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -34,7 +34,6 @@ use std::{ }; use tonic::Request; use tracing::{debug, error, info}; -use xor_name::XorName; const CHUNK_SIZE: usize = 1024; @@ -374,13 +373,13 @@ async fn store_registers( let key = bls::SecretKey::random(); // Create a register with the value [1, 2, 3, 4] + let rand_name: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(10) + .map(char::from) + .collect(); let register = client - .create_register( - vec![1, 2, 3, 4].into(), - XorName::random(&mut rand::thread_rng()), - key.clone(), - wallet, - ) + .register_create(vec![1, 2, 3, 4].into(), &rand_name, key.clone(), wallet) .await?; println!("Created Register at {:?}", register.address()); @@ -389,7 +388,7 @@ async fn store_registers( // Update the register with the value [5, 6, 7, 8] client - .update_register(register.clone(), vec![5, 6, 7, 8].into(), key) + .register_update(register.clone(), vec![5, 6, 7, 8].into(), key) .await?; println!("Updated Register at {:?}", register.address()); diff --git a/sn_registers/src/address.rs b/sn_registers/src/address.rs index d0cdacb0ba..f8f2c346a1 100644 --- a/sn_registers/src/address.rs +++ b/sn_registers/src/address.rs @@ -26,8 +26,9 @@ pub struct RegisterAddress { } impl Display for RegisterAddress { + /// Display the register address in hex format that can be parsed by `RegisterAddress::from_hex`. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}({:?})", &self.to_hex()[0..6], self.xorname()) + write!(f, "{}", &self.to_hex()) } } From ca2bc0d52d56be89f6ba139114b237bb53b878a6 Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 7 Oct 2024 14:43:18 +0900 Subject: [PATCH 120/255] chore: improve err msgs --- autonomi_cli/src/commands/register.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index 3f73f6d650..7971f145da 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -77,7 +77,8 @@ pub async fn edit(address: String, name: bool, value: &str, peers: Vec<Multiaddr>) -> Result<( client.register_address(&address, &register_key) } else {
}; println!("Getting register at address: {address}"); From 703b42f308638f1e1fa458b99ccbecdcf1c831ed Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 7 Oct 2024 10:46:59 +0200 Subject: [PATCH 121/255] test(autonomi): load from compile time env --- autonomi/Cargo.toml | 1 + autonomi/tests/common.rs | 20 ++++++++++++++++++++ autonomi/tests/wasm.rs | 2 +- evmlib/src/utils.rs | 25 +++++++++++++++++++++---- 4 files changed, 43 insertions(+), 5 deletions(-) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 8ac45f7c5b..0b2920aa37 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -49,6 +49,7 @@ futures = "0.3.30" [dev-dependencies] eyre = "0.6.5" tracing-subscriber = { version = "0.3", features = ["env-filter"] } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. test_utils = { path = "../test_utils" } diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs index 5277ea1d56..0c781f7279 100644 --- a/autonomi/tests/common.rs +++ b/autonomi/tests/common.rs @@ -1,5 +1,6 @@ use bytes::Bytes; use rand::Rng; +use sn_peers_acquisition::parse_peer_addr; #[allow(dead_code)] pub fn gen_random_data(len: usize) -> Bytes { @@ -32,3 +33,22 @@ pub fn enable_logging_wasm(directive: impl AsRef<str>) { .with(tracing_subscriber::EnvFilter::new(directive)) .init(); } + +/// Get peers from `SAFE_PEERS` environment variable, first from runtime, then compile-time. +/// If no peers are found and `local` is not enabled, this will panic. Otherwise, it will return an empty list. +#[allow(dead_code)] +pub fn peers_from_run_or_compile_time_env( +) -> Result<Vec<libp2p::Multiaddr>, libp2p::multiaddr::Error> { + let peers_str = std::env::var("SAFE_PEERS") + .ok() + .or_else(|| option_env!("SAFE_PEERS").map(|s| s.to_string())); + + let Some(peers_str) = peers_str else { + #[cfg(not(feature = "local"))] + panic!("SAFE_PEERS environment variable not set and `local` feature is not enabled"); + #[cfg(feature = "local")] + return Ok(vec![]); + }; + + peers_str.split(',').map(parse_peer_addr).collect() +} diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index 25f3643e68..703d93316b 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -14,7 +14,7 @@ wasm_bindgen_test_configure!(run_in_browser); async fn put() -> Result<(), Box<dyn std::error::Error>> { common::enable_logging_wasm("sn_networking,autonomi,wasm"); - let client = Client::connect(&test_utils::peers_from_env()?) + let client = Client::connect(&common::peers_from_run_or_compile_time_env()?) .await .unwrap(); let wallet = test_utils::evm::get_funded_wallet(); diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 36501a81a7..b54f391cea 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -4,6 +4,19 @@ use dirs_next::data_dir; use rand::Rng; use std::env; +// Get environment variable from runtime or build time, in that order. Returns `None` if not set. +macro_rules!
env_from_runtime_or_compiletime { + ($var:literal) => {{ + if let Ok(val) = env::var($var) { + Some(val) + } else if let Some(val) = option_env!($var) { + Some(val.to_string()) + } else { + None + } + }}; +} + pub const EVM_TESTNET_CSV_FILENAME: &str = "evm_testnet_data.csv"; #[derive(thiserror::Error, Debug)] @@ -24,10 +37,14 @@ pub fn dummy_hash() -> Hash { /// Get the `Network` from environment variables pub fn evm_network_from_env() -> Result<Network, Error> { - let evm_vars = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "DATA_PAYMENTS_ADDRESS"] - .iter() - .map(|var| env::var(var).map_err(|_| Error::FailedToGetEvmNetwork)) - .collect::<Result<Vec<String>, Error>>(); + let evm_vars = [ + env_from_runtime_or_compiletime!("RPC_URL"), + env_from_runtime_or_compiletime!("PAYMENT_TOKEN_ADDRESS"), + env_from_runtime_or_compiletime!("DATA_PAYMENTS_ADDRESS"), + ] + .into_iter() + .map(|var| var.ok_or(Error::FailedToGetEvmNetwork)) + .collect::<Result<Vec<String>, Error>>(); let use_local_evm = std::env::var("EVM_NETWORK") .map(|v| v == "local") From 041ff0a76300eed3fcd7385b675a2c62eb19fa21 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 7 Oct 2024 10:47:48 +0200 Subject: [PATCH 122/255] test(autonomi): patch alloy to fix wasm test --- Cargo.lock | 41 +++++++++++++++++++++-------------------- Cargo.toml | 2 +- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fece06fe20..9dafb7099c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,7 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-consensus", "alloy-contract", @@ -151,7 +151,7 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +166,7 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -237,7 +237,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -254,7 +254,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "alloy-serde", @@ -276,7 +276,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1"
+source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -289,7 +289,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,7 +309,7 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -321,7 +321,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -364,7 +364,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -424,7 +424,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -440,12 +440,13 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -457,7 +458,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "alloy-serde", @@ -467,7 +468,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -485,7 +486,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = 
"git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "serde", @@ -495,7 +496,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-primitives", "async-trait", @@ -508,7 +509,7 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-consensus", "alloy-network", @@ -596,7 +597,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -616,7 +617,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#a1733071f834d2d965fca21ebb7d7f68982608d1" +source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -9842,7 +9843,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d163582506..291761495b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,4 +61,4 @@ push = false tag = false [patch.crates-io] -alloy = { git = 'https://github.com/alloy-rs/alloy.git', branch = "main" } +alloy = { git = 'https://github.com/b-zee/alloy.git', branch = "fix-rpc-client-sleep-call" } From a0bc96483d23a96b086b7e2d60833feb96b85910 Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 4 Oct 2024 23:03:52 +0800 Subject: [PATCH 123/255] feat!: client do not select bad_node as a payee BREAKING CHANGE --- sn_evm/src/data_payments.rs | 9 ++++ sn_networking/src/cmd.rs | 40 +++++++++++++-- sn_networking/src/lib.rs | 65 ++++++++++++++---------- sn_networking/src/record_store.rs | 1 + sn_node/src/node.rs | 3 +- sn_node/src/quote.rs | 4 ++ sn_transfers/src/wallet/data_payments.rs | 10 ++++ 7 files changed, 102 insertions(+), 30 deletions(-) diff --git a/sn_evm/src/data_payments.rs b/sn_evm/src/data_payments.rs index 4791c4af96..3b5ccd6e11 100644 --- a/sn_evm/src/data_payments.rs +++ b/sn_evm/src/data_payments.rs @@ -71,6 +71,10 @@ pub struct PaymentQuote { pub timestamp: SystemTime, /// quoting metrics being used to generate this quote pub quoting_metrics: QuotingMetrics, + /// list of bad_nodes that client shall not pick as a payee + /// in `serialised` format to avoid cyclic dependent on sn_protocol + #[debug(skip)] + pub bad_nodes: Vec, /// the node's wallet address pub rewards_address: RewardsAddress, /// the node's libp2p identity public key in bytes (PeerId) @@ -89,6 +93,7 @@ impl PaymentQuote { cost: AttoTokens::zero(), timestamp: SystemTime::now(), 
quoting_metrics: Default::default(), + bad_nodes: vec![], rewards_address: dummy_address(), pub_key: vec![], signature: vec![], @@ -108,6 +113,7 @@ impl PaymentQuote { cost: AttoTokens, timestamp: SystemTime, quoting_metrics: &QuotingMetrics, + serialised_bad_nodes: &[u8], rewards_address: &RewardsAddress, ) -> Vec<u8> { let mut bytes = xorname.to_vec(); @@ -121,6 +127,7 @@ impl PaymentQuote { ); let serialised_quoting_metrics = rmp_serde::to_vec(quoting_metrics).unwrap_or_default(); bytes.extend_from_slice(&serialised_quoting_metrics); + bytes.extend_from_slice(serialised_bad_nodes); bytes.extend_from_slice(rewards_address.as_slice()); bytes } @@ -132,6 +139,7 @@ impl PaymentQuote { self.cost, self.timestamp, &self.quoting_metrics, + &self.bad_nodes, &self.rewards_address, ) } @@ -190,6 +198,7 @@ impl PaymentQuote { cost, timestamp: SystemTime::now(), quoting_metrics: Default::default(), + bad_nodes: vec![], pub_key: vec![], signature: vec![], rewards_address: dummy_address(), diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 541a518ce5..ef7eaaa017 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -88,10 +88,10 @@ pub enum LocalSwarmCmd { key: RecordKey, sender: oneshot::Sender<Option<Record>>, }, - /// GetLocalStoreCost for this node + /// GetLocalStoreCost for this node, also with the bad_node list close to the target GetLocalStoreCost { key: RecordKey, - sender: oneshot::Sender<(AttoTokens, QuotingMetrics)>, + sender: oneshot::Sender<(AttoTokens, QuotingMetrics, Vec<NetworkAddress>)>, }, /// Notify the node received a payment. PaymentReceived, @@ -565,7 +565,41 @@ impl SwarmDriver { quoting_metrics: &quoting_metrics, }); - let _res = sender.send((cost, quoting_metrics)); + // To avoid sending the entire list to the client, only send those that are: + // closer than the CLOSE_GROUP_SIZEth closest node to the target + let mut bad_nodes: Vec<_> = self + .bad_nodes + .iter() + .filter_map(|(peer_id, (_issue_list, is_bad))| { + if *is_bad { + Some(NetworkAddress::from_peer(*peer_id)) + } else { + None + } + }) + .collect(); + + // List is ordered already, hence the last one is always the one wanted + let kbucket_key = NetworkAddress::from_record_key(&key).as_kbucket_key(); + let closest_peers: Vec<_> = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&kbucket_key) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect(); + // In case there are not enough closest_peers, send the entire list + if closest_peers.len() >= CLOSE_GROUP_SIZE { + let boundary_peer = closest_peers[CLOSE_GROUP_SIZE - 1]; + let key_address = NetworkAddress::from_record_key(&key); + let boundary_distance = + key_address.distance(&NetworkAddress::from_peer(boundary_peer)); + bad_nodes + .retain(|peer_addr| key_address.distance(peer_addr) < boundary_distance); + } + + let _res = sender.send((cost, quoting_metrics, bad_nodes)); } LocalSwarmCmd::PaymentReceived => { cmd_string = "PaymentReceived"; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 856f0559a3..aa7baa4595 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -66,7 +66,7 @@ use sn_protocol::{ NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, }; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeMap, HashMap}, net::IpAddr, sync::Arc, }; @@ -337,8 +337,8 @@ impl Network { /// Get the store costs from the majority of the closest peers to the provided RecordKey. /// Record already exists will have a cost of zero to be returned.
/// - /// Ignore the quote from any peers from `ignore_peers`. This is useful if we want to repay a different PeerId - /// on failure. + /// Ignore the quote from any peers from `ignore_peers`. + /// This is useful if we want to repay a different PeerId on failure. pub async fn get_store_costs_from_network( &self, record_address: NetworkAddress, @@ -346,7 +346,14 @@ ) -> Result { // The requirement of having at least CLOSE_GROUP_SIZE // close nodes will be checked internally automatically. - let close_nodes = self.get_closest_peers(&record_address, true).await?; + let mut close_nodes = self.get_closest_peers(&record_address, true).await?; + // Filter out results from the ignored peers. + close_nodes.retain(|peer_id| !ignore_peers.contains(peer_id)); + + if close_nodes.is_empty() { + error!("Can't get store_cost of {record_address:?}, as all close_nodes are ignored"); + return Err(NetworkError::NoStoreCostResponses); + } let request = Request::Query(Query::GetStoreCost(record_address.clone())); let responses = self @@ -402,27 +409,7 @@ self.send_req_ignore_reply(request, *peer_id); } - // Sort all_costs by the NetworkAddress proximity to record_address - all_costs.sort_by(|(peer_address_a, _, _), (peer_address_b, _, _)| { - record_address - .distance(peer_address_a) - .cmp(&record_address.distance(peer_address_b)) - }); - let ignore_peers = ignore_peers - .into_iter() - .map(NetworkAddress::from_peer) - .collect::<BTreeSet<_>>(); - - // Ensure we dont have any further out nodes than `close_group_majority()` - // This should ensure that if we didnt get all responses from close nodes, - // we're less likely to be paying a node that is not in the CLOSE_GROUP - // - // Also filter out the peers. - let all_costs = all_costs - .into_iter() - .filter(|(peer_address, ..)| !ignore_peers.contains(peer_address)) - .take(close_group_majority()) - .collect(); + filter_out_bad_nodes(&mut all_costs, record_address); get_fees_from_store_cost_responses(all_costs) } @@ -581,7 +581,7 @@ pub async fn get_local_storecost( &self, key: RecordKey, - ) -> Result<(AttoTokens, QuotingMetrics)> { + ) -> Result<(AttoTokens, QuotingMetrics, Vec<NetworkAddress>)> { let (sender, receiver) = oneshot::channel(); self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); @@ -986,6 +973,32 @@ fn get_fees_from_store_cost_responses( Ok((payee_id, payee.1, payee.2)) } +/// According to the bad_nodes list collected via quotes, +/// a candidate that received majority votes from others shall be ignored.
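The vote-counting rule implemented by `filter_out_bad_nodes` just below can be sanity-checked in isolation. A self-contained sketch of the same majority filter, with string ids standing in for `NetworkAddress` and a constant 3 standing in for `close_group_majority()` (both stand-ins are assumptions for illustration):

```rust
use std::collections::BTreeMap;

// Each payee candidate supplies a quote that may accuse other peers;
// a candidate accused by at least MAJORITY quotes is dropped.
const MAJORITY: usize = 3; // stand-in for close_group_majority()

fn filter_by_votes(mut candidates: Vec<(&'static str, Vec<&'static str>)>) -> Vec<&'static str> {
    let mut votes: BTreeMap<&str, usize> = BTreeMap::new();
    for (_peer, accused) in candidates.iter() {
        for bad in accused {
            // Tally one vote against each accused peer.
            *votes.entry(*bad).or_default() += 1;
        }
    }
    // Keep only candidates with fewer than a majority of votes against them.
    candidates.retain(|(peer, _)| votes.get(peer).copied().unwrap_or(0) < MAJORITY);
    candidates.into_iter().map(|(peer, _)| peer).collect()
}

fn main() {
    // B, C and D all accuse A, reaching the majority of 3, so A is filtered out.
    let quotes = vec![
        ("A", vec![]),
        ("B", vec!["A"]),
        ("C", vec!["A"]),
        ("D", vec!["A"]),
    ];
    assert_eq!(filter_by_votes(quotes), vec!["B", "C", "D"]);
}
```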
+fn filter_out_bad_nodes( + all_costs: &mut Vec<(NetworkAddress, RewardsAddress, PaymentQuote)>, + record_address: NetworkAddress, +) { + let mut bad_node_votes: BTreeMap<NetworkAddress, usize> = BTreeMap::new(); + for (peer_addr, _reward_addr, quote) in all_costs.iter() { + let bad_nodes: Vec<NetworkAddress> = match rmp_serde::from_slice(&quote.bad_nodes) { + Ok(bad_nodes) => bad_nodes, + Err(err) => { + error!("For record {record_address:?}, failed to recover bad_nodes from quote of {peer_addr:?} with error {err:?}"); + continue; + } + }; + for bad_node in bad_nodes { + let entry = bad_node_votes.entry(bad_node).or_default(); + *entry += 1; + } + } + all_costs.retain(|(peer_addr, _, _)| { + let entry = bad_node_votes.entry(peer_addr.clone()).or_default(); + *entry < close_group_majority() + }); +} + /// Get the value of the provided Quorum pub fn get_quorum_value(quorum: &Quorum) -> usize { match quorum { diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index b9100e084c..5ac90decfc 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -1783,6 +1783,7 @@ mod tests { received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, + bad_nodes: vec![], pub_key: bls::SecretKey::random().public_key().to_bytes().to_vec(), signature: vec![], rewards_address: peer.rewards_addr, // unimportant for cost calc diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 83666ddd39..37ad21d0f2 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -607,7 +607,7 @@ impl Node { let store_cost = network.get_local_storecost(record_key.clone()).await; match store_cost { - Ok((cost, quoting_metrics)) => { + Ok((cost, quoting_metrics, bad_nodes)) => { if cost == AttoTokens::zero() { QueryResponse::GetStoreCost { quote: Err(ProtocolError::RecordExists( @@ -623,6 +623,7 @@ impl Node { cost, &address, &quoting_metrics, + bad_nodes, &payment_address, ), payment_address, diff --git a/sn_node/src/quote.rs b/sn_node/src/quote.rs index 42079b1d0c..969d326ce0 100644 --- a/sn_node/src/quote.rs +++ b/sn_node/src/quote.rs @@ -19,15 +19,18 @@ impl Node { cost: AttoTokens, address: &NetworkAddress, quoting_metrics: &QuotingMetrics, + bad_nodes: Vec<NetworkAddress>, payment_address: &RewardsAddress, ) -> Result { let content = address.as_xorname().unwrap_or_default(); let timestamp = std::time::SystemTime::now(); + let serialised_bad_nodes = rmp_serde::to_vec(&bad_nodes).unwrap_or_default(); let bytes = PaymentQuote::bytes_for_signing( content, cost, timestamp, quoting_metrics, + &serialised_bad_nodes, payment_address, ); @@ -40,6 +43,7 @@ impl Node { cost, timestamp, quoting_metrics: quoting_metrics.clone(), + bad_nodes: serialised_bad_nodes, pub_key: network.get_pub_key(), rewards_address: *payment_address, signature, diff --git a/sn_transfers/src/wallet/data_payments.rs b/sn_transfers/src/wallet/data_payments.rs index 90e05e179c..b200ff4c97 100644 --- a/sn_transfers/src/wallet/data_payments.rs +++ b/sn_transfers/src/wallet/data_payments.rs @@ -106,6 +106,10 @@ pub struct PaymentQuote { pub timestamp: SystemTime, /// quoting metrics being used to generate this quote pub quoting_metrics: QuotingMetrics, + /// list of bad_nodes that client shall not pick as a payee + /// in `serialised` format to avoid a cyclic dependency on sn_protocol + #[debug(skip)] + pub bad_nodes: Vec<u8>, /// node's public key that can verify the signature #[debug(skip)] pub pub_key: Vec<u8>, @@ -121,6 +125,7 @@ impl PaymentQuote { cost: NanoTokens::zero(), timestamp: SystemTime::now(), quoting_metrics:
Default::default(), + bad_nodes: vec![], pub_key: vec![], signature: vec![], } @@ -132,6 +137,7 @@ impl PaymentQuote { cost: NanoTokens, timestamp: SystemTime, quoting_metrics: &QuotingMetrics, + serialised_bad_nodes: &[u8], ) -> Vec<u8> { let mut bytes = xorname.to_vec(); bytes.extend_from_slice(&cost.to_bytes()); @@ -144,6 +150,7 @@ impl PaymentQuote { ); let serialised_quoting_metrics = rmp_serde::to_vec(quoting_metrics).unwrap_or_default(); bytes.extend_from_slice(&serialised_quoting_metrics); + bytes.extend_from_slice(serialised_bad_nodes); bytes } @@ -168,6 +175,7 @@ impl PaymentQuote { self.cost, self.timestamp, &self.quoting_metrics, + &self.bad_nodes, ); if !pub_key.verify(&bytes, &self.signature) { @@ -196,6 +204,7 @@ impl PaymentQuote { cost, timestamp: SystemTime::now(), quoting_metrics: Default::default(), + bad_nodes: vec![], pub_key: vec![], signature: vec![], } @@ -294,6 +303,7 @@ mod tests { quote.cost, quote.timestamp, &quote.quoting_metrics, + &[], ); let signature = if let Ok(sig) = keypair.sign(&bytes) { sig From 2baa5238f5ef9f139d18b0a17a98dc51cfbbb624 Mon Sep 17 00:00:00 2001 From: qima Date: Sat, 5 Oct 2024 21:01:48 +0800 Subject: [PATCH 124/255] feat(node): env MAX_STORAGE_SPACE for MAX_RECORD_COUNT calculate --- Cargo.lock | 31 +++++++++++++++-- sn_networking/Cargo.toml | 1 + sn_networking/src/record_store.rs | 58 ++++++++++++++++++++----------- 3 files changed, 67 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6110e42b18..d98c551e56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1075,7 +1075,7 @@ dependencies = [ "libp2p 0.54.1", "rand 0.8.5", "rmp-serde", - "self_encryption", + "self_encryption 0.29.2", "serde", "sn_bls_ckd", "sn_curv", @@ -7686,6 +7686,32 @@ dependencies = [ "xor_name", ] +[[package]] +name = "self_encryption" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9439a0cb3efb35e080a1576e3e00a804caab04010adc802aed88cf539b103ed" +dependencies = [ + "aes", + "bincode", + "brotli", + "bytes", + "cbc", + "hex 0.4.3", + "itertools 0.10.5", + "lazy_static", + "num_cpus", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rayon", + "serde", + "tempfile", + "thiserror", + "tiny-keccak", + "tokio", + "xor_name", +] + [[package]] name = "semver" version = "0.11.0" @@ -8191,6 +8217,7 @@ dependencies = [ "rand 0.8.5", "rayon", "rmp-serde", + "self_encryption 0.30.0", "serde", "sn_build_info", "sn_evm", @@ -8240,7 +8267,7 @@ dependencies = [ "rayon", "reqwest 0.12.7", "rmp-serde", - "self_encryption 0.29.2", "serde", "serde_json", "sn_build_info", diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index a83271e88f..eb8de53126 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -52,6 +52,7 @@ prometheus-client = { version = "0.22", optional = true } rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" +self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } sn_build_info = { path="../sn_build_info", version = "0.1.14" } sn_protocol = { path = "../sn_protocol", version = "0.17.10" } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index b9100e084c..160ce57c4c 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -18,6 +18,7 @@ use aes_gcm_siv::{ }; use itertools::Itertools; +use lazy_static::lazy_static; use libp2p::{ identity::PeerId, kad::{ @@ -29,6 +30,7 @@ use libp2p::{ use
prometheus_client::metrics::gauge::Gauge; use rand::RngCore; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; +use self_encryption::MAX_CHUNK_SIZE; use serde::{Deserialize, Serialize}; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{ @@ -48,13 +50,27 @@ use tokio::sync::mpsc; use walkdir::{DirEntry, WalkDir}; use xor_name::XorName; -// A spend record is at the size of 4KB roughly. -// Given chunk record is maxed at size of 512KB. -// During Beta phase, it's almost one spend per chunk, -// which makes the average record size is around 256k. -// Given we are targeting node size to be 32GB, -// this shall allow around 128K records. -const MAX_RECORDS_COUNT: usize = 128 * 1024; +/// The default value of the targeted max storage space is 32GB. +const DEFAULT_MAX_STORAGE_SPACE: u64 = 32 * 1024 * 1024 * 1024; + +lazy_static! { + /// The max storage space for the records. + /// A `node size` is defined as this plus the logging space assigned. + pub static ref MAX_STORAGE_SPACE: u64 = std::option_env!("MAX_STORAGE_SPACE") + .unwrap_or(&DEFAULT_MAX_STORAGE_SPACE.to_string()) + .parse::<u64>() + .unwrap_or(DEFAULT_MAX_STORAGE_SPACE); + + // A spend record is roughly 2KB in size. + // During the Beta phase, it's almost one spend per chunk, + // which makes the average record size roughly half of MAX_CHUNK_SIZE + static ref MAX_RECORDS_COUNT: usize = { + let records_count: usize = ((*MAX_STORAGE_SPACE as f64 / *MAX_CHUNK_SIZE as f64) * 2.0) as usize; + info!("MAX_STORAGE_SPACE is {}, MAX_CHUNK_SIZE is {}, MAX_RECORDS_COUNT is {records_count}", + *MAX_STORAGE_SPACE, *MAX_CHUNK_SIZE); + records_count + }; +} /// The maximum number of records to cache in memory. const MAX_RECORDS_CACHE_SIZE: usize = 100; @@ -127,7 +143,7 @@ impl Default for NodeRecordStoreConfig { Self { storage_dir: historic_quote_dir.clone(), historic_quote_dir, - max_records: MAX_RECORDS_COUNT, + max_records: *MAX_RECORDS_COUNT, max_value_bytes: MAX_PACKET_SIZE, records_cache_size: MAX_RECORDS_CACHE_SIZE, } } @@ -461,7 +477,7 @@ impl NodeRecordStore { // result in mis-calculation of relevant records.
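As a sanity check on the `MAX_RECORDS_COUNT` formula above: with the 32GB default and the 512KB max chunk size mentioned in the removed comment, the env-driven default reproduces the previous hard-coded limit of 128 * 1024 records. A minimal standalone check (the 512KB figure is taken from the old comment and assumed here, not read from `self_encryption`):

```rust
fn main() {
    let max_storage_space: u64 = 32 * 1024 * 1024 * 1024; // DEFAULT_MAX_STORAGE_SPACE
    let max_chunk_size: u64 = 512 * 1024; // assumed, per the removed 512KB comment
    // Same arithmetic as the lazy_static block above.
    let records_count = ((max_storage_space as f64 / max_chunk_size as f64) * 2.0) as usize;
    assert_eq!(records_count, 128 * 1024); // matches the old MAX_RECORDS_COUNT
}
```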
pub fn cleanup_unrelevant_records(&mut self) { let accumulated_records = self.records.len(); - if accumulated_records < 6 * MAX_RECORDS_COUNT / 10 { + if accumulated_records < *MAX_RECORDS_COUNT * 6 / 10 { return; } @@ -932,7 +948,7 @@ impl RecordStore for ClientRecordStore { pub fn calculate_cost_for_records(records_stored: usize) -> u64 { use std::cmp::{max, min}; - let max_records = MAX_RECORDS_COUNT; + let max_records = *MAX_RECORDS_COUNT; let ori_cost = positive_input_0_1_sigmoid(records_stored as f64 / max_records as f64) * MAX_STORE_COST as f64; @@ -1009,13 +1025,13 @@ mod tests { #[test] fn test_calculate_max_cost_for_records() { - let sut = calculate_cost_for_records(MAX_RECORDS_COUNT + 1); + let sut = calculate_cost_for_records(*MAX_RECORDS_COUNT + 1); assert_eq!(sut, MAX_STORE_COST - 1); } #[test] fn test_calculate_50_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 50 / 100; + let percent = *MAX_RECORDS_COUNT * 50 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost @@ -1023,16 +1039,16 @@ mod tests { } #[test] fn test_calculate_60_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 60 / 100; + let percent = *MAX_RECORDS_COUNT * 60 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost - assert_eq!(sut, 952572); + assert_eq!(sut, 952561); } #[test] fn test_calculate_65_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 65 / 100; + let percent = *MAX_RECORDS_COUNT * 65 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost @@ -1041,7 +1057,7 @@ mod tests { #[test] fn test_calculate_70_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 70 / 100; + let percent = *MAX_RECORDS_COUNT * 70 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost @@ -1050,7 +1066,7 @@ mod tests { #[test] fn test_calculate_80_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 80 / 100; + let percent = *MAX_RECORDS_COUNT * 80 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost @@ -1059,7 +1075,7 @@ mod tests { #[test] fn test_calculate_90_percent_cost_for_records() { - let percent = MAX_RECORDS_COUNT * 90 / 100; + let percent = *MAX_RECORDS_COUNT * 90 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost assert_eq!(sut, 999993); @@ -1750,8 +1766,8 @@ mod tests { max_store_cost / min_store_cost ); assert!( - (max_earned / min_earned) < 300000000, - "earning distribution is not balanced, expected to be < 200000000, but was {}", + (max_earned / min_earned) < 500000000, + "earning distribution is not balanced, expected to be < 500000000, but was {}", max_earned / min_earned ); break; @@ -1779,7 +1795,7 @@ mod tests { timestamp: std::time::SystemTime::now(), quoting_metrics: QuotingMetrics { close_records_stored: peer.records_stored.load(Ordering::Relaxed), - max_records: MAX_RECORDS_COUNT, + max_records: *MAX_RECORDS_COUNT, received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, From c7994e013244f82925ed2c3c93bb3f3d7d16484a Mon Sep 17 00:00:00 2001 From: grumbach Date: Mon, 7 Oct 2024 19:03:46 +0900 Subject: [PATCH 125/255] feat: wasm docs --- autonomi/WASM_docs.md | 170 +++++++++++++++++++++++++++++ autonomi/src/client/data.rs | 4 +- autonomi/src/lib.rs | 2 +- autonomi_cli/src/access/network.rs | 4 +- 4 files changed, 176 insertions(+), 4 
deletions(-) create mode 100644 autonomi/WASM_docs.md diff --git a/autonomi/WASM_docs.md b/autonomi/WASM_docs.md new file mode 100644 index 0000000000..995809b8bd --- /dev/null +++ b/autonomi/WASM_docs.md @@ -0,0 +1,170 @@ +## JavaScript Autonomi API Documentation + +Note that this is a first version and will be subject to change. + +### **Client** + +The `Client` object allows interaction with the network to store and retrieve data. Below are the available methods for the `Client` class. + +#### **Constructor** + +```javascript +let client = await new Client([multiaddress]); +``` + +- **multiaddress** (Array of Strings): A list of network addresses for the client to connect to. + +Example: +```javascript +let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); +``` + +#### **Methods** + +##### **put(data, wallet)** + +Uploads a piece of encrypted data to the network. + +```javascript +let result = await client.put(data, wallet); +``` + +- **data** (Uint8Array): The data to be stored. +- **wallet** (Wallet): The wallet used to pay for the storage. + +Returns: +- **result** (XorName): The XOR address of the stored data. + +Example: +```javascript +let wallet = getFundedWallet(); +let data = new Uint8Array([1, 2, 3]); +let result = await client.put(data, wallet); +``` + +##### **get(data_map_addr)** + +Fetches encrypted data from the network using its XOR address. + +```javascript +let data = await client.get(data_map_addr); +``` + +- **data_map_addr** (XorName): The XOR address of the data to fetch. + +Returns: +- **data** (Uint8Array): The fetched data. + +Example: +```javascript +let data = await client.get(result); +``` + +##### **cost(data)** + +Gets the cost of storing the provided data on the network. + +```javascript +let cost = await client.cost(data); +``` + +- **data** (Uint8Array): The data whose storage cost you want to calculate. + +Returns: +- **cost** (AttoTokens): The calculated cost for storing the data. + +Example: +```javascript +let cost = await client.cost(new Uint8Array([1, 2, 3])); +``` + +--- + +### **Wallet** + +The `Wallet` object represents an Ethereum wallet used for data payments. + +#### **Methods** + +##### **new_from_private_key(network, private_key)** + +Creates a new wallet using the given private key. + +```javascript +let wallet = Wallet.new_from_private_key(network, private_key); +``` + +- **network** (EvmNetwork): The network to which the wallet connects. +- **private_key** (String): The private key of the wallet. + +Returns: +- **wallet** (Wallet): The created wallet. + +Example: +```javascript +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +``` + +##### **address()** + +Gets the wallet’s address. + +```javascript +let address = wallet.address(); +``` + +Returns: +- **address** (Address): The wallet's address. + +Example: +```javascript +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +let address = wallet.address(); +``` + +--- + +### **EvmNetwork** + +The `EvmNetwork` object represents the blockchain network. + +#### **Methods** + +##### **default()** + +Connects to the default network. + +```javascript +let network = EvmNetwork.default(); +``` + +Returns: +- **network** (EvmNetwork): The default network. 
+ +Example: +```javascript +let network = EvmNetwork.default(); +``` + +--- + +### Example Usage: + +```javascript +let client = await new Client(["/ip4/127.0.0.1/tcp/36075/ws/p2p/12D3KooWALb...BhDAfJY"]); +console.log("connected"); + +let wallet = Wallet.new_from_private_key(EvmNetwork.default(), "your_private_key_here"); +console.log("wallet retrieved"); + +let data = new Uint8Array([1, 2, 3]); +let result = await client.put(data, wallet); +console.log("Data stored at:", result); + +let fetchedData = await client.get(result); +console.log("Data retrieved:", fetchedData); +``` + +--- + +This documentation covers the basic usage of the `Client`, `Wallet`, and `EvmNetwork` types in the JavaScript API. \ No newline at end of file diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 99e1c23e88..cdfa7aac25 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -186,7 +186,8 @@ impl Client { Ok(map_xor_name) } - pub(crate) async fn cost(&self, data: Bytes) -> Result { + /// Get the cost of storing a piece of data. + pub async fn cost(&self, data: Bytes) -> Result { let now = std::time::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; @@ -209,6 +210,7 @@ impl Client { Ok(total_cost) } + /// Pay for the chunks and get the proof of payment. pub(crate) async fn pay( &self, content_addrs: impl Iterator<Item = XorName>, diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 0e8ff3f61d..55f2415786 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -25,7 +25,7 @@ pub mod client; #[cfg(feature = "data")] mod self_encryption; -pub use sn_evm::EvmNetwork as Network; +pub use sn_evm::EvmNetwork; pub use sn_evm::EvmWallet as Wallet; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs index b611161bcd..bfc77e851f 100644 --- a/autonomi_cli/src/access/network.rs +++ b/autonomi_cli/src/access/network.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software.
use autonomi::Multiaddr; -use autonomi::Network; +use autonomi::EvmNetwork; use color_eyre::eyre::eyre; use color_eyre::eyre::Context; use color_eyre::Result; @@ -23,7 +23,7 @@ pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> { .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere") } -pub(crate) fn get_evm_network_from_environment() -> Result<Network> { +pub(crate) fn get_evm_network_from_environment() -> Result<EvmNetwork> { evmlib::utils::evm_network_from_env() .map_err(|err| eyre!("Failed to get EVM network from environment: {err}")) } From 9ec8990b2dcc6758a58ea3922f572e48b553eea7 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 8 Oct 2024 14:34:01 +0900 Subject: [PATCH 126/255] fix: registers missing --- autonomi/src/client/registers.rs | 74 ++++++++++++++++++++++----- autonomi_cli/src/access/network.rs | 2 +- autonomi_cli/src/commands/register.rs | 8 ++- sn_networking/src/error.rs | 3 ++ sn_protocol/src/storage.rs | 3 +- 5 files changed, 73 insertions(+), 17 deletions(-) diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index a94e198218..6a52fd8820 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -10,7 +10,11 @@ pub use bls::SecretKey as RegisterSecretKey; use sn_evm::Amount; use sn_evm::AttoTokens; +use sn_networking::GetRecordError; +use sn_networking::VerificationKind; +use sn_protocol::storage::RetryStrategy; pub use sn_registers::RegisterAddress; +use tracing::warn; use crate::client::data::PayError; use crate::client::Client; @@ -46,6 +50,8 @@ pub enum RegisterError { Write(#[source] sn_registers::Error), #[error("Failed to sign register")] CouldNotSign(#[source] sn_registers::Error), + #[error("Received an invalid quote from a node; it is possibly malfunctioning. Try targeting another node by using a different register name")] + InvalidQuote, } #[derive(Clone, Debug)] @@ -85,17 +91,36 @@ impl Client { let key = network_address.to_record_key(); let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, + get_quorum: Quorum::Majority, retry_strategy: None, target_record: None, expected_holders: Default::default(), is_register: true, }; - let record = self.network.get_record_from_network(key, &get_cfg).await?; - - let register: SignedRegister = - try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + let register = match self.network.get_record_from_network(key, &get_cfg).await { + Ok(record) => { + try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?
+ } + // manage forked register case + Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { + let mut registers: Vec<SignedRegister> = vec![]; + for (_, (record, _)) in result_map { + registers.push( + try_deserialize_record(&record) + .map_err(|_| RegisterError::Serialization)?, + ); + } + let register = registers.iter().fold(registers[0].clone(), |mut acc, x| { + if let Err(e) = acc.merge(x) { + warn!("Ignoring forked register as we failed to merge conflicting registers at {}: {e}", x.address()); + } + acc + }); + register + } + Err(e) => Err(e)?, + }; // Make sure the fetched record contains valid CRDT operations register @@ -143,11 +168,18 @@ expires: None, }; + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::default()), + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; let put_cfg = PutRecordCfg { put_quorum: Quorum::All, retry_strategy: None, use_put_record_to: None, - verification: None, + verification: Some((VerificationKind::Network, get_cfg)), }; // Store the updated register on the network @@ -211,14 +243,23 @@ .map(|(entry_hash, _value)| entry_hash) .collect(); - // TODO: Handle error. let _ = register.write(value.into(), &entries, &owner); let reg_xor = register.address().xorname(); - let (payment_proofs, _) = self.pay(std::iter::once(reg_xor), wallet).await?; - // Should always be there, else it would have failed on the payment step. - let proof = payment_proofs.get(&reg_xor).expect("Missing proof"); - let payee = proof.to_peer_id_payee().expect("Missing payee Peer ID"); - let signed_register = register.clone().into_signed(&owner).expect("TODO"); + let (payment_proofs, _skipped) = self.pay(std::iter::once(reg_xor), wallet).await?; + let proof = if let Some(proof) = payment_proofs.get(&reg_xor) { + proof + } else { + // register was skipped, meaning it was already paid for + return Err(RegisterError::Network(NetworkError::RegisterAlreadyExists)); + }; + + let payee = proof + .to_peer_id_payee() + .ok_or(RegisterError::InvalidQuote)?; + let signed_register = register + .clone() + .into_signed(&owner) + .map_err(RegisterError::CouldNotSign)?; let record = Record { key: address.to_record_key(), @@ -232,11 +273,18 @@ expires: None, }; + let get_cfg = GetRecordCfg { + get_quorum: Quorum::Majority, + retry_strategy: Some(RetryStrategy::default()), + target_record: None, + expected_holders: Default::default(), + is_register: true, + }; let put_cfg = PutRecordCfg { put_quorum: Quorum::All, retry_strategy: None, use_put_record_to: Some(vec![payee]), - verification: None, + verification: Some((VerificationKind::Network, get_cfg)), }; self.network.put_record(record, &put_cfg).await?; diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs index bfc77e851f..a480bd25ba 100644 --- a/autonomi_cli/src/access/network.rs +++ b/autonomi_cli/src/access/network.rs @@ -6,8 +6,8 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software.
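Taken together with the CLI commands earlier in the series, the `register_create` flow above (pay, target the payee from the payment proof, then verify the PUT) can be exercised end to end. A hedged sketch using only calls that appear in this patch series; note that `evm::network_from_env` is introduced a commit later, the peer list and private key are placeholders, the `Client` re-export is assumed, and error handling is simplified:

```rust
use autonomi::client::registers::RegisterSecretKey;
use autonomi::{Client, Multiaddr, Wallet};

// Placeholder key; a real caller would load it the way `load_evm_wallet` does.
const PRIVATE_KEY: &str = "your_private_key_here";

async fn create_register_demo(peers: Vec<Multiaddr>) -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::connect(&peers).await?;
    let network = autonomi::evm::network_from_env();
    let wallet = Wallet::new_from_private_key(network, PRIVATE_KEY)?;
    let register_key = RegisterSecretKey::random();

    // Pays for the register, sends the record to the payee named in the
    // payment proof, then verifies storage (VerificationKind::Network).
    let register = client
        .register_create(b"hello".to_vec().into(), "my-register", register_key, &wallet)
        .await?;
    println!("Register created at {}", register.address());
    Ok(())
}
```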
-use autonomi::Multiaddr; use autonomi::EvmNetwork; +use autonomi::Multiaddr; use color_eyre::eyre::eyre; use color_eyre::eyre::Context; use color_eyre::Result; diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index 7971f145da..f1df7660c6 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -78,7 +78,9 @@ pub async fn edit(address: String, name: bool, value: &str, peers: Vec<Multiaddr>) -> Result<( } else { RegisterAddress::from_hex(&address) .wrap_err(format!("Failed to parse register address: {address}")) - .with_suggestion(|| "if you want to use the name as the address, run the command with the --name flag")? + .with_suggestion(|| { + "if you want to use the name as the address, run the command with the --name flag" + })? }; println!("Getting register at address: {address}"); diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 6da5a22d9a..6534c84017 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -183,6 +183,9 @@ pub enum NetworkError { #[error("Error setting up behaviour: {0}")] BahviourErr(String), + + #[error("Register already exists at this address")] + RegisterAlreadyExists, } #[cfg(test)] diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs index c0a9007ed0..2935e43fce 100644 --- a/sn_protocol/src/storage.rs +++ b/sn_protocol/src/storage.rs @@ -27,11 +27,12 @@ pub use self::{ /// Chunk/Registers/Spend to be more flexible. /// /// The Duration/Attempts is chosen based on the internal logic. -#[derive(Clone, Debug, Copy)] +#[derive(Clone, Debug, Copy, Default)] pub enum RetryStrategy { /// Quick: Resolves to a 15-second wait or 1 retry attempt. Quick, /// Balanced: Resolves to a 60-second wait or 3 retry attempt. + #[default] Balanced, /// Persistent: Resolves to a 180-second wait or 6 retry attempt. Persistent, From 3775f3f98e3fedc09caafe53bedd263f090139a2 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 8 Oct 2024 16:58:45 +0900 Subject: [PATCH 127/255] feat: simpler evm connection and env --- Cargo.lock | 2 +- autonomi/src/client/data.rs | 8 +++++ autonomi/src/client/files.rs | 8 +++++ autonomi/src/client/mod.rs | 8 +++++ autonomi/src/client/vault.rs | 8 +++++ autonomi/src/lib.rs | 9 +++++ autonomi_cli/Cargo.toml | 1 - autonomi_cli/src/access/keys.rs | 3 +- autonomi_cli/src/access/network.rs | 11 +++--- evm_testnet/Cargo.toml | 1 + evm_testnet/src/main.rs | 15 ++++++++ evmlib/src/lib.rs | 10 +++++- evmlib/src/utils.rs | 7 +++- sn_evm/src/data_payments.rs | 17 +++++++++ sn_evm/src/evm.rs | 36 ++++++++++--------- sn_evm/src/lib.rs | 8 ++--- sn_node/src/bin/safenode/main.rs | 7 +++- sn_node/src/bin/safenode/subcommands.rs | 8 ++--- .../src/bin/cli/subcommands/evm_network.rs | 6 ++-- sn_node_manager/src/local.rs | 15 ++++---- 20 files changed, 140 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d98c551e56..c6ca94bec5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1103,7 +1103,6 @@ dependencies = [ "clap", "color-eyre", "dirs-next", - "evmlib", "indicatif", "sn_build_info", "sn_logging", @@ -2738,6 +2737,7 @@ dependencies = [ "clap", "dirs-next", "evmlib", + "sn_evm", "tokio", ] diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index cdfa7aac25..95d8e6d709 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::self_encryption::DataMapLevel; use bytes::Bytes; use evmlib::wallet; diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs index c8317a2ed7..61a2dc43ab 100644 --- a/autonomi/src/client/files.rs +++ b/autonomi/src/client/files.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::client::data::{GetError, PutError}; use crate::client::Client; use crate::self_encryption::encrypt; diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index c0f097501f..71446ed2a2 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + pub mod address; #[cfg(feature = "data")] diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index afe38c0825..8087f17eff 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use std::collections::HashSet; use crate::client::data::PutError; diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 55f2415786..4ff7057a31 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + //! 
Connect to and build on the Autonomi network.
 //!
 //! # Data types
@@ -25,6 +33,7 @@ pub mod client;
 
 #[cfg(feature = "data")]
 mod self_encryption;
 
+pub use sn_evm::evm;
 pub use sn_evm::EvmNetwork;
 pub use sn_evm::EvmWallet as Wallet;
 
diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml
index b6a0678ee5..0d41ba92c3 100644
--- a/autonomi_cli/Cargo.toml
+++ b/autonomi_cli/Cargo.toml
@@ -14,7 +14,6 @@ autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files
 clap = { version = "4.2.1", features = ["derive"] }
 color-eyre = "~0.6"
 dirs-next = "~2.0.0"
-evmlib = { path = "../evmlib", version = "0.1.0" }
 indicatif = { version = "0.17.5", features = ["tokio"] }
 tokio = { version = "1.32.0", features = [
     "io-util",
diff --git a/autonomi_cli/src/access/keys.rs b/autonomi_cli/src/access/keys.rs
index a11de06b7a..9fca310124 100644
--- a/autonomi_cli/src/access/keys.rs
+++ b/autonomi_cli/src/access/keys.rs
@@ -24,8 +24,7 @@ const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key";
 
 pub fn load_evm_wallet() -> Result<Wallet> {
     let secret_key =
         get_secret_key().wrap_err("The secret key is required to perform this action")?;
-    let network = crate::network::get_evm_network_from_environment()
-        .wrap_err("Failed to load EVM network")?;
+    let network = crate::network::get_evm_network_from_env();
     let wallet = Wallet::new_from_private_key(network, &secret_key)
         .wrap_err("Failed to load EVM wallet from key")?;
     Ok(wallet)
diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs
index a480bd25ba..65e2495377 100644
--- a/autonomi_cli/src/access/network.rs
+++ b/autonomi_cli/src/access/network.rs
@@ -8,7 +8,6 @@
 
 use autonomi::EvmNetwork;
 use autonomi::Multiaddr;
-use color_eyre::eyre::eyre;
 use color_eyre::eyre::Context;
 use color_eyre::Result;
 use color_eyre::Section;
@@ -23,7 +22,11 @@ pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> {
         .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere")
 }
 
-pub(crate) fn get_evm_network_from_environment() -> Result<EvmNetwork> {
-    evmlib::utils::evm_network_from_env()
-        .map_err(|err| eyre!("Failed to get EVM network from environment: {err}"))
+pub fn get_evm_network_from_env() -> EvmNetwork {
+    let network = autonomi::evm::network_from_env();
+    if matches!(network, EvmNetwork::Custom(_)) {
+        println!("Using custom EVM network found from environment variables");
+        info!("Using custom EVM network found from environment variables {network:?}");
+    }
+    network
 }
diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml
index 0828dfe6dd..666fc9fdbb 100644
--- a/evm_testnet/Cargo.toml
+++ b/evm_testnet/Cargo.toml
@@ -12,6 +12,7 @@ version = "0.1.0"
 clap = { version = "4.5", features = ["derive"] }
 dirs-next = "~2.0.0"
 evmlib = { path = "../evmlib" }
+sn_evm = { path = "../sn_evm" }
 tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] }
 
 [lints]
diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs
index ab5eb74d22..d3e4d93ec0 100644
--- a/evm_testnet/src/main.rs
+++ b/evm_testnet/src/main.rs
@@ -125,6 +125,21 @@ impl TestnetData {
         if let Some((tokens, gas)) = self.tokens_and_gas {
             println!("Genesis wallet balance (atto): (tokens: {tokens}, gas: {gas})");
         }
+
+        println!();
+        println!("--------------");
+        println!("Run the CLI or Node with the following env vars set to use this network:");
+        println!(
+            "{}=\"{}\" {}=\"{}\" {}=\"{}\"",
+            sn_evm::evm::RPC_URL,
+            self.rpc_url,
+            sn_evm::evm::PAYMENT_TOKEN_ADDRESS,
self.payment_token_address,
+            sn_evm::evm::DATA_PAYMENTS_ADDRESS,
+            self.data_payments_address
+        );
+        println!("--------------");
+        println!();
     }
 
     fn save_csv(&self) {
diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs
index e7d20f90d4..cdbbab6b5c 100644
--- a/evmlib/src/lib.rs
+++ b/evmlib/src/lib.rs
@@ -38,7 +38,7 @@ pub struct CustomNetwork {
 }
 
 impl CustomNetwork {
-    pub fn new(rpc_url: &str, payment_token_addr: &str, data_payments_addr: &str) -> Self {
+    fn new(rpc_url: &str, payment_token_addr: &str, data_payments_addr: &str) -> Self {
         Self {
             rpc_url_http: reqwest::Url::parse(rpc_url).expect("Invalid RPC URL"),
             payment_token_address: Address::from_str(payment_token_addr)
@@ -56,6 +56,14 @@ pub enum Network {
 }
 
 impl Network {
+    pub fn new_custom(rpc_url: &str, payment_token_addr: &str, chunk_payments_addr: &str) -> Self {
+        Self::Custom(CustomNetwork::new(
+            rpc_url,
+            payment_token_addr,
+            chunk_payments_addr,
+        ))
+    }
+
     pub fn identifier(&self) -> &str {
         match self {
             Network::ArbitrumOne => "arbitrum-one",
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index 36501a81a7..0007c17bf4 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -6,6 +6,11 @@ use std::env;
 
 pub const EVM_TESTNET_CSV_FILENAME: &str = "evm_testnet_data.csv";
 
+/// Environment variables to connect to a custom EVM network
+pub const RPC_URL: &str = "RPC_URL";
+pub const PAYMENT_TOKEN_ADDRESS: &str = "PAYMENT_TOKEN_ADDRESS";
+pub const DATA_PAYMENTS_ADDRESS: &str = "DATA_PAYMENTS_ADDRESS";
+
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
     #[error("Failed to get EVM network")]
@@ -24,7 +29,7 @@ pub fn dummy_hash() -> Hash {
 
 /// Get the `Network` from environment variables
 pub fn evm_network_from_env() -> Result<Network, Error> {
-    let evm_vars = ["RPC_URL", "PAYMENT_TOKEN_ADDRESS", "DATA_PAYMENTS_ADDRESS"]
+    let evm_vars = [RPC_URL, PAYMENT_TOKEN_ADDRESS, DATA_PAYMENTS_ADDRESS]
         .iter()
         .map(|var| env::var(var).map_err(|_| Error::FailedToGetEvmNetwork))
        .collect::<Result<Vec<String>, Error>>();
diff --git a/sn_evm/src/data_payments.rs b/sn_evm/src/data_payments.rs
index 3b5ccd6e11..7ac835bbd1 100644
--- a/sn_evm/src/data_payments.rs
+++ b/sn_evm/src/data_payments.rs
@@ -7,6 +7,7 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 use crate::{AttoTokens, EvmError};
+use evmlib::common::TxHash;
 use evmlib::{
     common::{Address as RewardsAddress, QuoteHash},
     utils::dummy_address,
@@ -22,6 +23,22 @@ pub const QUOTE_EXPIRATION_SECS: u64 = 3600;
 /// The margin allowed for live_time
 const LIVE_TIME_MARGIN: u64 = 10;
 
+/// The proof of payment for a data payment
+#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
+pub struct ProofOfPayment {
+    /// The Quote we're paying for
+    pub quote: PaymentQuote,
+    /// The transaction hash
+    pub tx_hash: TxHash,
+}
+
+impl ProofOfPayment {
+    pub fn to_peer_id_payee(&self) -> Option<PeerId> {
+        let pub_key = PublicKey::try_decode_protobuf(&self.quote.pub_key).ok()?;
+        Some(PeerId::from_public_key(&pub_key))
+    }
+}
+
 /// Quoting metrics that got used to generate a quote, or to track peer's status.
 #[derive(
     Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize, custom_debug::Debug,
diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs
index 6f8edadb85..dafee4608e 100644
--- a/sn_evm/src/evm.rs
+++ b/sn_evm/src/evm.rs
@@ -6,25 +6,27 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
-use evmlib::common::TxHash;
-use libp2p::identity::PublicKey;
-use libp2p::PeerId;
-use serde::{Deserialize, Serialize};
+use crate::EvmNetwork;
 
-use crate::PaymentQuote;
+pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL};
 
-/// The proof of payment for a data payment
-#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
-pub struct ProofOfPayment {
-    /// The Quote we're paying for
-    pub quote: PaymentQuote,
-    /// The transaction hash
-    pub tx_hash: TxHash,
-}
+/// Load the evm network from env
+pub fn network_from_env() -> EvmNetwork {
+    let rpc_url = std::env::var(RPC_URL);
+    let payment_token_address = std::env::var(PAYMENT_TOKEN_ADDRESS);
+    let data_payments_address = std::env::var(DATA_PAYMENTS_ADDRESS);
 
-impl ProofOfPayment {
-    pub fn to_peer_id_payee(&self) -> Option<PeerId> {
-        let pub_key = PublicKey::try_decode_protobuf(&self.quote.pub_key).ok()?;
-        Some(PeerId::from_public_key(&pub_key))
+    match (rpc_url, payment_token_address, data_payments_address) {
+        // all parameters are custom
+        (Ok(url), Ok(tok), Ok(pay)) => EvmNetwork::new_custom(&url, &tok, &pay),
+        // only rpc url is custom
+        (Ok(url), _, _) => {
+            let defaults = EvmNetwork::ArbitrumOne;
+            let tok = defaults.payment_token_address().to_string();
+            let pay = defaults.data_payments_address().to_string();
+            EvmNetwork::new_custom(&url, &tok, &pay)
+        }
+        // default to arbitrum one
+        _ => EvmNetwork::ArbitrumOne,
     }
 }
diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs
index de3a62c1dc..43c7bd2b43 100644
--- a/sn_evm/src/lib.rs
+++ b/sn_evm/src/lib.rs
@@ -13,16 +13,16 @@ pub use evmlib::common::Address as RewardsAddress;
 pub use evmlib::common::{QuoteHash, TxHash};
 pub use evmlib::utils;
 pub use evmlib::wallet::Wallet as EvmWallet;
-pub use evmlib::CustomNetwork as EvmNetworkCustom;
 pub use evmlib::Network as EvmNetwork;
 
 mod amount;
 mod data_payments;
 mod error;
-mod evm;
 
-pub use data_payments::{PaymentQuote, QuotingMetrics};
-pub use evm::ProofOfPayment;
+/// EVM network configuration
+pub mod evm;
+
+pub use data_payments::{PaymentQuote, ProofOfPayment, QuotingMetrics};
 
 /// Types used in the public API
 pub use amount::{Amount, AttoTokens};
diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs
index f3888db47c..96a1087183 100644
--- a/sn_node/src/bin/safenode/main.rs
+++ b/sn_node/src/bin/safenode/main.rs
@@ -257,7 +257,12 @@ fn main() -> Result<()> {
         .as_ref()
         .cloned()
         .map(|v| v.into())
-        .unwrap_or_default();
+        .unwrap_or_else(sn_evm::evm::network_from_env);
+    if matches!(evm_network, EvmNetwork::Custom(_)) {
+        println!("Using custom EVM network");
+        info!("Using custom EVM network {evm_network:?}");
+    }
+    println!("EVM network: {evm_network:?}");
 
     let node_socket_addr = SocketAddr::new(opt.ip, opt.port);
     let (root_dir, keypair) = get_root_dir_and_keypair(&opt.root_dir)?;
diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs
index 8c3c87bf77..7c5ec3aa51 100644
--- a/sn_node/src/bin/safenode/subcommands.rs
+++ b/sn_node/src/bin/safenode/subcommands.rs
@@ -1,5 +1,5 @@
 use clap::Subcommand;
-use sn_evm::{EvmNetwork, EvmNetworkCustom};
+use sn_evm::EvmNetwork;
 
 #[derive(Subcommand, Clone, Debug)]
 pub(crate) enum EvmNetworkCommand {
@@ -31,11 +31,7 @@ impl Into<EvmNetwork> for EvmNetworkCommand {
             rpc_url,
             payment_token_address,
             data_payments_address,
-        } => EvmNetwork::Custom(EvmNetworkCustom::new(
-            &rpc_url,
-            &payment_token_address,
-            &data_payments_address,
-        )),
+        } => EvmNetwork::new_custom(&rpc_url,
&payment_token_address, &data_payments_address),
         }
     }
 }
diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs
index 7556a1bafc..a046609e4e 100644
--- a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs
+++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs
@@ -8,7 +8,7 @@
 
 use clap::Subcommand;
 use color_eyre::eyre::Result;
-use sn_evm::{utils::local_evm_network_from_csv, EvmNetwork, EvmNetworkCustom};
+use sn_evm::{utils::local_evm_network_from_csv, EvmNetwork};
 
 #[derive(Subcommand, Clone, Debug)]
 #[allow(clippy::enum_variant_names)]
@@ -49,11 +49,11 @@ impl TryInto<EvmNetwork> for EvmNetworkCommand {
                 rpc_url,
                 payment_token_address,
                 data_payments_address,
-            } => Ok(EvmNetwork::Custom(EvmNetworkCustom::new(
+            } => Ok(EvmNetwork::new_custom(
                 &rpc_url,
                 &payment_token_address,
                 &data_payments_address,
-            ))),
+            )),
         }
     }
 }
diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs
index 4d8ebb1d3d..68d7ba7e56 100644
--- a/sn_node_manager/src/local.rs
+++ b/sn_node_manager/src/local.rs
@@ -7,11 +7,17 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 use crate::add_services::config::PortRange;
-#[cfg(feature = "faucet")]
-use crate::helpers::get_username;
 use crate::helpers::{
     check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option,
 };
+
+#[cfg(feature = "faucet")]
+use crate::helpers::get_username;
+#[cfg(feature = "faucet")]
+use sn_service_management::FaucetServiceData;
+#[cfg(feature = "faucet")]
+use sn_transfers::get_faucet_data_dir;
+
 use color_eyre::eyre::OptionExt;
 use color_eyre::{eyre::eyre, Result};
 use colored::Colorize;
@@ -20,16 +26,11 @@ use libp2p::{multiaddr::Protocol, Multiaddr, PeerId};
 use mockall::automock;
 use sn_evm::{EvmNetwork, RewardsAddress};
 use sn_logging::LogFormat;
-#[cfg(feature = "faucet")]
-use sn_service_management::FaucetServiceData;
 use sn_service_management::{
     control::ServiceControl,
     rpc::{RpcActions, RpcClient},
     NodeRegistry, NodeServiceData, ServiceStatus,
 };
-#[cfg(feature = "faucet")]
-use sn_transfers::get_faucet_data_dir;
-
 use std::{
     net::{IpAddr, Ipv4Addr, SocketAddr},
     path::PathBuf,

From e3712165ea1cc1ada15a1ad2d1e533f6f0ebaa9a Mon Sep 17 00:00:00 2001
From: grumbach
Date: Tue, 8 Oct 2024 17:47:43 +0900
Subject: [PATCH 128/255] chore: make all env readers use the same function

---
 evmlib/src/utils.rs |  3 ++-
 sn_evm/src/evm.rs   | 22 ++++++----------------
 2 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index 0007c17bf4..285b92be72 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -67,7 +67,8 @@ pub fn local_evm_network_from_csv() -> Result<Network, Error> {
 
     if !csv_path.exists() {
         error!("evm data csv path does not exist {:?}", csv_path);
-        return Err(Error::FailedToGetEvmNetwork);
+        return Err(Error::FailedToGetEvmNetwork)
+            .inspect_err(|_| error!("Missing evm testnet CSV file"))?;
     }
 
     let csv = std::fs::read_to_string(&csv_path)
diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs
index dafee4608e..53290e4035 100644
--- a/sn_evm/src/evm.rs
+++ b/sn_evm/src/evm.rs
@@ -12,21 +12,11 @@ pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL};
 
 /// Load the evm network from env
 pub fn network_from_env() -> EvmNetwork {
-    let rpc_url = std::env::var(RPC_URL);
-    let payment_token_address = std::env::var(PAYMENT_TOKEN_ADDRESS);
-    let data_payments_address = std::env::var(DATA_PAYMENTS_ADDRESS);
-
-    match (rpc_url, payment_token_address,
data_payments_address) { - // all parameters are custom - (Ok(url), Ok(tok), Ok(pay)) => EvmNetwork::new_custom(&url, &tok, &pay), - // only rpc url is custom - (Ok(url), _, _) => { - let defaults = EvmNetwork::ArbitrumOne; - let tok = defaults.payment_token_address().to_string(); - let pay = defaults.data_payments_address().to_string(); - EvmNetwork::new_custom(&url, &tok, &pay) - } - // default to arbitrum one - _ => EvmNetwork::ArbitrumOne, + match evmlib::utils::evm_network_from_env() { + Ok(network) => network, + Err(e) => { + warn!("Failed to get EVM network from environment variables, using default: {e}"); + EvmNetwork::default() + }, } } From 3a8eef45bde8c81260003bebacf3af9b57a590fe Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 8 Oct 2024 17:55:32 +0900 Subject: [PATCH 129/255] chore: clippy --- sn_evm/src/evm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs index 53290e4035..13b3ca9500 100644 --- a/sn_evm/src/evm.rs +++ b/sn_evm/src/evm.rs @@ -17,6 +17,6 @@ pub fn network_from_env() -> EvmNetwork { Err(e) => { warn!("Failed to get EVM network from environment variables, using default: {e}"); EvmNetwork::default() - }, + } } } From 2bfb117fff0ccc635602272b2bd73b1833194245 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 2 Oct 2024 17:38:46 +0530 Subject: [PATCH 130/255] feat(ci): get half of e2e action working --- .github/workflows/merge.yml | 185 +++++++++++++++----------------- Cargo.lock | 1 + autonomi/Cargo.toml | 3 +- autonomi/README.md | 4 +- autonomi/tests/common.rs | 24 +++-- autonomi/tests/evm/file.rs | 11 +- autonomi/tests/file.rs | 23 ++-- autonomi/tests/put.rs | 23 +++- autonomi/tests/register.rs | 19 +++- autonomi/tests/wallet.rs | 11 ++ autonomi/tests/wasm.rs | 17 +-- autonomi_cli/src/log_metrics.rs | 40 ------- autonomi_cli/src/main.rs | 31 +++++- sn_logging/src/lib.rs | 4 +- 14 files changed, 218 insertions(+), 178 deletions(-) delete mode 100644 autonomi_cli/src/log_metrics.rs diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 170d2cf632..17d32b5d48 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -150,104 +150,89 @@ jobs: # # we do many more runs on the nightly run # PROPTEST_CASES: 50 - # e2e: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: E2E tests - # runs-on: ${{ matrix.os }} - # strategy: - # matrix: - # include: - # - os: ubuntu-latest - # safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # safe_path: /Users/runner/Library/Application Support/safe - # steps: - # - uses: actions/checkout@v4 - - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 - - # - name: Build binaries - # run: cargo build --release --bin safenode --bin safe - # timeout-minutes: 30 - - # - name: Build faucet binary - # run: cargo build --release --bin faucet --features gifting - # timeout-minutes: 30 - - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ${{ matrix.os }} - # build: true - - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi - 
- # # only these unit tests require a network, the rest are run above - # - name: Run sn_client --tests - # run: cargo test --package sn_client --release --tests - # env: - # SN_LOG: "all" - # # only set the target dir for windows to bypass the linker issue. - # # happens if we build the node manager via testnet action - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 15 - - # - name: Create and fund a wallet to pay for files storage - # run: | - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + e2e: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: E2E tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + safe_path: /home/runner/.local/share/safe + - os: windows-latest + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + safe_path: /Users/runner/Library/Application Support/safe + steps: + - uses: actions/checkout@v4 - # - name: Start a client to upload cost estimate - # run: ./target/release/safe --log-output-dest=data-dir files estimate "./resources" - # env: - # SN_LOG: "all" - # timeout-minutes: 15 + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 - # - name: Start a client to upload files - # run: ./target/release/safe --log-output-dest=data-dir files upload "./resources" --retry-strategy quick - # env: - # SN_LOG: "all" - # timeout-minutes: 15 + - name: Build binaries + run: cargo build --release --bin safenode --bin autonomi_cli + timeout-minutes: 30 - # - name: Start a client to download files - # run: ./target/release/safe --log-output-dest=data-dir files download --retry-strategy quick - # env: - # SN_LOG: "all" - # timeout-minutes: 2 + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ${{ matrix.os }} + build: true - # # Client FoldersApi tests against local network - # - name: Client FoldersApi tests against local network - # run: cargo test --release --package sn_client --test folders_api - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Check if SAFE_PEERS and EVM_NETWORK are set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi + + # only these unit tests require a network, the rest are run above + - name: Run autonomi --tests + run: cargo test --package autonomi --tests -- --nocapture + env: + SN_LOG: "v" + # only set the target dir for windows to bypass the linker issue. + # happens if we build the node manager via testnet action + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + timeout-minutes: 15 + + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + + - name: Start a client to upload cost estimate + run: ./target/release/autonomi_cli --log-output-dest=data-dir file cost "./resources" + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: Start a client to upload files + run: ./target/release/autonomi_cli --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 15 + + - name: Get address from stdout + run: | + ADDRESS=$(rg "At address: ([0-9a-f]{64})" -o -r '$1' ./upload_output) + echo "UPLOAD_ADDRESS=$ADDRESS" >> $GITHUB_ENV - # # CLI Acc-Packet files and folders tests against local network - # - name: CLI Acc-Packet files and folders tests - # run: cargo test --release -p sn_cli test_acc_packet -- --nocapture - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Start a client to download files + run: ./target/release/autonomi_cli --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources + env: + SN_LOG: "all" + timeout-minutes: 5 # - name: Start a client to create a register writable by the owner only # run: ./target/release/safe --log-output-dest=data-dir register create -n baobao @@ -331,13 +316,13 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Stop the local network and upload logs - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_e2e - # platform: ${{ matrix.os }} + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: stop + log_file_prefix: safe_test_logs_e2e + platform: ${{ matrix.os }} # spend_test: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" diff --git a/Cargo.lock b/Cargo.lock index c6ca94bec5..e4da458598 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1080,6 +1080,7 @@ dependencies = [ "sn_bls_ckd", "sn_curv", "sn_evm", + "sn_logging", "sn_networking", "sn_peers_acquisition", "sn_protocol", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index d5990e8b53..b4019decae 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -16,7 +16,7 @@ data = [] vault = ["data"] files = ["fs", "data"] fs = ["tokio/fs"] -local = ["sn_networking/local-discovery"] +local-discovery = ["sn_networking/local-discovery"] registers = [] [dependencies] @@ -47,6 +47,7 @@ xor_name = "5.0.0" [dev-dependencies] eyre = "0.6.5" +sn_logging = { path = "../sn_logging", version = "0.2.33" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
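For anyone exercising the renamed test feature locally, a minimal sketch of the flow that the e2e job above and the README changes below both assume (a local EVM testnet and a local safenode network already running; the `SECRET_KEY` value is the default Anvil dev key exported by the CI job, not a real secret):

```sh
# Sketch only: assumes a local EVM testnet and node network are already up
# (see the README section updated in the next diff).
export SECRET_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
export EVM_NETWORK=local
# The test-only feature was renamed from `local` to `local-discovery` in this patch:
cargo test --package autonomi --features local-discovery -- --nocapture
```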
diff --git a/autonomi/README.md b/autonomi/README.md
index ed89812901..bf5bb7cad7 100644
--- a/autonomi/README.md
+++ b/autonomi/README.md
@@ -37,7 +37,7 @@ cargo run --bin=safenode-manager --features=local-discovery -- local run --build
 ```sh
 $ EVM_NETWORK=local cargo test --package=autonomi --features=local
 # Or with logs
-$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture
+$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local-discovery -- --nocapture
 ```
 
 ### Using a live testnet or mainnet
@@ -57,7 +57,7 @@ cargo run --bin=safenode-manager --features=local-discovery -- local run --build
 ```sh
 $ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local
 # Or with logs
-$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local -- --nocapture
+$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY=<PRIVATE_KEY> cargo test --package=autonomi --features=local-discovery -- --nocapture
 ```
 
 ## Faucet (local)
diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs
index 77a057fde2..5d18e07d7b 100644
--- a/autonomi/tests/common.rs
+++ b/autonomi/tests/common.rs
@@ -1,4 +1,13 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
 use bytes::Bytes;
+use eyre::Result;
 use libp2p::Multiaddr;
 use rand::Rng;
 use sn_peers_acquisition::parse_peer_addr;
@@ -23,10 +32,13 @@ pub fn enable_logging() {
 /// Parse the `SAFE_PEERS` env var into a list of Multiaddrs.
 ///
 /// An empty `Vec` will be returned if the env var is not set.
-pub fn peers_from_env() -> Result<Vec<Multiaddr>, libp2p::multiaddr::Error> {
-    let Ok(peers_str) = env::var("SAFE_PEERS") else {
-        return Ok(vec![]);
-    };
-
-    peers_str.split(',').map(parse_peer_addr).collect()
+pub fn peers_from_env() -> Result<Vec<Multiaddr>> {
+    let bootstrap_peers = if cfg!(feature = "local-discovery") {
+        Ok(vec![])
+    } else if let Ok(peers_str) = env::var("SAFE_PEERS") {
+        peers_str.split(',').map(parse_peer_addr).collect()
+    } else {
+        Ok(vec![])
+    }?;
+    Ok(bootstrap_peers)
 }
diff --git a/autonomi/tests/evm/file.rs b/autonomi/tests/evm/file.rs
index 5283c775b9..7b16217b97 100644
--- a/autonomi/tests/evm/file.rs
+++ b/autonomi/tests/evm/file.rs
@@ -1,3 +1,11 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
 #[cfg(feature = "evm-payments")]
 mod test {
     use autonomi::Client;
@@ -9,7 +17,8 @@ mod test {
 
     #[tokio::test]
     async fn file() -> Result<(), Box<dyn std::error::Error>> {
-        common::enable_logging();
+        let _log_appender_guard =
+            sn_logging::LogBuilder::init_single_threaded_tokio_test("file", false);
 
         let mut client = Client::connect(&[]).await.unwrap();
         let mut wallet = get_funded_wallet();
diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs
index 6b2d58ad9d..bce5ee61a1 100644
--- a/autonomi/tests/file.rs
+++ b/autonomi/tests/file.rs
@@ -1,17 +1,28 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
 #![cfg(feature = "files")]
 
 mod common;
 
 use autonomi::Client;
+use common::peers_from_env;
+use eyre::Result;
+use sn_logging::LogBuilder;
 use std::time::Duration;
 use test_utils::evm::get_funded_wallet;
 use tokio::time::sleep;
 
 #[tokio::test]
-async fn file() -> Result<(), Box<dyn std::error::Error>> {
-    common::enable_logging();
+async fn file() -> Result<()> {
+    let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("file", false);
 
-    let mut client = Client::connect(&[]).await.unwrap();
+    let mut client = Client::connect(&peers_from_env()?).await?;
     let wallet = get_funded_wallet();
 
     let (root, addr) = client
@@ -32,10 +43,10 @@ async fn file() -> Result<(), Box<dyn std::error::Error>> {
 
 #[cfg(feature = "vault")]
 #[tokio::test]
-async fn file_into_vault() -> eyre::Result<()> {
-    common::enable_logging();
+async fn file_into_vault() -> Result<()> {
+    let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("file", false);
 
-    let mut client = Client::connect(&[]).await?;
+    let mut client = Client::connect(&peers_from_env()?).await?;
     let mut wallet = get_funded_wallet();
     let client_sk = bls::SecretKey::random();
diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs
index be7f84c025..2989259336 100644
--- a/autonomi/tests/put.rs
+++ b/autonomi/tests/put.rs
@@ -1,24 +1,37 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+ #![cfg(feature = "data")] mod common; use autonomi::Client; +use common::peers_from_env; +use eyre::Result; +use sn_logging::LogBuilder; use std::time::Duration; use test_utils::evm::get_funded_wallet; use tokio::time::sleep; #[tokio::test] -async fn put() { - common::enable_logging(); +async fn put() -> Result<()> { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("put", false); - let client = Client::connect(&[]).await.unwrap(); + let client = Client::connect(&peers_from_env()?).await?; let wallet = get_funded_wallet(); let data = common::gen_random_data(1024 * 1024 * 10); - let addr = client.put(data.clone(), &wallet).await.unwrap(); + let addr = client.put(data.clone(), &wallet).await?; sleep(Duration::from_secs(10)).await; - let data_fetched = client.get(addr).await.unwrap(); + let data_fetched = client.get(addr).await?; assert_eq!(data, data_fetched, "data fetched should match data put"); + + Ok(()) } diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index f03cf34a4c..1296442553 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -1,19 +1,30 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + #![cfg(feature = "registers")] mod common; use autonomi::Client; use bytes::Bytes; +use common::peers_from_env; +use eyre::Result; use rand::Rng; +use sn_logging::LogBuilder; use std::time::Duration; use test_utils::evm::get_funded_wallet; use tokio::time::sleep; #[tokio::test] -async fn register() { - common::enable_logging(); +async fn register() -> Result<()> { + let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("register", false); - let client = Client::connect(&[]).await.unwrap(); + let client = Client::connect(&peers_from_env()?).await?; let wallet = get_funded_wallet(); // Owner key of the register. @@ -46,4 +57,6 @@ async fn register() { // Fetch and verify the register contains the updated value let register = client.register_get(*register.address()).await.unwrap(); assert_eq!(register.values(), vec![Bytes::from(vec![5, 6, 7, 8])]); + + Ok(()) } diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs index d8245c7457..82b84205d5 100644 --- a/autonomi/tests/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -1,9 +1,18 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+
 mod common;
 
 use const_hex::traits::FromHex;
 use evmlib::common::{Address, Amount};
 use evmlib::utils::evm_network_from_env;
 use evmlib::wallet::Wallet;
+use sn_logging::LogBuilder;
 use test_utils::evm::get_funded_wallet;
 
 #[tokio::test]
@@ -21,6 +30,8 @@ async fn from_private_key() {
 
 #[tokio::test]
 async fn send_tokens() {
+    let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("wallet", false);
+
     let network =
         evm_network_from_env().expect("Could not get EVM network from environment variables");
     let wallet = get_funded_wallet();
diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs
index e1265d8e59..246644b491 100644
--- a/autonomi/tests/wasm.rs
+++ b/autonomi/tests/wasm.rs
@@ -1,6 +1,15 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
 use std::time::Duration;
 
 use autonomi::Client;
+use common::peers_from_env;
 use test_utils::evm::get_funded_wallet;
 use tokio::time::sleep;
 use wasm_bindgen_test::*;
@@ -14,13 +23,7 @@ wasm_bindgen_test_configure!(run_in_browser);
 async fn file() -> Result<(), Box<dyn std::error::Error>> {
     common::enable_logging();
 
-    let peers = vec![
-        "/ip4/127.0.0.1/tcp/35499/ws/p2p/12D3KooWGN5RqREZ4RYtsUc3DNCkrNSVXEzTYEbMb1AZx2rNddoW"
-            .try_into()
-            .expect("str to be valid multiaddr"),
-    ];
-
-    let client = Client::connect(&peers).await.unwrap();
+    let client = Client::connect(&peers_from_env()?).await.unwrap();
     let wallet = get_funded_wallet();
 
     let data = common::gen_random_data(1024 * 1024 * 10);
diff --git a/autonomi_cli/src/log_metrics.rs b/autonomi_cli/src/log_metrics.rs
deleted file mode 100644
index 9694d799a5..0000000000
--- a/autonomi_cli/src/log_metrics.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use color_eyre::Result;
-#[cfg(feature = "metrics")]
-use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat};
-
-use crate::opt::Opt;
-
-pub fn init_logging_and_metrics(opt: &Opt) -> Result<()> {
-    let logging_targets = vec![
-        ("sn_networking".to_string(), Level::INFO),
-        ("sn_build_info".to_string(), Level::TRACE),
-        ("autonomi".to_string(), Level::TRACE),
-        ("sn_logging".to_string(), Level::TRACE),
-        ("sn_peers_acquisition".to_string(), Level::TRACE),
-        ("sn_protocol".to_string(), Level::TRACE),
-        ("sn_registers".to_string(), Level::TRACE),
-        ("sn_evm".to_string(), Level::TRACE),
-    ];
-    let mut log_builder = LogBuilder::new(logging_targets);
-    log_builder.output_dest(opt.log_output_dest.clone());
-    log_builder.format(opt.log_format.unwrap_or(LogFormat::Default));
-    let _log_handles = log_builder.initialize()?;
-
-    #[cfg(feature = "metrics")]
-    std::thread::spawn(|| {
-        let rt = tokio::runtime::Runtime::new()
-            .expect("Failed to create tokio runtime to spawn metrics thread");
-        rt.spawn(async {
-            init_metrics(std::process::id()).await;
-        });
-    });
-    Ok(())
-}
diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs
index 8e7a9e1a5b..d655b2cf0a 100644
--- a/autonomi_cli/src/main.rs
+++ b/autonomi_cli/src/main.rs
@@ -12,7 +12,6 @@ extern crate tracing;
 mod access;
 mod actions;
 mod commands;
-mod log_metrics;
 mod opt;
 
 pub use access::data_dir;
@@ -23,11 +22,14 @@ use clap::Parser;
 use color_eyre::Result;
 use opt::Opt;
+use sn_logging::{LogBuilder, LogFormat, ReloadHandle, WorkerGuard};
+use tracing::Level;
 
-fn main() -> Result<()> {
+#[tokio::main]
+async fn main() -> Result<()> {
     color_eyre::install().expect("Failed to initialise error handler");
     let opt = Opt::parse();
-    log_metrics::init_logging_and_metrics(&opt).expect("Failed to initialise logging and metrics");
+    let _log_guards = init_logging_and_metrics(&opt)?;
 
     // Log the full command that was run and the git version
     info!("\"{}\"", std::env::args().collect::<Vec<_>>().join(" "));
@@ -35,6 +37,25 @@ fn main() -> Result<()> {
     info!("autonomi client built with git version: {version}");
     println!("autonomi client built with git version: {version}");
 
-    let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime");
-    rt.block_on(commands::handle_subcommand(opt))
+    commands::handle_subcommand(opt).await?;
+
+    Ok(())
+}
+
+fn init_logging_and_metrics(opt: &Opt) -> Result<(ReloadHandle, Option<WorkerGuard>)> {
+    let logging_targets = vec![
+        ("sn_networking".to_string(), Level::INFO),
+        ("sn_build_info".to_string(), Level::TRACE),
+        ("autonomi_cli".to_string(), Level::TRACE),
+        ("sn_logging".to_string(), Level::TRACE),
+        ("sn_peers_acquisition".to_string(), Level::TRACE),
+        ("sn_protocol".to_string(), Level::TRACE),
+        ("sn_registers".to_string(), Level::TRACE),
+        ("sn_evm".to_string(), Level::TRACE),
+    ];
+    let mut log_builder = LogBuilder::new(logging_targets);
+    log_builder.output_dest(opt.log_output_dest.clone());
+    log_builder.format(opt.log_format.unwrap_or(LogFormat::Default));
+    let guards = log_builder.initialize()?;
+    Ok(guards)
+}
diff --git a/sn_logging/src/lib.rs b/sn_logging/src/lib.rs
index f88463246f..464581d9e3 100644
--- a/sn_logging/src/lib.rs
+++ b/sn_logging/src/lib.rs
@@ -17,12 +17,12 @@ use layers::TracingLayers;
 use serde::{Deserialize, Serialize};
 use std::path::PathBuf;
 use tracing::info;
-use tracing_appender::non_blocking::WorkerGuard;
 use tracing_core::dispatcher::DefaultGuard;
 use tracing_subscriber::{prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt};
 
 pub use
error::Error; pub use layers::ReloadHandle; +pub use tracing_appender::non_blocking::WorkerGuard; // re-exporting the tracing crate's Level as it is used in our public API pub use tracing_core::Level; @@ -268,7 +268,7 @@ impl LogBuilder { let timestamp = chrono::Local::now().format("%Y-%m-%d_%H-%M-%S").to_string(); let path = dir .join("safe") - .join("client") + .join("autonomi_cli") .join("logs") .join(format!("log_{timestamp}")); LogOutputDest::Path(path) From b323fc7a682b56b219913f2598a30fb1f53cca65 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 16:51:17 +0530 Subject: [PATCH 131/255] fix(ci): enable unit tests --- .github/workflows/merge.yml | 85 ++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 48 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 17d32b5d48..11b45b676a 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -92,63 +92,52 @@ jobs: echo "All packages built successfully. Cleaning up..." cargo clean - # unit: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: Unit Tests - # runs-on: ${{ matrix.os }} - # strategy: - # matrix: - # os: [ubuntu-latest, windows-latest, macos-latest] - # steps: - # - uses: actions/checkout@v4 - - # - name: Check we're on the right commit - # run: git log -1 --oneline - - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable + unit: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: Unit Tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 - # - uses: Swatinem/rust-cache@v2 + - name: Check we're on the right commit + run: git log -1 --oneline - # - name: Run CLI tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_cli -- --skip test_acc_packet_ + - name: Install Rust + uses: dtolnay/rust-toolchain@stable - # # We do not run client `--tests` here as they can require a network - # - name: Run client tests - # timeout-minutes: 25 - # run: | - # cargo test --release --package sn_client --lib - # cargo test --release --package sn_client --doc + - uses: Swatinem/rust-cache@v2 - # - name: Run node tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_node --lib + - name: Run node tests + timeout-minutes: 25 + run: cargo test --release --package sn_node --lib - # - name: Run network tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_networking --features="open-metrics" + - name: Run network tests + timeout-minutes: 25 + run: cargo test --release --package sn_networking --features="open-metrics" - # - name: Run protocol tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_protocol + - name: Run protocol tests + timeout-minutes: 25 + run: cargo test --release --package sn_protocol - # - name: Run transfers tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_transfers + - name: Run transfers tests + timeout-minutes: 25 + run: cargo test --release --package sn_transfers - # - name: Run logging tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_logging + - name: Run logging tests + timeout-minutes: 25 + run: cargo test --release --package sn_logging - # - name: Run register tests - # timeout-minutes: 25 - # run: cargo test --release --package sn_registers - # env: - # # this will speed up PR merge flows, while giving us a modicum - # # of proptesting - # # we do many 
more runs on the nightly run - # PROPTEST_CASES: 50 + - name: Run register tests + timeout-minutes: 25 + run: cargo test --release --package sn_registers + env: + # this will speed up PR merge flows, while giving us a modicum + # of proptesting + # we do many more runs on the nightly run + PROPTEST_CASES: 50 e2e: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" From fa299c86c27a3932eb655e4a62b65a185aa5a2c1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 16:55:52 +0530 Subject: [PATCH 132/255] fix(ci): enable data location test --- .github/workflows/merge.yml | 211 ++++++++++++++++++------------------ 1 file changed, 105 insertions(+), 106 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 11b45b676a..9f8a1b723c 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -655,124 +655,123 @@ jobs: # exit 1 # fi - # verify_data_location_routing_table: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: Verify data location and Routing Table - # runs-on: ${{ matrix.os }} - # strategy: - # matrix: - # include: - # - os: ubuntu-latest - # node_data_path: /home/runner/.local/share/safe/node - # safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - # steps: - # - uses: actions/checkout@v4 - - # - uses: dtolnay/rust-toolchain@stable + verify_data_location_routing_table: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: Verify data location and Routing Table + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + node_data_path: /home/runner/.local/share/safe/node + safe_path: /home/runner/.local/share/safe + - os: windows-latest + node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + node_data_path: /Users/runner/Library/Application Support/safe/node + safe_path: /Users/runner/Library/Application Support/safe + steps: + - uses: actions/checkout@v4 - # - uses: Swatinem/rust-cache@v2 + - uses: dtolnay/rust-toolchain@stable - # - name: Build binaries - # run: cargo build --release --features local-discovery --bin safenode - # timeout-minutes: 30 + - uses: Swatinem/rust-cache@v2 - # - name: Build fuacet binary - # run: cargo build --release --features="local-discovery,gifting" --bin faucet - # timeout-minutes: 30 + - name: Build binaries + run: cargo build --release --features local-discovery --bin safenode + timeout-minutes: 30 - # - name: Build data location and routing table tests - # run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run - # env: - # # only set the target dir for windows to bypass the linker issue. - # # happens if we build the node manager via testnet action - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 30 + - name: Build data location and routing table tests + run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + env: + # only set the target dir for windows to bypass the linker issue. 
+ # happens if we build the node manager via testnet action + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + timeout-minutes: 30 - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ${{ matrix.os }} - # build: true + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ${{ matrix.os }} + build: true - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + - name: Check if SAFE_PEERS and EVM_NETWORK are set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi - # - name: Verify the routing tables of the nodes - # run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture - # env: - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 5 + - name: Verify the routing tables of the nodes + run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + env: + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + timeout-minutes: 5 - # - name: Verify the location of the data on the network - # run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture - # env: - # CHURN_COUNT: 6 - # SN_LOG: "all" - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 25 + - name: Verify the location of the data on the network + run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + env: + CHURN_COUNT: 6 + SN_LOG: "all" + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + timeout-minutes: 25 - # - name: Verify the routing tables of the nodes - # run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture - # env: - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 5 + - name: Verify the routing tables of the nodes + run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + env: + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + timeout-minutes: 5 - # - name: Stop the local network and upload logs - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_data_location - # platform: ${{ matrix.os }} + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: stop + log_file_prefix: safe_test_logs_data_location + platform: ${{ matrix.os }} - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run: | - # restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - # echo "Node dir count is $node_count" + - name: Verify restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restart $restart_count nodes" + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "PeerRemovedFromRoutingTable $peer_removed times" + if [ $peer_removed -lt $restart_count ]; then + echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + exit 1 + fi + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" - # # Only error out after uploading the logs - # - name: Don't log raw data - # if: matrix.os != 'windows-latest' # causes error - # shell: bash - # timeout-minutes: 10 - # run: | - # if ! rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' - # then - # echo "We are logging an extremely large data" - # exit 1 - # fi + # Only error out after uploading the logs + - name: Don't log raw data + if: matrix.os != 'windows-latest' # causes error + shell: bash + timeout-minutes: 10 + run: | + if ! 
rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }'
          then
            echo "We are logging an extremely large data"
            exit 1
          fi

  # faucet_test:
  #   if: "!startsWith(github.event.head_commit.message, 'chore(release):')"

From 8b0f8010953a0eba17b452449459a2f69a68e0d4 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Tue, 8 Oct 2024 17:29:25 +0530
Subject: [PATCH 133/255] fix(ci): enable register owner test

---
 .github/workflows/merge.yml | 72 +++++++++++++++++++++++++++----------
 1 file changed, 54 insertions(+), 18 deletions(-)

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 9f8a1b723c..dd5af911b3 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -214,32 +214,64 @@ jobs:
 
       - name: Get address from stdout
         run: |
-          ADDRESS=$(rg "At address: ([0-9a-f]{64})" -o -r '$1' ./upload_output)
-          echo "UPLOAD_ADDRESS=$ADDRESS" >> $GITHUB_ENV
+          UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output)
+          echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV
 
       - name: Start a client to download files
         run: ./target/release/autonomi_cli --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources
         env:
-          SN_LOG: "all"
+          SN_LOG: "v"
         timeout-minutes: 5
+
+      - name: Generate register signing key
+        run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key
 
-      # - name: Start a client to create a register writable by the owner only
-      #   run: ./target/release/safe --log-output-dest=data-dir register create -n baobao
-      #   env:
-      #     SN_LOG: "all"
-      #   timeout-minutes: 10
+      - name: Start a client to create a register writable by the owner only
+        run: ./target/release/autonomi_cli --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1
+        env:
+          SN_LOG: "v"
+        timeout-minutes: 10
 
-      # - name: Start a client to get a register writable by the owner only
-      #   run: ./target/release/safe --log-output-dest=data-dir register get -n baobao
-      #   env:
-      #     SN_LOG: "all"
-      #   timeout-minutes: 2
+      - name: Get register address from stdout
+        run: |
+          REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output)
+          echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV
 
-      # - name: Start a client to edit a register writable by the owner only
-      #   run: ./target/release/safe --log-output-dest=data-dir register edit -n baobao wood
-      #   env:
-      #     SN_LOG: "all"
-      #   timeout-minutes: 10
+      - name: Start a client to get a register writable by the owner only
+        run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} > ./register_put_output 2>&1
+        env:
+          SN_LOG: "v"
+        timeout-minutes: 5
+
+      - name: verify value after put
+        run: |
+          if [[ $(rg "With value: \[\"123\"\]" ./register_put_output) ]]; then
+            echo "Register value verified after put"
+          else
+            echo "Register value not verified after put"
+            exit 1
+          fi
+
+      - name: Start a client to edit a register writable by the owner only
+        run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456
+        env:
+          SN_LOG: "v"
+        timeout-minutes: 10
+
+      - name: Start a client to get a register writable by the owner only
+        run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} > ./register_edit_output 2>&1
+        env:
+          SN_LOG: "v"
+        timeout-minutes: 5
+
+      - name: Verify value after edit
+        run: |
+          if [[ $(rg "With value: \[\"456\"\]" ./register_edit_output) ]]; then
+            echo
"Register value verified after edit" + else + echo "Register value not verified after edit" + exit 1 + fi # # # # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) # # @@ -305,6 +337,10 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 + - name: Setup tmate session + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 + - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@evm-dev From 4dcda6add1260c426599b709e1aeb46766215ff7 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 18:14:33 +0530 Subject: [PATCH 134/255] fix(ci): export env var for windows using pwsh --- .github/workflows/merge.yml | 49 ++++++++++++++++--------------- sn_networking/src/record_store.rs | 2 ++ 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index dd5af911b3..f407dc8946 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -198,7 +198,13 @@ jobs: # FIXME: do this in a generic way for localtestnets - name: export default secret key + if: matrix.os != 'windows-latest' run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Set secret key for Windows + if: matrix.os == 'windows-latest' + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - name: Start a client to upload cost estimate run: ./target/release/autonomi_cli --log-output-dest=data-dir file cost "./resources" @@ -212,10 +218,19 @@ jobs: SN_LOG: "v" timeout-minutes: 15 - - name: Get address from stdout + - name: Get address from stdout (Unix) + if: matrix.os != 'windows-latest' run: | UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: Get address from stdout (Windows) + if: matrix.os == 'windows-latest' + run: | + $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output + echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - name: Start a client to download files run: ./target/release/autonomi_cli --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources @@ -232,10 +247,19 @@ jobs: SN_LOG: "v" timeout-minutes: 10 - - name: Get register address from stdout + - name: Get register address from stdout (Unix) + if: matrix.os != 'windows-latest' run: | REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: Get register address from stdout (Windows) + if: matrix.os == 'windows-latest' + run: | + $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output + echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - name: Start a client to get a register writable by the owner only run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} > ./register_put_output 2>&1 @@ -243,15 +267,6 @@ jobs: SN_LOG: "v" timeout-minutes: 5 - - name: verify value after put - run: | - if [[ $(rg "With value: \[\"123\"\]" ./register_put_output) ]]; then - echo "Register value verified after put" 
- else - echo "Register value not verified after put" - exit 1 - fi - - name: Start a client to edit a register writable by the owner only run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 env: @@ -264,14 +279,6 @@ jobs: SN_LOG: "v" timeout-minutes: 5 - - name: Verify value after edit - run: | - if [[ $(rg "With value: \[\"456\"\]" ./register_edit_output) ]]; then - echo "Register value verified after edit" - else - echo "Register value not verified after edit" - exit 1 - fi # # # # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) # # @@ -337,10 +344,6 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - - name: Setup tmate session - if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3 - - name: Stop the local network and upload logs if: always() uses: maidsafe/sn-local-testnet-action@evm-dev diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 39eaf13320..d65a97acde 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -1586,6 +1586,8 @@ mod tests { payments_received: AtomicUsize, } + // takes a long time to run + #[ignore] #[test] fn address_distribution_sim() { use rayon::prelude::*; From c53d2c4681941620ffa03b21a97f6c5e3533a3aa Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Tue, 8 Oct 2024 17:26:43 +0200 Subject: [PATCH 135/255] chore(global): remove alloy patch --- Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 291761495b..c34946d706 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,6 +59,3 @@ pre-release-commit-message = "chore(release): release commit, tags, deps and cha publish = false push = false tag = false - -[patch.crates-io] -alloy = { git = 'https://github.com/b-zee/alloy.git', branch = "fix-rpc-client-sleep-call" } From af5361d0e633d32bffaf87ad5c20fd8f63f1245b Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 25 Sep 2024 15:43:49 +0900 Subject: [PATCH 136/255] fix(node): fix payment extraction scratchpad process --- sn_node/src/put_validation.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index a93158a18f..9124f314ef 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -117,7 +117,7 @@ impl Node { // So that when the replicate target asking for the copy, // the node can have a higher chance to respond. 
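        // The call below now receives the already-deserialized `Scratchpad`
        // together with its `RecordKey` instead of the raw `Record`; callers
        // deserialize once, and the validation helper rebuilds the `Record`
        // itself before storing it locally (see the hunks further down).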
let store_scratchpad_result = self - .validate_and_store_scratchpad_record(record, true) + .validate_and_store_scratchpad_record(scratchpad, record_key.clone(), true) .await; if store_scratchpad_result.is_ok() { @@ -288,7 +288,9 @@ impl Node { self.store_chunk(&chunk) } RecordKind::Scratchpad => { - self.validate_and_store_scratchpad_record(record, false) + let key = record.key.clone(); + let scratchpad = try_deserialize_record::(&record)?; + self.validate_and_store_scratchpad_record(scratchpad, key, false) .await } RecordKind::Spend => { @@ -388,13 +390,10 @@ impl Node { pub(crate) async fn validate_and_store_scratchpad_record( &self, - record: Record, + scratchpad: Scratchpad, + record_key: RecordKey, is_client_put: bool, ) -> Result<()> { - let record_key = record.key.clone(); - - let scratchpad = try_deserialize_record::(&record)?; - // owner PK is defined herein, so as long as record key and this match, we're good let addr = scratchpad.address(); debug!("Validating and storing scratchpad {addr:?}"); @@ -425,14 +424,21 @@ impl Node { "Storing sratchpad {addr:?} with content of {:?} as Record locally", scratchpad.encrypted_data_hash() ); + + let record = Record { + key: scratchpad_key.clone(), + value: try_serialize_record(&scratchpad, RecordKind::Scratchpad)?.to_vec(), + publisher: None, + expires: None, + }; self.network().put_local_record(record); - let pretty_key = PrettyPrintRecordKey::from(&record_key); + let pretty_key = PrettyPrintRecordKey::from(&scratchpad_key); self.record_metrics(Marker::ValidScratchpadRecordPutFromNetwork(&pretty_key)); if is_client_put { - self.replicate_valid_fresh_record(record_key, RecordType::Scratchpad); + self.replicate_valid_fresh_record(scratchpad_key, RecordType::Scratchpad); } Ok(()) From ab6b6c5b6b1f03359370c16b8e4349c95ed6fc68 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 4 Sep 2024 09:42:13 +0900 Subject: [PATCH 137/255] feat(node): encrypt records by default --- sn_node/Cargo.toml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 4155b2c6c4..47d7d351e0 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -14,7 +14,7 @@ name = "safenode" path = "src/bin/safenode/main.rs" [features] -default = ["metrics", "upnp", "open-metrics"] +default = ["metrics", "upnp", "open-metrics", "encrypt-records"] local-discovery = ["sn_networking/local-discovery"] otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] @@ -82,9 +82,7 @@ color-eyre = "0.6.2" [dev-dependencies] evmlib = { path = "../evmlib", version = "0.1" } -autonomi = { path = "../autonomi", version = "0.1.0", features = [ - "registers", -] } +autonomi = { path = "../autonomi", version = "0.1.0", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } From a298c63ccb90b83642383cc41712f02a78cb93b7 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 9 Oct 2024 09:02:59 +0200 Subject: [PATCH 138/255] fix(global): fix Cargo.lock --- Cargo.lock | 61 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d780cb0ea..607aee2bba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,8 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -151,7 +152,8 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +168,8 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -237,7 +240,8 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -254,7 +258,8 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" dependencies = [ "alloy-primitives", "alloy-serde", @@ -276,7 +281,8 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -289,7 +295,8 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,7 +316,8 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -321,7 +329,8 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -364,7 +373,8 @@ dependencies = [ [[package]] 
name = "alloy-provider" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" dependencies = [ "alloy-chains", "alloy-consensus", @@ -377,7 +387,6 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", - "alloy-signer", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -396,7 +405,6 @@ dependencies = [ "tokio", "tracing", "url", - "wasmtimer", ] [[package]] @@ -424,7 +432,8 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -440,13 +449,13 @@ dependencies = [ "tower 0.5.1", "tracing", "url", - "wasmtimer", ] [[package]] name = "alloy-rpc-types" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -458,7 +467,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" dependencies = [ "alloy-primitives", "alloy-serde", @@ -468,7 +478,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" dependencies = [ "alloy-consensus", "alloy-eips", @@ -486,7 +497,8 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" dependencies = [ "alloy-primitives", "serde", @@ -496,7 +508,8 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" dependencies = [ "alloy-primitives", "async-trait", @@ -509,7 +522,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" 
dependencies = [ "alloy-consensus", "alloy-network", @@ -597,7 +611,8 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -611,13 +626,13 @@ dependencies = [ "tracing", "url", "wasm-bindgen-futures", - "wasmtimer", ] [[package]] name = "alloy-transport-http" version = "0.4.2" -source = "git+https://github.com/b-zee/alloy.git?branch=fix-rpc-client-sleep-call#5a013fbb7ef55c31d4b42ed8a4314cba98ce9ec1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" dependencies = [ "alloy-json-rpc", "alloy-transport", From dc7f277a10e4172436bf71b0435e2751155e2a2c Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 21:07:23 +0530 Subject: [PATCH 139/255] feat(autonomi): expose public writeable registers --- autonomi/src/client/registers.rs | 23 ++++++++++++++-- autonomi_cli/src/commands.rs | 11 +++++--- autonomi_cli/src/commands/register.rs | 38 ++++++++++++++++++++------- 3 files changed, 57 insertions(+), 15 deletions(-) diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 6a52fd8820..60cb10c4b3 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -13,7 +13,7 @@ use sn_evm::AttoTokens; use sn_networking::GetRecordError; use sn_networking::VerificationKind; use sn_protocol::storage::RetryStrategy; -pub use sn_registers::RegisterAddress; +pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; use tracing::warn; use crate::client::data::PayError; @@ -222,18 +222,37 @@ impl Client { } /// Creates a new Register with a name and an initial value and uploads it to the network. + /// + /// The Register is created with the owner as the only writer. pub async fn register_create( &self, value: Bytes, name: &str, owner: RegisterSecretKey, wallet: &Wallet, + ) -> Result { + let pk = owner.public_key(); + let permissions = Permissions::new_with([pk]); + + self.register_create_with_permissions(value, name, owner, permissions, wallet) + .await + } + + /// Creates a new Register with a name and an initial value and uploads it to the network. + /// + /// Unlike `register_create`, this function allows you to specify the permissions for the register. + pub async fn register_create_with_permissions( + &self, + value: Bytes, + name: &str, + owner: RegisterSecretKey, + permissions: RegisterPermissions, + wallet: &Wallet, ) -> Result { let pk = owner.public_key(); let name = XorName::from_content_parts(&[name.as_bytes()]); // Owner can write to the register. - let permissions = Permissions::new_with([pk]); let mut register = ClientRegister::new(pk, name, permissions); let address = NetworkAddress::from_register_address(*register.address()); diff --git a/autonomi_cli/src/commands.rs b/autonomi_cli/src/commands.rs index a3bd5064a9..bb718df43a 100644 --- a/autonomi_cli/src/commands.rs +++ b/autonomi_cli/src/commands.rs @@ -84,6 +84,9 @@ pub enum RegisterCmd { name: String, /// The value to store in the register. value: String, + /// Create the register with public write access. + #[arg(long, default_value = "false")] + public: bool, }, /// Edit an existing register. 
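The `register_create_with_permissions` API added above is what the CLI's new
`--public` flag feeds into. A minimal sketch of driving it directly, assuming a
connected `Client` and a funded `Wallet` as elsewhere in this series; the
`autonomi::Wallet` re-export and the `eyre` error handling are illustrative
assumptions, not part of the patch:

    use autonomi::client::registers::{RegisterPermissions, RegisterSecretKey};
    use autonomi::{Client, Wallet}; // assumed re-exports, for the sketch only
    use bytes::Bytes;

    async fn create_public_register(client: &Client, wallet: &Wallet) -> eyre::Result<()> {
        let owner = RegisterSecretKey::random();
        // Anyone can write to this register; plain `register_create` would
        // instead restrict writes to the owner's key.
        let perms = RegisterPermissions::new_anyone_can_write();
        let register = client
            .register_create_with_permissions(Bytes::from("123"), "trycatch", owner, perms, wallet)
            .await?;
        println!("Register created at address: {}", register.address());
        Ok(())
    }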
@@ -142,9 +145,11 @@ pub async fn handle_subcommand(opt: Opt) -> Result<()> { SubCmd::Register { command } => match command { RegisterCmd::GenerateKey { overwrite } => register::generate_key(overwrite), RegisterCmd::Cost { name } => register::cost(&name, peers.await?).await, - RegisterCmd::Create { name, value } => { - register::create(&name, &value, peers.await?).await - } + RegisterCmd::Create { + name, + value, + public, + } => register::create(&name, &value, public, peers.await?).await, RegisterCmd::Edit { address, name, diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index f1df7660c6..3b3c84505c 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -7,6 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. use autonomi::client::registers::RegisterAddress; +use autonomi::client::registers::RegisterPermissions; use autonomi::client::registers::RegisterSecretKey; use autonomi::Multiaddr; use color_eyre::eyre::eyre; @@ -44,22 +45,39 @@ pub async fn cost(name: &str, peers: Vec) -> Result<()> { Ok(()) } -pub async fn create(name: &str, value: &str, peers: Vec) -> Result<()> { +pub async fn create(name: &str, value: &str, public: bool, peers: Vec) -> Result<()> { let wallet = crate::keys::load_evm_wallet()?; let register_key = crate::keys::get_register_signing_key() .wrap_err("The register key is required to perform this action")?; let client = crate::actions::connect_to_network(peers).await?; println!("Creating register with name: {name}"); - let register = client - .register_create( - value.as_bytes().to_vec().into(), - name, - register_key, - &wallet, - ) - .await - .wrap_err("Failed to create register")?; + let register = if public { + println!("With public write access"); + let permissions = RegisterPermissions::new_anyone_can_write(); + client + .register_create_with_permissions( + value.as_bytes().to_vec().into(), + name, + register_key, + permissions, + &wallet, + ) + .await + .wrap_err("Failed to create register")? + } else { + println!("With private write access"); + client + .register_create( + value.as_bytes().to_vec().into(), + name, + register_key, + &wallet, + ) + .await + .wrap_err("Failed to create register")? 
+ }; + let address = register.address(); println!("✅ Register created at address: {address}"); From ed388aa9a6bfd3ea58e4b7050bbe0ed90654ea5a Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 21:24:47 +0530 Subject: [PATCH 140/255] fix(ci): enable public register e2e test --- .github/workflows/merge.yml | 136 +++++++++++++++++------------------- 1 file changed, 64 insertions(+), 72 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index f407dc8946..02d33a89ba 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -151,7 +151,7 @@ jobs: - os: windows-latest safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - os: macos-latest - safe_path: /Users/runner/Library/Application Support/safe + safe_path: /Users/runner/Library/Application\ Support/safe steps: - uses: actions/checkout@v4 @@ -206,143 +206,135 @@ jobs: run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append shell: pwsh - - name: Start a client to upload cost estimate + - name: Get file cost run: ./target/release/autonomi_cli --log-output-dest=data-dir file cost "./resources" env: SN_LOG: "v" timeout-minutes: 15 - - name: Start a client to upload files + - name: File upload run: ./target/release/autonomi_cli --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 env: SN_LOG: "v" timeout-minutes: 15 - - name: Get address from stdout (Unix) + - name: parse address (unix) if: matrix.os != 'windows-latest' run: | UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV shell: bash - - name: Get address from stdout (Windows) + - name: parse address (win) if: matrix.os == 'windows-latest' run: | $UPLOAD_ADDRESS = rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append shell: pwsh - - name: Start a client to download files + - name: File Download run: ./target/release/autonomi_cli --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources env: SN_LOG: "v" timeout-minutes: 5 - - name: Generate register signing mey + - name: Generate register signing key run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key - - name: Start a client to create a register writable by the owner only + - name: Create register (writeable by owner) run: ./target/release/autonomi_cli --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 env: SN_LOG: "v" timeout-minutes: 10 - - name: Get register address from stdout (Unix) + - name: parse register address (unix) if: matrix.os != 'windows-latest' run: | REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV shell: bash - - name: Get register address from stdout (Windows) + - name: parse register address (win) if: matrix.os == 'windows-latest' run: | $REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append shell: pwsh - - name: Start a client to get a register writable by the owner only - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} > ./register_put_output 2>&1 + - name: 
Get register + run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 5 - - name: Start a client to edit a register writable by the owner only + - name: Edit register run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 env: SN_LOG: "v" timeout-minutes: 10 - - name: Start a client to get a register writable by the owner only - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} > ./register_edit_output 2>&1 + - name: Get register (after edit) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 5 - # # - # # Next two steps are same with a slight difference in the way they write to the output file (GITHUB_OUTPUT vs ENV:GITHUB_OUTPUT) - # # - # - name: Start a client to create a register writable by anyone - # id: register-address - # if: matrix.os != 'windows-latest' - # run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $GITHUB_OUTPUT - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Create Public Register (writeable by anyone) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 + env: + SN_LOG: "v" + timeout-minutes: 5 - # - name: Start a client to create a register writable by anyone - # id: register-address-windows - # if: matrix.os == 'windows-latest' - # run: echo "$(./target/release/safe --log-output-dest=data-dir register create -n trycatch -p | rg REGISTER_ADDRESS )" >> $ENV:GITHUB_OUTPUT - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: parse public register address (unix) + if: matrix.os != 'windows-latest' + run: | + PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output) + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV + shell: bash + + - name: parse public register address (win) + if: matrix.os == 'windows-latest' + run: | + $PUBLIC_REGISTER_ADDRESS = rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output + echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + shell: pwsh - # - name: Start a client to get a register writable by anyone (current client is the owner) - # run: ./target/release/safe --log-output-dest=data-dir register get -n trycatch - # env: - # SN_LOG: "all" - # timeout-minutes: 2 + - name: Get Public Register (current key is the owner) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + env: + SN_LOG: "v" + timeout-minutes: 5 - # - name: Start a client to edit a register writable by anyone (current client is the owner) - # run: ./target/release/safe --log-output-dest=data-dir register edit -n trycatch wood - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Edit Public Register (current key is the owner) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 + env: + SN_LOG: "v" + timeout-minutes: 10 - # - name: Delete client subdir to generate new client - # shell: bash - # run: rm -rf ${{ matrix.safe_path }}/client - # # - # # Next four steps are same with a slight difference in the which output step they read 
from - # # - # - name: Start a client to get a register writable by anyone (new client is not the owner) - # if: matrix.os != 'windows-latest' - # run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address.outputs.REGISTER_ADDRESS }} - # env: - # SN_LOG: "all" - # timeout-minutes: 2 + - name: Delete current register signing key + shell: bash + run: rm -rf ${{ matrix.safe_path }}/client - # - name: Start a client to edit a register writable by anyone (new client is not the owner) - # if: matrix.os != 'windows-latest' - # run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address.outputs.REGISTER_ADDRESS }} water - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Generate new register signing key + run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key - # - name: Start a client to get a register writable by anyone (new client is not the owner) - # if: matrix.os == 'windows-latest' - # run: ./target/release/safe --log-output-dest=data-dir register get ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} - # env: - # SN_LOG: "all" - # timeout-minutes: 2 + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + env: + SN_LOG: "v" + timeout-minutes: 2 - # - name: Start a client to edit a register writable by anyone (new client is not the owner) - # if: matrix.os == 'windows-latest' - # run: ./target/release/safe --log-output-dest=data-dir register edit ${{ steps.register-address-windows.outputs.REGISTER_ADDRESS }} water - # env: - # SN_LOG: "all" - # timeout-minutes: 10 + - name: Edit Public Register (new signing key is not the owner) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 + env: + SN_LOG: "v" + timeout-minutes: 10 + + - name: Get Public Register (new signing key is not the owner) + run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + env: + SN_LOG: "v" + timeout-minutes: 2 - name: Stop the local network and upload logs if: always() From 803ffcfbe2294a1a82b84baaae95b5061ed602c1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 8 Oct 2024 21:26:39 +0530 Subject: [PATCH 141/255] feat(ci): enable merge queue for evm-dev branch --- .github/workflows/merge.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 02d33a89ba..45fa0682da 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -5,7 +5,7 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run checks should show on master and enable this clear test/passing history merge_group: - branches: [main, alpha*, beta*, rc*] + branches: [main, evm-dev] pull_request: branches: ["*"] From 866f147bc14928fdc1c4e86d990a41d1f66108f1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 9 Oct 2024 15:07:24 +0530 Subject: [PATCH 142/255] refactor: move common utility functions to test_utils --- Cargo.lock | 3 ++ autonomi/Cargo.toml | 3 +- autonomi/tests/common.rs | 81 -------------------------------------- autonomi/tests/file.rs | 5 +-- autonomi/tests/put.rs | 7 +--- autonomi/tests/register.rs | 5 +-- autonomi/tests/wallet.rs | 2 - autonomi/tests/wasm.rs | 28 +++++++++---- test_utils/Cargo.toml | 5 
+++ test_utils/src/lib.rs | 27 +++++++++++++ 10 files changed, 60 insertions(+), 106 deletions(-) delete mode 100644 autonomi/tests/common.rs diff --git a/Cargo.lock b/Cargo.lock index 607aee2bba..7d1738c96a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8764,12 +8764,15 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" name = "test_utils" version = "0.4.6" dependencies = [ + "bytes", "color-eyre", "dirs-next", "evmlib", "libp2p 0.54.1", + "rand 0.8.5", "serde", "serde_json", + "sn_peers_acquisition", ] [[package]] diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 34ee3a971a..416e580bd2 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -16,7 +16,7 @@ data = [] vault = ["data"] files = ["data"] fs = ["tokio/fs", "files"] -local-discovery = ["sn_networking/local-discovery"] +local-discovery = ["sn_networking/local-discovery", "test_utils/local-discovery"] registers = [] [dependencies] @@ -34,7 +34,6 @@ rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } sn_networking = { path = "../sn_networking", version = "0.18.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } sn_protocol = { version = "0.17.10", path = "../sn_protocol" } sn_registers = { path = "../sn_registers", version = "0.3.20" } sn_transfers = { path = "../sn_transfers", version = "0.19.2" } diff --git a/autonomi/tests/common.rs b/autonomi/tests/common.rs deleted file mode 100644 index 1a1be73a51..0000000000 --- a/autonomi/tests/common.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use std::env; - -use bytes::Bytes; -use eyre::Result; -use libp2p::Multiaddr; -use rand::Rng; -use sn_peers_acquisition::parse_peer_addr; - -#[allow(dead_code)] -pub fn gen_random_data(len: usize) -> Bytes { - let mut data = vec![0u8; len]; - rand::thread_rng().fill(&mut data[..]); - Bytes::from(data) -} - -#[allow(dead_code)] -/// Enable logging for tests. E.g. use `RUST_LOG=autonomi` to see logs. -pub fn enable_logging() { - let _ = tracing_subscriber::fmt() - .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .try_init(); -} - -#[cfg(target_arch = "wasm32")] -#[allow(dead_code)] -pub fn enable_logging_wasm(directive: impl AsRef) { - use tracing_subscriber::prelude::*; - - console_error_panic_hook::set_once(); - - let fmt_layer = tracing_subscriber::fmt::layer() - .with_ansi(false) // Only partially supported across browsers - .without_time() // std::time is not available in browsers - .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console - tracing_subscriber::registry() - .with(fmt_layer) - .with(tracing_subscriber::EnvFilter::new(directive)) - .init(); -} - -/// Get peers from `SAFE_PEERS` environment variable, first from runtime, then compile-time. -/// If no peers are found and `local` is not enabled, this will panic. Otherwise, it will return an empty list. 
-#[allow(dead_code)] -pub fn peers_from_run_or_compile_time_env( -) -> Result, libp2p::multiaddr::Error> { - let peers_str = env::var("SAFE_PEERS") - .ok() - .or_else(|| option_env!("SAFE_PEERS").map(|s| s.to_string())); - - let Some(peers_str) = peers_str else { - #[cfg(not(feature = "local-discovery"))] - panic!("SAFE_PEERS environment variable not set and `local` feature is not enabled"); - #[cfg(feature = "local-discovery")] - return Ok(vec![]); - }; - - peers_str.split(',').map(parse_peer_addr).collect() -} - -/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. -/// -/// An empty `Vec` will be returned if the env var is not set. -#[allow(dead_code)] -pub fn peers_from_env() -> Result> { - let bootstrap_peers = if cfg!(feature = "local-discovery") { - Ok(vec![]) - } else if let Ok(peers_str) = env::var("SAFE_PEERS") { - peers_str.split(',').map(parse_peer_addr).collect() - } else { - Ok(vec![]) - }?; - Ok(bootstrap_peers) -} diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs index a4aa247339..cae3eedbcc 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/file.rs @@ -8,14 +8,11 @@ #![cfg(all(feature = "files", feature = "fs"))] -mod common; - use autonomi::Client; -use common::peers_from_env; use eyre::Result; use sn_logging::LogBuilder; use std::time::Duration; -use test_utils::evm::get_funded_wallet; +use test_utils::{evm::get_funded_wallet, peers_from_env}; use tokio::time::sleep; #[tokio::test] diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index 2989259336..5c0163a507 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -8,14 +8,11 @@ #![cfg(feature = "data")] -mod common; - use autonomi::Client; -use common::peers_from_env; use eyre::Result; use sn_logging::LogBuilder; use std::time::Duration; -use test_utils::evm::get_funded_wallet; +use test_utils::{evm::get_funded_wallet, gen_random_data, peers_from_env}; use tokio::time::sleep; #[tokio::test] @@ -24,7 +21,7 @@ async fn put() -> Result<()> { let client = Client::connect(&peers_from_env()?).await?; let wallet = get_funded_wallet(); - let data = common::gen_random_data(1024 * 1024 * 10); + let data = gen_random_data(1024 * 1024 * 10); let addr = client.put(data.clone(), &wallet).await?; diff --git a/autonomi/tests/register.rs b/autonomi/tests/register.rs index 1296442553..bf88f831d8 100644 --- a/autonomi/tests/register.rs +++ b/autonomi/tests/register.rs @@ -8,16 +8,13 @@ #![cfg(feature = "registers")] -mod common; - use autonomi::Client; use bytes::Bytes; -use common::peers_from_env; use eyre::Result; use rand::Rng; use sn_logging::LogBuilder; use std::time::Duration; -use test_utils::evm::get_funded_wallet; +use test_utils::{evm::get_funded_wallet, peers_from_env}; use tokio::time::sleep; #[tokio::test] diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs index 82b84205d5..502afb6be0 100644 --- a/autonomi/tests/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -6,8 +6,6 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-mod common; - use const_hex::traits::FromHex; use evmlib::common::{Address, Amount}; use evmlib::utils::evm_network_from_env; diff --git a/autonomi/tests/wasm.rs b/autonomi/tests/wasm.rs index b4dae36f8f..485193ea48 100644 --- a/autonomi/tests/wasm.rs +++ b/autonomi/tests/wasm.rs @@ -12,22 +12,19 @@ use std::time::Duration; use autonomi::Client; use sn_networking::target_arch::sleep; +use test_utils::{evm::get_funded_wallet, gen_random_data, peers_from_env}; use wasm_bindgen_test::*; -mod common; - wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] async fn put() -> Result<(), Box> { - common::enable_logging_wasm("sn_networking,autonomi,wasm"); + enable_logging_wasm("sn_networking,autonomi,wasm"); - let client = Client::connect(&common::peers_from_run_or_compile_time_env()?) - .await - .unwrap(); - let wallet = test_utils::evm::get_funded_wallet(); + let client = Client::connect(&peers_from_env()?).await.unwrap(); + let wallet = get_funded_wallet(); - let data = common::gen_random_data(1024 * 1024 * 2); // 2MiB + let data = gen_random_data(1024 * 1024 * 2); // 2MiB let addr = client.put(data.clone(), &wallet).await.unwrap(); sleep(Duration::from_secs(2)).await; @@ -37,3 +34,18 @@ async fn put() -> Result<(), Box> { Ok(()) } + +fn enable_logging_wasm(directive: impl AsRef) { + use tracing_subscriber::prelude::*; + + console_error_panic_hook::set_once(); + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) // Only partially supported across browsers + .without_time() // std::time is not available in browsers + .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console + tracing_subscriber::registry() + .with(fmt_layer) + .with(tracing_subscriber::EnvFilter::new(directive)) + .init(); +} diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index b84708a395..ca708ccc85 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -9,11 +9,16 @@ readme = "README.md" repository = "https://github.com/maidsafe/safe_network" version = "0.4.6" +[features] +local-discovery = ["sn_peers_acquisition/local-discovery"] [dependencies] +bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" evmlib = { path = "../evmlib", version = "0.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } +rand = "0.8.5" serde = { version = "1.0.133", features = [ "derive"]} serde_json = "1.0" +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } \ No newline at end of file diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index e2ddf72f2f..75a3276071 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -8,3 +8,30 @@ pub mod evm; pub mod testnet; + +use bytes::Bytes; +use color_eyre::eyre::Result; +use libp2p::Multiaddr; +use rand::Rng; +use sn_peers_acquisition::parse_peer_addr; + +/// Generate random data of the given length. +pub fn gen_random_data(len: usize) -> Bytes { + let mut data = vec![0u8; len]; + rand::thread_rng().fill(&mut data[..]); + Bytes::from(data) +} + +/// Parse the `SAFE_PEERS` env var into a list of Multiaddrs. +/// +/// An empty `Vec` will be returned if the env var is not set or if local discovery is enabled. 
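+///
+/// Illustrative usage from a test, mirroring the `wasm.rs` change above
+/// (an assumed snippet for documentation purposes, not verbatim repo code):
+/// `Client::connect(&test_utils::peers_from_env()?).await?`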
+pub fn peers_from_env() -> Result> { + let bootstrap_peers = if cfg!(feature = "local-discovery") { + Ok(vec![]) + } else if let Ok(peers_str) = std::env::var("SAFE_PEERS") { + peers_str.split(',').map(parse_peer_addr).collect() + } else { + Ok(vec![]) + }?; + Ok(bootstrap_peers) +} From 98d0e15bfba779dba29ff13a75aedb2383af0e12 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Wed, 9 Oct 2024 16:39:45 +0530 Subject: [PATCH 143/255] fix(test): get data with churn test running --- autonomi/src/client/registers.rs | 4 +- autonomi_cli/src/commands/register.rs | 5 +- evmlib/src/wallet.rs | 1 + sn_node/Cargo.toml | 2 +- sn_node/tests/common/client.rs | 15 +- sn_node/tests/data_with_churn.rs | 1148 +++++++++++-------------- test_utils/src/evm.rs | 5 +- 7 files changed, 518 insertions(+), 662 deletions(-) diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index 60cb10c4b3..c2b3ed6fd2 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -81,7 +81,7 @@ impl Register { impl Client { /// Generate a new register key - pub fn register_generate_key(&self) -> RegisterSecretKey { + pub fn register_generate_key() -> RegisterSecretKey { RegisterSecretKey::random() } @@ -215,7 +215,7 @@ impl Client { } /// Get the address of a register from its name and owner - pub fn register_address(&self, name: &str, owner: &RegisterSecretKey) -> RegisterAddress { + pub fn register_address(name: &str, owner: &RegisterSecretKey) -> RegisterAddress { let pk = owner.public_key(); let name = XorName::from_content_parts(&[name.as_bytes()]); RegisterAddress::new(name, pk) diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs index 3b3c84505c..e672a1cc41 100644 --- a/autonomi_cli/src/commands/register.rs +++ b/autonomi_cli/src/commands/register.rs @@ -9,6 +9,7 @@ use autonomi::client::registers::RegisterAddress; use autonomi::client::registers::RegisterPermissions; use autonomi::client::registers::RegisterSecretKey; +use autonomi::Client; use autonomi::Multiaddr; use color_eyre::eyre::eyre; use color_eyre::eyre::Context; @@ -92,7 +93,7 @@ pub async fn edit(address: String, name: bool, value: &str, peers: Vec) -> Result<( let client = crate::actions::connect_to_network(peers).await?; let address = if name { - client.register_address(&address, ®ister_key) + Client::register_address(&address, ®ister_key) } else { RegisterAddress::from_hex(&address) .wrap_err(format!("Failed to parse register address: {address}")) diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 5982de0f0c..18bebd541d 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -27,6 +27,7 @@ pub enum Error { ChunkPaymentsContract(#[from] data_payments::error::Error), } +#[derive(Clone)] pub struct Wallet { wallet: EthereumWallet, network: Network, diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 47d7d351e0..144a7b86fe 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -15,7 +15,7 @@ path = "src/bin/safenode/main.rs" [features] default = ["metrics", "upnp", "open-metrics", "encrypt-records"] -local-discovery = ["sn_networking/local-discovery"] +local-discovery = ["sn_networking/local-discovery", "test_utils/local-discovery"] otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index 849da14332..106a13567e 100644 --- a/sn_node/tests/common/client.rs +++ 
b/sn_node/tests/common/client.rs @@ -8,12 +8,11 @@ use autonomi::Client; use eyre::Result; -use sn_peers_acquisition::parse_peer_addr; use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest}; use sn_service_management::{get_local_node_registry_path, NodeRegistry}; use std::{net::SocketAddr, path::Path}; -use test_utils::evm::get_funded_wallet; use test_utils::testnet::DeploymentInventory; +use test_utils::{evm::get_funded_wallet, peers_from_env}; use tokio::sync::Mutex; use tonic::Request; use tracing::{debug, info}; @@ -127,17 +126,7 @@ pub struct LocalNetwork; impl LocalNetwork { /// Get a new Client for testing pub async fn get_client() -> Client { - let bootstrap_peers = if !cfg!(feature = "local-discovery") { - match std::env::var("SAFE_PEERS") { - Ok(str) => match parse_peer_addr(&str) { - Ok(peer) => vec![peer], - Err(err) => panic!("Can't parse SAFE_PEERS {str:?} with error {err:?}"), - }, - Err(err) => panic!("Can't get env var SAFE_PEERS with error {err:?}"), - } - } else { - vec![] - }; + let bootstrap_peers = peers_from_env().expect("Failed to get bootstrap peers from env"); println!("Client bootstrap with peer {bootstrap_peers:?}"); info!("Client bootstrap with peer {bootstrap_peers:?}"); diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 64d014f5dc..ac8caf8b66 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -1,643 +1,505 @@ -// // Copyright 2024 MaidSafe.net limited. -// // -// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// // KIND, either express or implied. Please review the Licences for the specific language governing -// // permissions and limitations relating to use of the SAFE Network Software. - -// mod common; - -// use crate::common::{ -// client::{add_funds_to_wallet, get_client_and_funded_wallet, get_node_count, get_wallet}, -// NodeRestart, -// }; -// use assert_fs::TempDir; -// use eyre::{bail, eyre, Result}; -// use rand::{rngs::OsRng, Rng}; -// // TODO: Update `autonomi` to have relevant types here -// // use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; -// use sn_logging::LogBuilder; -// use sn_protocol::{ -// storage::{ChunkAddress, RegisterAddress, SpendAddress}, -// NetworkAddress, -// }; -// use sn_registers::Permissions; -// use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; -// use std::{ -// collections::{BTreeMap, VecDeque}, -// fmt, -// fs::{create_dir_all, File}, -// io::Write, -// path::{Path, PathBuf}, -// sync::Arc, -// time::{Duration, Instant}, -// }; -// use tempfile::tempdir; -// use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; -// use tracing::{debug, error, info, trace, warn}; -// use xor_name::XorName; - -// const EXTRA_CHURN_COUNT: u32 = 5; -// const CHURN_CYCLES: u32 = 2; -// const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; -// const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; -// const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; - -// const CHUNKS_SIZE: usize = 1024 * 1024; - -// const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; -// const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; - -// // Default total amount of time we run the checks for before reporting the outcome. -// // It can be overriden by setting the 'TEST_DURATION_MINS' env var. 
-// const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr - -// type ContentList = Arc>>; -// type CashNoteMap = Arc>>; - -// struct ContentError { -// net_addr: NetworkAddress, -// attempts: u8, -// last_err: Error, -// } - -// impl fmt::Debug for ContentError { -// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { -// write!( -// f, -// "{:?}, attempts: {}, last error: {:?}", -// self.net_addr, self.attempts, self.last_err -// ) -// } -// } - -// type ContentErredList = Arc>>; - -// #[tokio::test(flavor = "multi_thread")] -// async fn data_availability_during_churn() -> Result<()> { -// let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); - -// let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { -// Duration::from_secs(60 * str.parse::()?) -// } else { -// TEST_DURATION -// }; -// let node_count = get_node_count(); - -// let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { -// println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); -// info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); -// let cycles = str.parse::()?; -// test_duration / cycles -// } else { -// // Ensure at least some nodes got churned twice. -// test_duration -// / std::cmp::max( -// CHURN_CYCLES * node_count as u32, -// node_count as u32 + EXTRA_CHURN_COUNT, -// ) -// }; -// println!("Nodes will churn every {churn_period:?}"); -// info!("Nodes will churn every {churn_period:?}"); - -// // Create a cross thread usize for tracking churned nodes -// let churn_count = Arc::new(RwLock::new(0_usize)); - -// // Allow to disable Registers data creation/checks, storing and querying only Chunks during churn. -// // Default to be not carry out chunks only during churn. -// let chunks_only = std::env::var("CHUNKS_ONLY").is_ok(); - -// println!( -// "Running this test for {test_duration:?}{}...", -// if chunks_only { " (Chunks only)" } else { "" } -// ); -// info!( -// "Running this test for {test_duration:?}{}...", -// if chunks_only { " (Chunks only)" } else { "" } -// ); - -// // The testnet will create a `faucet` at last. To avoid mess up with that, -// // wait for a while to ensure the spends of that got settled. -// sleep(std::time::Duration::from_secs(10)).await; - -// info!("Creating a client and paying wallet..."); -// let paying_wallet_dir = TempDir::new()?; -// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - -// // Waiting for the paying_wallet funded. -// sleep(std::time::Duration::from_secs(10)).await; - -// info!( -// "Client and paying_wallet created with signing key: {:?}", -// client.signer_pk() -// ); - -// // Shared bucket where we keep track of content created/stored on the network -// let content = ContentList::default(); - -// // Shared bucket where we keep track of CashNotes created/stored on the network -// let cash_notes = CashNoteMap::default(); - -// // Spawn a task to create Registers and CashNotes at random locations, -// // at a higher frequency than the churning events -// if !chunks_only { -// info!("Creating transfer wallet taking balance from the payment wallet"); -// let transfers_wallet_dir = TempDir::new()?; -// let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; -// info!("Transfer wallet created"); - -// // Waiting for the transfers_wallet funded. 
-// sleep(std::time::Duration::from_secs(10)).await; - -// create_registers_task( -// client.clone(), -// Arc::clone(&content), -// churn_period, -// paying_wallet_dir.path().to_path_buf(), -// ); - -// create_cash_note_task( -// client.clone(), -// transfers_wallet, -// Arc::clone(&content), -// Arc::clone(&cash_notes), -// churn_period, -// ); -// } - -// println!("Uploading some chunks before carry out node churning"); -// info!("Uploading some chunks before carry out node churning"); - -// // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events -// store_chunks_task( -// client.clone(), -// Arc::clone(&content), -// churn_period, -// paying_wallet_dir.path().to_path_buf(), -// ); - -// // Spawn a task to churn nodes -// churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period); - -// // Shared bucket where we keep track of the content which erred when creating/storing/fetching. -// // We remove them from this bucket if we are then able to query/fetch them successfully. -// // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectivelly as failures. -// let content_erred = ContentErredList::default(); - -// // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times. -// let failures = ContentErredList::default(); - -// // Spawn a task to randomly query/fetch the content we create/store -// query_content_task( -// client.clone(), -// Arc::clone(&content), -// Arc::clone(&content_erred), -// Arc::clone(&cash_notes), -// churn_period, -// paying_wallet_dir.path().to_path_buf(), -// ); - -// // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, -// // and mark them as failures if they effectivelly cannot be retrieved. -// retry_query_content_task( -// client.clone(), -// Arc::clone(&content_erred), -// Arc::clone(&failures), -// Arc::clone(&cash_notes), -// churn_period, -// paying_wallet_dir.path().to_path_buf(), -// ); - -// info!("All tasks have been spawned. The test is now running..."); -// println!("All tasks have been spawned. The test is now running..."); - -// let start_time = Instant::now(); -// while start_time.elapsed() < test_duration { -// let failed = failures.read().await; -// info!( -// "Current failures after {:?} ({}): {:?}", -// start_time.elapsed(), -// failed.len(), -// failed.values() -// ); -// sleep(churn_period).await; -// } - -// println!(); -// println!( -// ">>>>>> Test stopping after running for {:?}. <<<<<<", -// start_time.elapsed() -// ); -// println!("{:?} churn events happened.", *churn_count.read().await); -// println!(); - -// // The churning of storing_chunk/querying_chunk are all random, -// // which will have a high chance that newly stored chunk got queried BEFORE -// // the original holders churned out. -// // i.e. the test may pass even without any replication -// // Hence, we carry out a final round of query all data to confirm storage. 
-// println!("Final querying confirmation of content"); -// info!("Final querying confirmation of content"); - -// // take one read lock to avoid holding the lock for the whole loop -// // prevent any late content uploads being added to the list -// let content = content.read().await; -// let uploaded_content_count = content.len(); -// let mut handles = Vec::new(); -// for net_addr in content.iter() { -// let client = client.clone(); -// let net_addr = net_addr.clone(); -// let cash_notes = Arc::clone(&cash_notes); - -// let failures = Arc::clone(&failures); -// let wallet_dir = paying_wallet_dir.to_path_buf().clone(); -// let handle = tokio::spawn(async move { -// final_retry_query_content( -// &client, -// &net_addr, -// cash_notes, -// churn_period, -// failures, -// &wallet_dir, -// ) -// .await -// }); -// handles.push(handle); -// } -// let results: Vec<_> = futures::future::join_all(handles).await; - -// let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); -// assert_eq!( -// content_queried_count, uploaded_content_count, -// "Not all content was queried successfully" -// ); - -// println!("{content_queried_count:?} pieces of content queried"); - -// assert_eq!( -// content_queried_count, uploaded_content_count, -// "Not all content was queried" -// ); - -// let failed = failures.read().await; -// if failed.len() > 0 { -// bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); -// } - -// println!("Test passed after running for {:?}.", start_time.elapsed()); -// Ok(()) -// } - -// // Spawns a task which periodically creates CashNotes at random locations. -// fn create_cash_note_task( -// client: Client, -// transfers_wallet: HotWallet, -// content: ContentList, -// cash_notes: CashNoteMap, -// churn_period: Duration, -// ) { -// let _handle = tokio::spawn(async move { -// // Create CashNote at a higher frequency than the churning events -// let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; - -// let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); - -// loop { -// sleep(delay).await; - -// let dest_pk = MainSecretKey::random().main_pubkey(); -// let cash_note = wallet_client -// .send_cash_note(NanoTokens::from(10), dest_pk, true) -// .await -// .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); - -// let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); -// let net_addr = NetworkAddress::SpendAddress(cash_note_addr); -// println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); -// debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); -// content.write().await.push_back(net_addr); -// let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); -// } -// }); -// } - -// // Spawns a task which periodically creates Registers at random locations. 
-// fn create_registers_task( -// client: Client, -// content: ContentList, -// churn_period: Duration, -// paying_wallet_dir: PathBuf, -// ) { -// let _handle = tokio::spawn(async move { -// // Create Registers at a higher frequency than the churning events -// let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; - -// let paying_wallet = get_wallet(&paying_wallet_dir); - -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - -// loop { -// let meta = XorName(rand::random()); -// let owner = client.signer_pk(); - -// let addr = RegisterAddress::new(meta, owner); -// println!("Creating Register at {addr:?} in {delay:?}"); -// debug!("Creating Register at {addr:?} in {delay:?}"); -// sleep(delay).await; - -// match client -// .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) -// .await -// { -// Ok(_) => content -// .write() -// .await -// .push_back(NetworkAddress::RegisterAddress(addr)), -// Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), -// } -// } -// }); -// } - -// // Spawns a task which periodically stores Chunks at random locations. -// fn store_chunks_task( -// client: Client, -// content: ContentList, -// churn_period: Duration, -// paying_wallet_dir: PathBuf, -// ) { -// let _handle: JoinHandle> = tokio::spawn(async move { -// let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); -// let output_dir = temp_dir.path().join("chunk_path"); -// create_dir_all(output_dir.clone()) -// .expect("failed to create output dir for encrypted chunks"); - -// // Store Chunks at a higher frequency than the churning events -// let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; - -// let mut rng = OsRng; - -// loop { -// let random_bytes: Vec = ::std::iter::repeat(()) -// .map(|()| rng.gen::()) -// .take(CHUNKS_SIZE) -// .collect(); -// let chunk_size = random_bytes.len(); - -// let chunk_name = XorName::from_content(&random_bytes); - -// let file_path = temp_dir.path().join(hex::encode(chunk_name)); -// let mut chunk_file = -// File::create(&file_path).expect("failed to create temp chunk file"); -// chunk_file -// .write_all(&random_bytes) -// .expect("failed to write to temp chunk file"); - -// let (addr, _data_map, _file_size, chunks) = -// FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); - -// info!( -// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", -// chunks.len(), -// chunk_size -// ); -// sleep(delay).await; - -// let chunks_len = chunks.len(); -// let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); - -// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); -// uploader.set_show_holders(true); -// uploader.insert_chunk_paths(chunks); - -// let cost = match uploader.start_upload().await { -// Ok(stats) => stats -// .royalty_fees -// .checked_add(stats.storage_cost) -// .ok_or(eyre!("Total storage cost exceed possible token amount"))?, -// Err(err) => { -// bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); -// } -// }; - -// println!( -// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" -// ); -// info!( -// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" -// ); -// sleep(delay).await; - -// for chunk_name in chunks_name { -// content -// .write() -// .await -// 
-//                     .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name)));
-//             }
-//         }
-//     });
-// }
-
-// // Spawns a task which periodically queries a content by randomly choosing it from the list
-// // of content created by another task.
-// fn query_content_task(
-//     client: Client,
-//     content: ContentList,
-//     content_erred: ContentErredList,
-//     cash_notes: CashNoteMap,
-//     churn_period: Duration,
-//     root_dir: PathBuf,
-// ) {
-//     let _handle = tokio::spawn(async move {
-//         let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN;
-//         loop {
-//             let len = content.read().await.len();
-//             if len == 0 {
-//                 println!("No content created/stored just yet, let's try in {delay:?} ...");
-//                 info!("No content created/stored just yet, let's try in {delay:?} ...");
-//                 sleep(delay).await;
-//                 continue;
-//             }
-
-//             // let's choose a random content to query, picking it from the list of created
-//             let index = rand::thread_rng().gen_range(0..len);
-//             let net_addr = content.read().await[index].clone();
-//             trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}");
-//             sleep(delay).await;
-
-//             match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await {
-//                 Ok(_) => {
-//                     let _ = content_erred.write().await.remove(&net_addr);
-//                 }
-//                 Err(last_err) => {
-//                     println!(
-//                         "Failed to query content (index: {index}) at {net_addr}: {last_err:?}"
-//                     );
-//                     error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}");
-//                     // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times.
-//                     let _ = content_erred
-//                         .write()
-//                         .await
-//                         .entry(net_addr.clone())
-//                         .and_modify(|curr| curr.attempts += 1)
-//                         .or_insert(ContentError {
-//                             net_addr,
-//                             attempts: 1,
-//                             last_err,
-//                         });
-//                 }
-//             }
-//         }
-//     });
-// }
-
-// // Spawns a task which periodically picks up a node, and restarts it to cause churn in the network.
-// fn churn_nodes_task(
-//     churn_count: Arc<RwLock<usize>>,
-//     test_duration: Duration,
-//     churn_period: Duration,
-// ) {
-//     let start = Instant::now();
-//     let _handle: JoinHandle<Result<()>> = tokio::spawn(async move {
-//         let mut node_restart = NodeRestart::new(true, false)?;
-
-//         loop {
-//             sleep(churn_period).await;
-
-//             // break out if we've run the duration of churn
-//             if start.elapsed() > test_duration {
-//                 debug!("Test duration reached, stopping churn nodes task");
-//                 break;
-//             }
-
-//             if let Err(err) = node_restart.restart_next(true, true).await {
-//                 println!("Failed to restart node {err}");
-//                 info!("Failed to restart node {err}");
-//                 continue;
-//             }
-
-//             *churn_count.write().await += 1;
-//         }
-//         Ok(())
-//     });
-// }
-
-// // Checks (periodically) for any content that an error was reported either at the moment of its creation or
-// // in a later query attempt.
-// fn retry_query_content_task( -// client: Client, -// content_erred: ContentErredList, -// failures: ContentErredList, -// cash_notes: CashNoteMap, -// churn_period: Duration, -// wallet_dir: PathBuf, -// ) { -// let _handle = tokio::spawn(async move { -// let delay = 2 * churn_period; -// loop { -// sleep(delay).await; - -// // let's try to query from the bucket of those that erred upon creation/query -// let erred = content_erred.write().await.pop_first(); - -// if let Some((net_addr, mut content_error)) = erred { -// let attempts = content_error.attempts + 1; - -// println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); -// info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); -// if let Err(last_err) = -// query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await -// { -// println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); -// warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); -// // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, -// // otherwise report it effectivelly as failure. -// content_error.attempts = attempts; -// content_error.last_err = last_err; - -// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { -// let _ = failures.write().await.insert(net_addr, content_error); -// } else { -// let _ = content_erred.write().await.insert(net_addr, content_error); -// } -// } else { -// // remove from fails and errs if we had a success and it was added meanwhile perchance -// let _ = failures.write().await.remove(&net_addr); -// let _ = content_erred.write().await.remove(&net_addr); -// } -// } -// } -// }); -// } - -// async fn final_retry_query_content( -// client: &Client, -// net_addr: &NetworkAddress, -// cash_notes: CashNoteMap, -// churn_period: Duration, -// failures: ContentErredList, -// wallet_dir: &Path, -// ) -> Result<()> { -// let mut attempts = 1; -// let net_addr = net_addr.clone(); -// loop { -// println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); -// debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); -// if let Err(last_err) = -// query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await -// { -// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { -// println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); -// error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); -// bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); -// } else { -// attempts += 1; -// let delay = 2 * churn_period; -// debug!("Delaying last check for {delay:?} ..."); -// sleep(delay).await; -// continue; -// } -// } else { -// failures.write().await.remove(&net_addr); -// // content retrieved fine -// return Ok(()); -// } -// } -// } - -// async fn query_content( -// client: &Client, -// wallet_dir: &Path, -// net_addr: &NetworkAddress, -// cash_notes: CashNoteMap, -// ) -> Result<(), Error> { -// match net_addr { -// NetworkAddress::SpendAddress(addr) => { -// if let Some(cash_note) = cash_notes.read().await.get(addr) { -// match client.verify_cashnote(cash_note).await { -// Ok(_) => Ok(()), -// Err(err) => Err(Error::CouldNotVerifyTransfer(format!( -// "Verification of cash_note {addr:?} failed with error: {err:?}" -// ))), -// } -// } else { -// Err(Error::CouldNotVerifyTransfer(format!( -// "Do not 
have the CashNote: {addr:?}" -// ))) -// } -// } -// NetworkAddress::RegisterAddress(addr) => { -// let _ = client.get_register(*addr).await?; -// Ok(()) -// } -// NetworkAddress::ChunkAddress(addr) => { -// let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); -// let mut file_download = FilesDownload::new(files_api); -// let _ = file_download.download_file(*addr, None).await?; - -// Ok(()) -// } -// _other => Ok(()), // we don't create/store any other type of content in this test yet -// } -// } +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +mod common; + +use crate::common::{ + client::{get_client_and_funded_wallet, get_node_count}, + NodeRestart, +}; +use autonomi::{Client, Wallet}; +use eyre::{bail, ErrReport, Result}; +use rand::Rng; +use self_encryption::MAX_CHUNK_SIZE; +use sn_logging::LogBuilder; +use sn_protocol::{storage::ChunkAddress, NetworkAddress}; +use std::{ + collections::{BTreeMap, VecDeque}, + fmt, + fs::create_dir_all, + sync::Arc, + time::{Duration, Instant}, +}; +use tempfile::tempdir; +use test_utils::gen_random_data; +use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; +use tracing::{debug, error, info, trace, warn}; +use xor_name::XorName; + +const EXTRA_CHURN_COUNT: u32 = 5; +const CHURN_CYCLES: u32 = 2; +const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; +const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; + +const DATA_SIZE: usize = MAX_CHUNK_SIZE / 3; + +const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; +const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; + +// Default total amount of time we run the checks for before reporting the outcome. +// It can be overriden by setting the 'TEST_DURATION_MINS' env var. +const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr + +type ContentList = Arc>>; + +struct ContentError { + net_addr: NetworkAddress, + attempts: u8, + last_err: ErrReport, +} + +impl fmt::Debug for ContentError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{:?}, attempts: {}, last error: {:?}", + self.net_addr, self.attempts, self.last_err + ) + } +} + +type ContentErredList = Arc>>; + +#[tokio::test(flavor = "multi_thread")] +async fn data_availability_during_churn() -> Result<()> { + let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); + + let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { + Duration::from_secs(60 * str.parse::()?) + } else { + TEST_DURATION + }; + let node_count = get_node_count(); + + let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { + println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); + info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); + let cycles = str.parse::()?; + test_duration / cycles + } else { + // Ensure at least some nodes got churned twice. 
+        test_duration
+            / std::cmp::max(
+                CHURN_CYCLES * node_count as u32,
+                node_count as u32 + EXTRA_CHURN_COUNT,
+            )
+    };
+    println!("Nodes will churn every {churn_period:?}");
+    info!("Nodes will churn every {churn_period:?}");
+
+    // Create a cross thread usize for tracking churned nodes
+    let churn_count = Arc::new(RwLock::new(0_usize));
+
+    // Allow Registers data creation/checks to be disabled, so that only Chunks are stored and queried during churn.
+    // Defaults to not running in chunks-only mode.
+    let chunks_only = std::env::var("CHUNKS_ONLY").is_ok();
+
+    println!(
+        "Running this test for {test_duration:?}{}...",
+        if chunks_only { " (Chunks only)" } else { "" }
+    );
+    info!(
+        "Running this test for {test_duration:?}{}...",
+        if chunks_only { " (Chunks only)" } else { "" }
+    );
+
+    let (client, wallet) = get_client_and_funded_wallet().await;
+
+    info!(
+        "Client and wallet created. Wallet address: {:?}",
+        wallet.address()
+    );
+
+    // Shared bucket where we keep track of content created/stored on the network
+    let content = ContentList::default();
+
+    // Spawn a task to create Registers at random locations,
+    // at a higher frequency than the churning events
+    let create_register_handle = if !chunks_only {
+        let create_register_handle = create_registers_task(
+            client.clone(),
+            wallet.clone(),
+            Arc::clone(&content),
+            churn_period,
+        );
+        Some(create_register_handle)
+    } else {
+        None
+    };
+
+    println!("Uploading some chunks before carrying out node churning");
+    info!("Uploading some chunks before carrying out node churning");
+
+    // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events
+    let store_chunks_handle = store_chunks_task(
+        client.clone(),
+        wallet.clone(),
+        Arc::clone(&content),
+        churn_period,
+    );
+
+    // Spawn a task to churn nodes
+    churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period);
+
+    // Shared bucket where we keep track of the content which erred when creating/storing/fetching.
+    // We remove them from this bucket if we are then able to query/fetch them successfully.
+    // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectively as failures.
+    let content_erred = ContentErredList::default();
+
+    // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times.
+    let failures = ContentErredList::default();
+
+    // Spawn a task to randomly query/fetch the content we create/store
+    query_content_task(
+        client.clone(),
+        Arc::clone(&content),
+        Arc::clone(&content_erred),
+        churn_period,
+    );
+
+    // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times,
+    // and mark them as failures if they effectively cannot be retrieved.
+    retry_query_content_task(
+        client.clone(),
+        Arc::clone(&content_erred),
+        Arc::clone(&failures),
+        churn_period,
+    );
+
+    info!("All tasks have been spawned. The test is now running...");
+    println!("All tasks have been spawned. The test is now running...");
+
+    let start_time = Instant::now();
+    while start_time.elapsed() < test_duration {
+        if store_chunks_handle.is_finished() {
+            bail!("Store chunks task has finished before the test duration. Probably due to an error.");
+        }
+        if let Some(handle) = &create_register_handle {
+            if handle.is_finished() {
+                bail!("Create registers task has finished before the test duration. Probably due to an error.");
Probably due to an error."); + } + } + + let failed = failures.read().await; + if start_time.elapsed().as_secs() % 10 == 0 { + println!( + "Current failures after {:?} ({}): {:?}", + start_time.elapsed(), + failed.len(), + failed.values() + ); + info!( + "Current failures after {:?} ({}): {:?}", + start_time.elapsed(), + failed.len(), + failed.values() + ); + } + + sleep(Duration::from_secs(3)).await; + } + + println!(); + println!( + ">>>>>> Test stopping after running for {:?}. <<<<<<", + start_time.elapsed() + ); + println!("{:?} churn events happened.", *churn_count.read().await); + println!(); + + // The churning of storing_chunk/querying_chunk are all random, + // which will have a high chance that newly stored chunk got queried BEFORE + // the original holders churned out. + // i.e. the test may pass even without any replication + // Hence, we carry out a final round of query all data to confirm storage. + println!("Final querying confirmation of content"); + info!("Final querying confirmation of content"); + + // take one read lock to avoid holding the lock for the whole loop + // prevent any late content uploads being added to the list + let content = content.read().await; + let uploaded_content_count = content.len(); + let mut handles = Vec::new(); + for net_addr in content.iter() { + let client = client.clone(); + let net_addr = net_addr.clone(); + + let failures = Arc::clone(&failures); + let handle = tokio::spawn(async move { + final_retry_query_content(&client, &net_addr, churn_period, failures).await + }); + handles.push(handle); + } + let results: Vec<_> = futures::future::join_all(handles).await; + + let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); + assert_eq!( + content_queried_count, uploaded_content_count, + "Not all content was queried successfully" + ); + + println!("{content_queried_count:?} pieces of content queried"); + + assert_eq!( + content_queried_count, uploaded_content_count, + "Not all content was queried" + ); + + let failed = failures.read().await; + if failed.len() > 0 { + bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); + } + + println!("Test passed after running for {:?}.", start_time.elapsed()); + Ok(()) +} + +// Spawns a task which periodically creates Registers at random locations. +fn create_registers_task( + client: Client, + wallet: Wallet, + content: ContentList, + churn_period: Duration, +) -> JoinHandle> { + let handle: JoinHandle> = tokio::spawn(async move { + // Create Registers at a higher frequency than the churning events + let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; + + loop { + let owner = Client::register_generate_key(); + let random_name = XorName(rand::random()).to_string(); + let random_data = gen_random_data(DATA_SIZE); + + sleep(delay).await; + + let register = client + .register_create(random_data, &random_name, owner, &wallet) + .await + .inspect_err(|err| { + println!("Error while creating register: {err:?}"); + error!("Error while creating register: {err:?}") + })?; + + let addr = register.address(); + println!("Created new Register ({addr:?}) after a delay of: {delay:?}"); + content + .write() + .await + .push_back(NetworkAddress::RegisterAddress(*addr)); + } + }); + handle +} + +// Spawns a task which periodically stores Chunks at random locations. 
+fn store_chunks_task(
+    client: Client,
+    wallet: Wallet,
+    content: ContentList,
+    churn_period: Duration,
+) -> JoinHandle<Result<()>> {
+    let handle: JoinHandle<Result<()>> = tokio::spawn(async move {
+        let temp_dir = tempdir().expect("Cannot create a temp directory for store_chunks_task!");
+        let output_dir = temp_dir.path().join("chunk_path");
+        create_dir_all(output_dir.clone())
+            .expect("failed to create output dir for encrypted chunks");
+
+        // Store Chunks at a higher frequency than the churning events
+        let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN;
+
+        loop {
+            let random_data = gen_random_data(DATA_SIZE);
+
+            let data_map = client.put(random_data, &wallet).await.inspect_err(|err| {
+                println!("Failed to put chunk: {err:?}");
+                error!("Failed to put chunk: {err:?}")
+            })?;
+
+            println!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}");
+            info!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}");
+
+            content
+                .write()
+                .await
+                .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(data_map)));
+            sleep(delay).await;
+        }
+    });
+    handle
+}
+
+// Spawns a task which periodically queries a piece of content, randomly chosen from the
+// list of content created by the other tasks.
+fn query_content_task(
+    client: Client,
+    content: ContentList,
+    content_erred: ContentErredList,
+    churn_period: Duration,
+) {
+    let _handle = tokio::spawn(async move {
+        let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN;
+        loop {
+            let len = content.read().await.len();
+            if len == 0 {
+                println!("No content created/stored just yet, let's try in {delay:?} ...");
+                info!("No content created/stored just yet, let's try in {delay:?} ...");
+                sleep(delay).await;
+                continue;
+            }
+
+            // let's choose a random content to query, picking it from the list of created
+            let index = rand::thread_rng().gen_range(0..len);
+            let net_addr = content.read().await[index].clone();
+            trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}");
+            sleep(delay).await;
+
+            match query_content(&client, &net_addr).await {
+                Ok(_) => {
+                    let _ = content_erred.write().await.remove(&net_addr);
+                }
+                Err(last_err) => {
+                    println!(
+                        "Failed to query content (index: {index}) at {net_addr}: {last_err:?}"
+                    );
+                    error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}");
+                    // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times.
+                    let _ = content_erred
+                        .write()
+                        .await
+                        .entry(net_addr.clone())
+                        .and_modify(|curr| curr.attempts += 1)
+                        .or_insert(ContentError {
+                            net_addr,
+                            attempts: 1,
+                            last_err,
+                        });
+                }
+            }
+        }
+    });
+}
+
+// Spawns a task which periodically picks up a node, and restarts it to cause churn in the network.
+fn churn_nodes_task(
+    churn_count: Arc<RwLock<usize>>,
+    test_duration: Duration,
+    churn_period: Duration,
+) {
+    let start = Instant::now();
+    let _handle: JoinHandle<Result<()>> = tokio::spawn(async move {
+        let mut node_restart = NodeRestart::new(true, false)?;
+
+        loop {
+            sleep(churn_period).await;
+
+            // break out if we've run the duration of churn
+            if start.elapsed() > test_duration {
+                debug!("Test duration reached, stopping churn nodes task");
+                break;
+            }
+
+            if let Err(err) = node_restart.restart_next(true, true).await {
+                println!("Failed to restart node {err}");
+                info!("Failed to restart node {err}");
+                continue;
+            }
+
+            *churn_count.write().await += 1;
+        }
+        Ok(())
+    });
+}
+
+// Periodically checks for any content for which an error was reported, either at the
+// moment of its creation or in a later query attempt.
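+// Every `2 * churn_period`, one erred entry is popped and re-queried: on success it is
+// dropped from both the erred and failures buckets; otherwise it is re-inserted until it
+// has failed MAX_NUM_OF_QUERY_ATTEMPTS times, at which point it moves to `failures`.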
+fn retry_query_content_task(
+    client: Client,
+    content_erred: ContentErredList,
+    failures: ContentErredList,
+    churn_period: Duration,
+) {
+    let _handle = tokio::spawn(async move {
+        let delay = 2 * churn_period;
+        loop {
+            sleep(delay).await;
+
+            // let's try to query from the bucket of those that erred upon creation/query
+            let erred = content_erred.write().await.pop_first();
+
+            if let Some((net_addr, mut content_error)) = erred {
+                let attempts = content_error.attempts + 1;
+
+                println!("Querying erred content at {net_addr}, attempt: #{attempts} ...");
+                info!("Querying erred content at {net_addr}, attempt: #{attempts} ...");
+                if let Err(last_err) = query_content(&client, &net_addr).await {
+                    println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}");
+                    warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}");
+                    // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times,
+                    // otherwise report it effectively as failure.
+                    content_error.attempts = attempts;
+                    content_error.last_err = last_err;
+
+                    if attempts == MAX_NUM_OF_QUERY_ATTEMPTS {
+                        let _ = failures.write().await.insert(net_addr, content_error);
+                    } else {
+                        let _ = content_erred.write().await.insert(net_addr, content_error);
+                    }
+                } else {
+                    // remove it from the failures and erred buckets in case it succeeded
+                    // and had been re-added in the meantime
+                    let _ = failures.write().await.remove(&net_addr);
+                    let _ = content_erred.write().await.remove(&net_addr);
+                }
+            }
+        }
+    });
+}
+
+async fn final_retry_query_content(
+    client: &Client,
+    net_addr: &NetworkAddress,
+    churn_period: Duration,
+    failures: ContentErredList,
+) -> Result<()> {
+    let mut attempts = 1;
+    let net_addr = net_addr.clone();
+    loop {
+        println!("Final querying content at {net_addr}, attempt: #{attempts} ...");
+        debug!("Final querying content at {net_addr}, attempt: #{attempts} ...");
+        if let Err(last_err) = query_content(client, &net_addr).await {
+            if attempts == MAX_NUM_OF_QUERY_ATTEMPTS {
+                println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}");
+                error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}");
+                bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}");
+            } else {
+                attempts += 1;
+                let delay = 2 * churn_period;
+                debug!("Delaying last check for {delay:?} ...");
+                sleep(delay).await;
+                continue;
+            }
+        } else {
+            failures.write().await.remove(&net_addr);
+            // content retrieved fine
+            return Ok(());
+        }
+    }
+}
+
+async fn query_content(client: &Client, net_addr: &NetworkAddress) -> Result<()> {
+    match net_addr {
+        NetworkAddress::RegisterAddress(addr) => {
+            let _ = client.register_get(*addr).await?;
+            Ok(())
+        }
+        NetworkAddress::ChunkAddress(addr) => {
+            client.get(*addr.xorname()).await?;
+            Ok(())
+        }
+        _other => Ok(()), // we don't create/store any other type of content in this test yet
+    }
+}
diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs
index f9025a8cf3..037e2559d4 100644
--- a/test_utils/src/evm.rs
+++ b/test_utils/src/evm.rs
@@ -6,12 +6,15 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
-use evmlib::utils::evm_network_from_env;
+use evmlib::{utils::evm_network_from_env, Network};
 use std::env;
 
 pub fn get_funded_wallet() -> evmlib::wallet::Wallet {
     let network =
         evm_network_from_env().expect("Failed to get EVM network from environment variables");
+    if matches!(network, Network::ArbitrumOne) {
+        panic!("You're trying to use ArbitrumOne network. Use a custom network for testing.");
+    }
 
     // Default deployer wallet of the testnet.
     const DEFAULT_WALLET_PRIVATE_KEY: &str =
         "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";

From 118fca85a893da1d74f81d268bdf2688d229b49a Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Wed, 9 Oct 2024 18:06:28 +0530
Subject: [PATCH 144/255] chore(autonomi): add logging to evmlib and autonomi

---
 autonomi/src/client/address.rs           | 10 ++-
 autonomi/src/client/data.rs              | 73 +++++++++++++------
 autonomi/src/client/mod.rs               |  1 +
 autonomi/src/client/registers.rs         | 82 +++++++++++++++++++-----
 autonomi/src/client/vault.rs             | 26 ++++++--
 autonomi/src/lib.rs                      |  3 +
 autonomi/src/self_encryption.rs          |  7 +-
 evmlib/src/contract/data_payments/mod.rs | 13 +++-
 evmlib/src/contract/network_token.rs     | 22 +++++--
 evmlib/src/event.rs                      | 11 +++-
 evmlib/src/transaction.rs                | 24 +++++--
 evmlib/src/wallet.rs                     | 11 +++-
 12 files changed, 226 insertions(+), 57 deletions(-)

diff --git a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs
index 1bb4d37d45..ef7fab938e 100644
--- a/autonomi/src/client/address.rs
+++ b/autonomi/src/client/address.rs
@@ -17,8 +17,14 @@ pub enum DataError {
 }
 
 pub fn str_to_xorname(addr: &str) -> Result<XorName, DataError> {
-    let bytes = hex::decode(addr).map_err(|_| DataError::InvalidHexString)?;
-    let xor = XorName(bytes.try_into().map_err(|_| DataError::InvalidXorName)?);
+    let bytes = hex::decode(addr).map_err(|err| {
+        error!("Failed to decode hex string: {err:?}");
+        DataError::InvalidHexString
+    })?;
+    let xor = XorName(bytes.try_into().map_err(|err| {
+        error!("Failed to convert bytes to XorName: {err:?}");
+        DataError::InvalidXorName
+    })?);
     Ok(xor)
 }
 
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index d2578384ec..6e63b80515 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -84,6 +84,7 @@ impl Client {
     /// Fetch a piece of self-encrypted data from the network, by its data map
     /// XOR address.
     pub async fn get(&self, data_map_addr: XorName) -> Result<Bytes, GetError> {
+        info!("Fetching file from data_map: {data_map_addr:?}");
         let data_map_chunk = self.fetch_chunk(data_map_addr).await?;
         let data = self
             .fetch_from_data_map_chunk(data_map_chunk.value())
             .await?;
 
@@ -94,7 +95,7 @@ impl Client {
     /// Get a raw chunk from the network.
     pub async fn fetch_chunk(&self, addr: XorName) -> Result<Chunk, GetError> {
-        tracing::info!("Getting chunk: {addr:?}");
+        info!("Getting chunk: {addr:?}");
 
         let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key();
 
@@ -106,7 +107,11 @@ impl Client {
             is_register: false,
         };
 
-        let record = self.network.get_record_from_network(key, &get_cfg).await?;
+        let record = self
+            .network
+            .get_record_from_network(key, &get_cfg)
+            .await
+            .inspect_err(|err| error!("Error fetching chunk: {err:?}"))?;
 
         let header = RecordHeader::from_record(&record)?;
 
         if let RecordKind::Chunk = header.kind {
@@ -122,7 +127,10 @@ impl Client {
         let mut encrypted_chunks = vec![];
         for info in data_map.infos() {
-            let chunk = self.fetch_chunk(info.dst_hash).await?;
+            let chunk = self
+                .fetch_chunk(info.dst_hash)
+                .await
+                .inspect_err(|err| error!("Error fetching chunk {:?}: {err:?}", info.dst_hash))?;
             let chunk = EncryptedChunk {
                 index: info.index,
                 content: chunk.value,
             };
             encrypted_chunks.push(chunk);
         }
 
-        let data = decrypt_full_set(data_map, &encrypted_chunks)
-            .map_err(|e| GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e)))?;
+        let data = decrypt_full_set(data_map, &encrypted_chunks).map_err(|e| {
+            error!("Error decrypting encrypted_chunks: {e:?}");
+            GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e))
+        })?;
 
         Ok(data)
     }
 
     /// Unpack a wrapped data map and fetch all bytes using self-encryption.
     async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result<Bytes, GetError> {
-        let mut data_map_level: DataMapLevel =
-            rmp_serde::from_slice(data_map_bytes).map_err(GetError::InvalidDataMap)?;
+        let mut data_map_level: DataMapLevel = rmp_serde::from_slice(data_map_bytes)
+            .map_err(GetError::InvalidDataMap)
+            .inspect_err(|err| error!("Error deserializing data map: {err:?}"))?;
 
         loop {
             let data_map = match &data_map_level {
@@ -152,8 +163,10 @@ impl Client {
             match &data_map_level {
                 DataMapLevel::First(_) => break Ok(data),
                 DataMapLevel::Additional(_) => {
-                    data_map_level =
-                        rmp_serde::from_slice(&data).map_err(GetError::InvalidDataMap)?;
+                    data_map_level = rmp_serde::from_slice(&data).map_err(|err| {
+                        error!("Error deserializing data map: {err:?}");
+                        GetError::InvalidDataMap(err)
+                    })?;
                     continue;
                 }
             };
@@ -165,8 +178,12 @@ impl Client {
     pub async fn put(&self, data: Bytes, wallet: &Wallet) -> Result<XorName, PutError> {
         let now = sn_networking::target_arch::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
+        info!(
+            "Uploading datamap chunk to the network at: {:?}",
+            data_map_chunk.address()
+        );
 
-        tracing::debug!("Encryption took: {:.2?}", now.elapsed());
+        debug!("Encryption took: {:.2?}", now.elapsed());
 
         let map_xor_name = *data_map_chunk.address().xorname();
         let mut xor_names = vec![map_xor_name];
 
@@ -176,18 +193,28 @@ impl Client {
         }
 
         // Pay for all chunks + data map chunk
+        info!("Paying for {} addresses", xor_names.len());
-        let (payment_proofs, _free_chunks) = self.pay(xor_names.into_iter(), wallet).await?;
+        let (payment_proofs, _free_chunks) = self
+            .pay(xor_names.into_iter(), wallet)
+            .await
+            .inspect_err(|err| error!("Error paying for data: {err:?}"))?;
 
         // Upload data map
         if let Some(proof) = payment_proofs.get(&map_xor_name) {
+            debug!("Uploading data map chunk: {map_xor_name:?}");
             self.upload_chunk(data_map_chunk.clone(), proof.clone())
-                .await?;
+                .await
+                .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?;
         }
 
         // Upload the rest of the chunks
+        debug!("Uploading {} chunks", chunks.len());
         for chunk in chunks {
if let Some(proof) = payment_proofs.get(chunk.name()) { - self.upload_chunk(chunk, proof.clone()).await?; + let address = *chunk.address(); + self.upload_chunk(chunk, proof.clone()) + .await + .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; } } @@ -200,7 +227,7 @@ impl Client { let now = std::time::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; - tracing::debug!("Encryption took: {:.2?}", now.elapsed()); + debug!("Encryption took: {:.2?}", now.elapsed()); let map_xor_name = *data_map_chunk.address().xorname(); let mut content_addrs = vec![map_xor_name]; @@ -209,7 +236,15 @@ impl Client { content_addrs.push(*chunk.name()); } - let cost_map = self.get_store_quotes(content_addrs.into_iter()).await?; + info!( + "Calculating cost of storing {} chunks. Data map chunk at: {map_xor_name:?}", + content_addrs.len() + ); + + let cost_map = self + .get_store_quotes(content_addrs.into_iter()) + .await + .inspect_err(|err| error!("Error getting store quotes: {err:?}"))?; let total_cost = AttoTokens::from_atto( cost_map .values() @@ -238,7 +273,7 @@ impl Client { let proofs = construct_proofs(&cost_map, &payments); - tracing::trace!( + trace!( "Chunk payments of {} chunks completed. {} chunks were free / already paid for", proofs.len(), skipped_chunks.len() @@ -275,7 +310,7 @@ impl Client { async fn store_chunk(&self, chunk: Chunk, payment: ProofOfPayment) -> Result<(), PutError> { let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID"); - tracing::debug!("Storing chunk: {chunk:?} to {:?}", storing_node); + debug!("Storing chunk: {chunk:?} to {:?}", storing_node); let key = chunk.network_address().to_record_key(); @@ -337,10 +372,10 @@ async fn fetch_store_quote_with_retries( } Err(err) if retries < 2 => { retries += 1; - tracing::error!("Error while fetching store quote: {err:?}, retry #{retries}"); + error!("Error while fetching store quote: {err:?}, retry #{retries}"); } Err(err) => { - tracing::error!( + error!( "Error while fetching store quote: {err:?}, stopping after {retries} retries" ); break Err(PayError::CouldNotGetStoreQuote(content_addr)); diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 892ff84c3b..cb61bd75de 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -84,6 +84,7 @@ impl Client { let _handle = sn_networking::target_arch::spawn(async move { for addr in peers { if let Err(err) = network_clone.dial(addr.clone()).await { + error!("Failed to dial addr={addr} with err: {err:?}"); eprintln!("addr={addr} Failed to dial: {err:?}"); }; } diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index c2b3ed6fd2..ad279837e6 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -87,6 +87,7 @@ impl Client { /// Fetches a Register from the network. 
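     /// If the network reports a split record (a forked register), all forks are
     /// fetched and merged into a single register before the CRDT operations are verified.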
     pub async fn register_get(&self, address: RegisterAddress) -> Result<Register, RegisterError> {
+        info!("Fetching register at addr: {address}");
         let network_address = NetworkAddress::from_register_address(address);
         let key = network_address.to_record_key();
 
@@ -104,6 +105,7 @@ impl Client {
             }
             // manage forked register case
             Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
+                debug!("Forked register detected for {address:?} merging forks");
                 let mut registers: Vec<SignedRegister> = vec![];
                 for (_, (record, _)) in result_map {
                     registers.push(
@@ -119,13 +121,17 @@ impl Client {
                 });
                 register
             }
-            Err(e) => Err(e)?,
+            Err(e) => {
+                error!("Failed to get register {address:?} from network: {e}");
+                Err(e)?
+            }
         };
 
         // Make sure the fetched record contains valid CRDT operations
-        register
-            .verify()
-            .map_err(|_| RegisterError::FailedVerification)?;
+        register.verify().map_err(|err| {
+            error!("Failed to verify register {address:?} with error: {err}");
+            RegisterError::FailedVerification
+        })?;
 
         Ok(Register { inner: register })
     }
 
@@ -142,21 +148,38 @@ impl Client {
         let mut register = signed_register
             .clone()
             .register()
-            .expect("register to be valid")
+            .map_err(|err| {
+                error!(
+                    "Failed to get register from signed register as it failed verification: {err}"
+                );
+                RegisterError::FailedVerification
+            })?
             .clone();
 
+        info!("Updating register at addr: {}", register.address());
+
         // Get all current branches
         let children: BTreeSet<EntryHash> = register.read().into_iter().map(|(e, _)| e).collect();
 
         // Write the new value to all branches
         let (_, op) = register
             .write(new_value.into(), &children, &owner)
-            .map_err(RegisterError::Write)?;
+            .map_err(|err| {
+                error!(
+                    "Failed to write to register at addr: {} : {err}",
+                    register.address()
+                );
+                RegisterError::Write(err)
+            })?;
 
         // Apply the operation to the register
-        signed_register
-            .add_op(op.clone())
-            .map_err(RegisterError::Write)?;
+        signed_register.add_op(op.clone()).map_err(|err| {
+            error!(
+                "Failed to add op to register at addr: {} : {err}",
+                register.address()
+            );
+            RegisterError::Write(err)
+        })?;
 
         // Prepare the record for network storage
         let record = Record {
@@ -183,7 +206,15 @@ impl Client {
         };
 
         // Store the updated register on the network
-        self.network.put_record(record, &put_cfg).await?;
+        self.network
+            .put_record(record, &put_cfg)
+            .await
+            .inspect_err(|err| {
+                error!(
+                    "Failed to put record - register {:?} to the network: {err}",
+                    register.address()
+                )
+            })?;
 
         Ok(())
     }
 
@@ -194,6 +225,7 @@ impl Client {
         name: String,
         owner: RegisterSecretKey,
     ) -> Result<AttoTokens, RegisterError> {
+        info!("Getting cost for register with name: {name}");
         // get register address
         let pk = owner.public_key();
         let name = XorName::from_content_parts(&[name.as_bytes()]);
 
@@ -256,6 +288,8 @@ impl Client {
         let mut register = ClientRegister::new(pk, name, permissions);
         let address = NetworkAddress::from_register_address(*register.address());
 
+        info!("Creating register at address: {address}");
+
         let entries = register
             .read()
             .into_iter()
@@ -264,21 +298,29 @@ impl Client {
         let _ = register.write(value.into(), &entries, &owner);
         let reg_xor = register.address().xorname();
 
-        let (payment_proofs, _skipped) = self.pay(std::iter::once(reg_xor), wallet).await?;
+        debug!("Paying for register at address: {address}");
+        let (payment_proofs, _skipped) = self
+            .pay(std::iter::once(reg_xor), wallet)
+            .await
+            .inspect_err(|err| {
+                error!("Failed to pay for register at address: {address} : {err}")
+            })?;
         let proof = if let Some(proof) = payment_proofs.get(&reg_xor) {
             proof
         } else {
            // register was skipped, meaning it was already paid for
+            error!("Register at address: {address} was already paid for");
             return Err(RegisterError::Network(NetworkError::RegisterAlreadyExists));
         };
 
         let payee = proof
             .to_peer_id_payee()
-            .ok_or(RegisterError::InvalidQuote)?;
-        let signed_register = register
-            .clone()
-            .into_signed(&owner)
-            .map_err(RegisterError::CouldNotSign)?;
+            .ok_or(RegisterError::InvalidQuote)
+            .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?;
+        let signed_register = register.clone().into_signed(&owner).map_err(|err| {
+            error!("Failed to sign register at address: {address} : {err}");
+            RegisterError::CouldNotSign(err)
+        })?;
 
         let record = Record {
             key: address.to_record_key(),
@@ -306,7 +348,13 @@ impl Client {
             verification: Some((VerificationKind::Network, get_cfg)),
         };
 
-        self.network.put_record(record, &put_cfg).await?;
+        debug!("Storing register at address {address} to the network");
+        self.network
+            .put_record(record, &put_cfg)
+            .await
+            .inspect_err(|err| {
+                error!("Failed to put record - register {address} to the network: {err}")
+            })?;
 
         Ok(Register {
             inner: signed_register,
diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs
index 8087f17eff..735c43c07d 100644
--- a/autonomi/src/client/vault.rs
+++ b/autonomi/src/client/vault.rs
@@ -39,6 +39,7 @@ impl Client {
         &self,
         secret_key: &SecretKey,
     ) -> Result<Option<Bytes>, VaultError> {
+        info!("Fetching and decrypting vault");
         let pad = self.get_vault_from_network(secret_key).await?;
 
         Ok(pad.decrypt_data(secret_key)?)
@@ -53,6 +54,7 @@ impl Client {
         let scratch_address = ScratchpadAddress::new(client_pk);
         let network_address = NetworkAddress::from_scratchpad_address(scratch_address);
+        info!("Fetching vault from network at {network_address:?}",);
         let scratch_key = network_address.to_record_key();
 
         let get_cfg = GetRecordCfg {
@@ -66,7 +68,10 @@ impl Client {
         let record = self
             .network
             .get_record_from_network(scratch_key, &get_cfg)
-            .await?;
+            .await
+            .inspect_err(|err| {
+                error!("Failed to fetch vault {network_address:?} from network: {err}");
+            })?;
 
         let pad = try_deserialize_record::<Scratchpad>(&record)
             .map_err(|_| VaultError::CouldNotDeserializeVaultScratchPad(scratch_address))?;
 
@@ -100,7 +105,7 @@ impl Client {
             is_new = false;
             existing_data
         } else {
-            tracing::trace!("new scratchpad creation");
+            trace!("new scratchpad creation");
             Scratchpad::new(client_pk)
         };
 
@@ -108,12 +113,17 @@ impl Client {
         let scratch_address = scratch.network_address();
         let scratch_key = scratch_address.to_record_key();
 
+        info!("Writing to vault at {scratch_address:?}",);
+
         let record = if is_new {
             self.pay(
                 [&scratch_address].iter().filter_map(|f| f.as_xorname()),
                 wallet,
             )
-            .await?;
+            .await
+            .inspect_err(|err| {
+                error!("Failed to pay for new vault at addr: {scratch_address:?} : {err}");
+            })?;
 
             let scratch_xor = scratch_address.as_xorname().ok_or(PutError::VaultXorName)?;
             let (payment_proofs, _) = self.pay(std::iter::once(scratch_xor), wallet).await?;
@@ -155,7 +165,15 @@ impl Client {
             )),
         };
 
-        self.network.put_record(record, &put_cfg).await?;
+        debug!("Put record - scratchpad at {scratch_address:?} to the network");
+        self.network
+            .put_record(record, &put_cfg)
+            .await
+            .inspect_err(|err| {
+                error!(
+                    "Failed to put scratchpad {scratch_address:?} to the network with err: {err:?}"
+                )
+            })?;
 
         Ok(next_count)
     }
diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs
index ae3a7d1b67..d6be10953c 100644
--- a/autonomi/src/lib.rs
+++ b/autonomi/src/lib.rs
@@ -29,6 +29,9 @@
 // docs.rs generation will enable unstable `doc_cfg` feature
 #![cfg_attr(docsrs, feature(doc_cfg))]
 
+#[macro_use]
+extern crate tracing;
+
 pub mod client;
 #[cfg(feature = "data")]
 mod self_encryption;
diff --git a/autonomi/src/self_encryption.rs b/autonomi/src/self_encryption.rs
index c050c4deb7..b43648f332 100644
--- a/autonomi/src/self_encryption.rs
+++ b/autonomi/src/self_encryption.rs
@@ -66,7 +66,8 @@ fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec<Chunk>), Error> {
         chunk.serialize(&mut serialiser)?;
         let serialized_chunk = bytes.into_inner().freeze();
 
-        let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)?;
+        let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)
+            .inspect_err(|err| error!("Failed to encrypt chunks: {err:?}"))?;
         chunks = next_encrypted_chunks
             .iter()
             .map(|c| Chunk::new(c.content.clone())) // no need to encrypt what is self-encrypted
@@ -83,6 +84,8 @@ fn wrap_data_map(data_map: &DataMapLevel) -> Result<Bytes, Error> {
diff --git a/evmlib/src/contract/data_payments/mod.rs b/evmlib/src/contract/data_payments/mod.rs
     if data_payments.len() > MAX_TRANSFERS_PER_TRANSACTION {
+        error!(
+            "Data payments limit exceeded: {} > {}",
+            data_payments.len(),
+            MAX_TRANSFERS_PER_TRANSACTION
+        );
         return Err(Error::TransferLimitExceeded);
     }
 
@@ -73,9 +78,13 @@ where
             .contract
             .submitDataPayments(data_payments)
             .send()
-            .await?
+            .await
+            .inspect_err(|e| error!("Failed to submit data payments during pay_for_quotes: {e:?}"))?
             .watch()
-            .await?;
+            .await
+            .inspect_err(|e| {
+                error!("Failed to watch data payments during pay_for_quotes: {e:?}")
+            })?;
 
         Ok(tx_hash)
     }
diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs
index 0b02a7be8d..361c87e340 100644
--- a/evmlib/src/contract/network_token.rs
+++ b/evmlib/src/contract/network_token.rs
@@ -54,7 +54,13 @@ where
     /// Get the raw token balance of an address.
     pub async fn balance_of(&self, account: Address) -> Result<U256, Error> {
-        let balance = self.contract.balanceOf(account).call().await?._0;
+        let balance = self
+            .contract
+            .balanceOf(account)
+            .call()
+            .await
+            .inspect_err(|err| error!("Error getting balance of account: {err:?}"))?
+            ._0;
         Ok(balance)
     }
 
@@ -64,9 +70,13 @@ where
             .contract
             .approve(spender, value)
             .send()
-            .await?
+            .await
+            .inspect_err(|err| {
+                error!("Error approving spender to spend raw amt of tokens: {err:?}")
+            })?
             .watch()
-            .await?;
+            .await
+            .inspect_err(|err| error!("Error watching approve tx: {err:?}"))?;
 
         Ok(tx_hash)
     }
 
@@ -77,9 +87,11 @@ where
             .contract
             .transfer(receiver, amount)
             .send()
-            .await?
+            .await
+            .inspect_err(|err| error!("Error transferring raw amt of tokens: {err:?}"))?
             .watch()
-            .await?;
+            .await
+            .inspect_err(|err| error!("Error watching transfer tx: {err:?}"))?;
 
         Ok(tx_hash)
     }
diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs
index 65d58db0a9..eff4bdf30e 100644
--- a/evmlib/src/event.rs
+++ b/evmlib/src/event.rs
@@ -30,13 +30,22 @@ impl TryFrom<Log> for ChunkPaymentEvent {
     fn try_from(log: Log) -> Result<Self, Self::Error> {
         // Verify the amount of topics
         if log.topics().len() != 4 {
+            error!("Topics amount is unexpected. Was expecting 4");
             return Err(Error::TopicsAmountUnexpected);
         }
 
-        let topic0 = log.topics().first().ok_or(Error::EventSignatureMissing)?;
+        let topic0 = log
+            .topics()
+            .first()
+            .ok_or(Error::EventSignatureMissing)
+            .inspect_err(|_| error!("Event signature is missing"))?;
 
         // Verify the event signature
         if topic0 != &DATA_PAYMENT_EVENT_SIGNATURE {
+            error!(
+                "Event signature does not match. 
Expected: {:?}, got: {:?}", + DATA_PAYMENT_EVENT_SIGNATURE, topic0 + ); return Err(Error::EventSignatureDoesNotMatch); } diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs index 2b5c929d95..c5c97896e0 100644 --- a/evmlib/src/transaction.rs +++ b/evmlib/src/transaction.rs @@ -33,7 +33,10 @@ pub async fn get_transaction_receipt_by_hash( let provider = ProviderBuilder::new() .with_recommended_fillers() .on_http(network.rpc_url().clone()); - let maybe_receipt = provider.get_transaction_receipt(transaction_hash).await?; + let maybe_receipt = provider + .get_transaction_receipt(transaction_hash) + .await + .inspect_err(|err| error!("Error getting transaction receipt for transaction_hash: {transaction_hash:?} : {err:?}", ))?; Ok(maybe_receipt) } @@ -44,7 +47,8 @@ async fn get_block_by_number(network: &Network, block_number: u64) -> Result Result Result, Error> { + debug!( + "Getting data payment event for quote_hash: {quote_hash:?}, reward_addr: {reward_addr:?}" + ); let topic1: FixedBytes<32> = FixedBytes::left_padding_from(reward_addr.as_slice()); let filter = Filter::new() @@ -88,18 +98,21 @@ pub async fn verify_data_payment( amount: U256, quote_expiration_timestamp_in_secs: u64, ) -> Result<(), Error> { + debug!("Verifying data payment for tx_hash: {tx_hash:?}"); let transaction = get_transaction_receipt_by_hash(network, tx_hash) .await? .ok_or(Error::TransactionNotFound)?; // If the status is True, it means the tx is confirmed. if !transaction.status() { + error!("Transaction {tx_hash:?} is not confirmed"); return Err(Error::TransactionUnconfirmed); } let block_number = transaction .block_number - .ok_or(Error::TransactionNotInBlock)?; + .ok_or(Error::TransactionNotInBlock) + .inspect_err(|_| error!("Transaction {tx_hash:?} has not been included in a block yet"))?; let block = get_block_by_number(network, block_number) .await? @@ -107,6 +120,7 @@ pub async fn verify_data_payment( // Check if payment was done within the quote expiration timeframe. if quote_expiration_timestamp_in_secs < block.header.timestamp { + error!("Payment for tx_hash: {tx_hash:?} was done after the quote expired"); return Err(Error::QuoteExpired); } @@ -130,6 +144,8 @@ pub async fn verify_data_payment( } } + error!("No event proof found for tx_hash: {tx_hash:?}"); + Err(Error::EventProofNotFound) } diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index 18bebd541d..2d52bb4690 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -114,7 +114,10 @@ fn random() -> EthereumWallet { /// Creates a wallet from a private key in HEX format. 
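 /// Parse failures are logged and mapped to `Error::PrivateKeyInvalid` rather than
 /// surfacing the raw signer error (see below).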
 fn from_private_key(private_key: &str) -> Result<EthereumWallet, Error> {
-    let signer: PrivateKeySigner = private_key.parse().map_err(|_| Error::PrivateKeyInvalid)?;
+    let signer: PrivateKeySigner = private_key.parse().map_err(|err| {
+        error!("Error parsing private key: {err}");
+        Error::PrivateKeyInvalid
+    })?;
     Ok(EthereumWallet::from(signer))
 }
 
@@ -169,6 +172,7 @@ pub async fn balance_of_tokens(
     account: Address,
     network: &Network,
 ) -> Result<U256, network_token::Error> {
+    info!("Getting balance of tokens for account: {account}");
     let provider = http_provider(network.rpc_url().clone());
     let network_token = NetworkToken::new(*network.payment_token_address(), provider);
     network_token.balance_of(account).await
 }
 
@@ -179,6 +183,7 @@ pub async fn balance_of_gas_tokens(
     account: Address,
     network: &Network,
 ) -> Result {
+    debug!("Getting balance of gas tokens for account: {account}");
     let provider = http_provider(network.rpc_url().clone());
     let balance = provider.get_balance(account).await?;
     Ok(balance)
 }
 
@@ -191,6 +196,7 @@ async fn approve_to_spend_tokens(
     spender: Address,
     amount: U256,
 ) -> Result {
+    debug!("Approving address/smart contract with {amount} tokens at address: {spender}",);
     let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
     let network_token = NetworkToken::new(*network.payment_token_address(), provider);
     network_token.approve(spender, amount).await
 }
 
@@ -203,6 +209,7 @@ pub async fn transfer_tokens(
     receiver: Address,
     amount: U256,
 ) -> Result {
+    debug!("Transferring {amount} tokens to {receiver}");
     let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
     let network_token = NetworkToken::new(*network.payment_token_address(), provider);
     network_token.transfer(receiver, amount).await
 }
 
@@ -215,6 +222,7 @@ pub async fn transfer_gas_tokens(
     receiver: Address,
     amount: U256,
 ) -> Result {
+    debug!("Transferring {amount} gas tokens to {receiver}");
     let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet);
     let tx = TransactionRequest::default()
         .with_to(receiver)
 
@@ -236,6 +244,7 @@ pub async fn pay_for_quotes>(
     network: &Network,
     payments: T,
 ) -> Result, PayForQuotesError> {
+    info!("Paying for quotes");
     let payments: Vec<_> = payments.into_iter().collect();
 
     let total_amount = payments.iter().map(|(_, _, amount)| amount).sum();

From 0030d0a36680d09a988d18adca46dba68daaf29b Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Thu, 10 Oct 2024 13:11:25 +0900
Subject: [PATCH 145/255] feat(autonomi): use local instead of local-discovery
 for feat

---
 .github/workflows/benchmark-prs.yml           |  8 +--
 .../workflows/generate-benchmark-charts.yml   |  6 +-
 .github/workflows/merge.yml                   | 60 +++++++++----------
 .github/workflows/nightly.yml                 | 38 ++++++------
 .github/workflows/nightly_wan.yml             | 53 ++++++++--------
 README.md                                     | 30 +++++-----
 autonomi/Cargo.toml                           |  8 ++-
 autonomi/README.md                            | 12 ++--
 autonomi_cli/Cargo.toml                       |  9 ++-
 sn_auditor/Cargo.toml                         |  5 +-
 sn_auditor/README.md                          |  2 +-
 sn_auditor/src/main.rs                        |  2 +-
 sn_cli/Cargo.toml                             |  5 +-
 sn_cli/benches/files.rs                       |  2 +-
 sn_cli/src/bin/main.rs                        |  2 +-
 sn_client/Cargo.toml                          |  2 +-
 sn_client/README.md                           |  4 +-
 sn_client/src/api.rs                          |  2 +-
 sn_client/src/test_utils.rs                   |  2 +-
 sn_faucet/src/faucet_server.rs                |  6 +-
 sn_faucet/src/main.rs                         |  2 +-
 sn_networking/Cargo.toml                      |  4 +-
 sn_networking/src/driver.rs                   | 10 ++--
 sn_networking/src/event/mod.rs                |  6 +-
 sn_networking/src/event/swarm.rs              |  4 +-
 sn_node/Cargo.toml                            |  2 +-
 sn_node/src/node.rs                           |  2 +-
 sn_node_manager/Cargo.toml                    |  2 +-
 sn_node_manager/src/bin/cli/main.rs           |  4 +-
 sn_node_manager/src/cmd/mod.rs                |  4 +-
sn_peers_acquisition/Cargo.toml | 10 ++-- sn_peers_acquisition/src/lib.rs | 12 ++-- test_utils/Cargo.toml | 6 +- test_utils/src/lib.rs | 2 +- 34 files changed, 164 insertions(+), 164 deletions(-) diff --git a/.github/workflows/benchmark-prs.yml b/.github/workflows/benchmark-prs.yml index 170751ab9c..af1b3ce0fe 100644 --- a/.github/workflows/benchmark-prs.yml +++ b/.github/workflows/benchmark-prs.yml @@ -40,14 +40,14 @@ jobs: # # As normal user won't care much about initial client startup, # # but be more alerted on communication speed during transmission. # # Meanwhile the criterion testing code includes the client startup as well, - # # it will be better to execute bench test with `local-discovery`, + # # it will be better to execute bench test with `local`, # # to make the measurement results reflect speed improvement or regression more accurately. # - name: Build sn bins - # run: cargo build --release --bin safe --bin safenode --features local-discovery + # run: cargo build --release --bin safe --bin safenode --features local # timeout-minutes: 30 # - name: Build faucet bin - # run: cargo build --release --bin faucet --features local-discovery --features gifting --no-default-features + # run: cargo build --release --bin faucet --features local --features gifting --no-default-features # timeout-minutes: 30 # - name: Start a local network @@ -171,7 +171,7 @@ jobs: # # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # # passes to tee which displays it in the terminal and writes to output.txt # run: | - # cargo criterion --features=local-discovery --message-format=json 2>&1 -p sn_cli | tee -a output.txt + # cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt # cat output.txt | rg benchmark-complete | jq -s 'map({ # name: (.id | split("/"))[-1], # unit: "MiB/s", diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index cd61f0e165..6c69dc7d1b 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -46,11 +46,11 @@ jobs: run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - name: Build node and client - run: cargo build --release --features local-discovery --bin safenode --bin safe + run: cargo build --release --features local --bin safenode --bin safe timeout-minutes: 30 - name: Build faucet bin - run: cargo build --release --bin faucet --features local-discovery --features gifting + run: cargo build --release --bin faucet --features local --features gifting timeout-minutes: 30 - name: Start a local network @@ -81,7 +81,7 @@ jobs: # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # passes to tee which displays it in the terminal and writes to output.txt run: | - cargo criterion --features=local-discovery --message-format=json 2>&1 -p sn_cli | tee -a output.txt + cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt cat output.txt | rg benchmark-complete | jq -s 'map({ name: (.id | split("/"))[-1], unit: "MiB/s", diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 45fa0682da..1e22700f58 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -73,9 +73,9 @@ jobs: # See https://doc.rust-lang.org/rustdoc/lints.html for lints that are 'warning' by default. 
run: RUSTDOCFLAGS="--deny=warnings" cargo doc --no-deps - - name: Check local-discovery is not a default feature + - name: Check local is not a default feature shell: bash - run: if [[ ! $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[].features.default[]? | select(. == "local-discovery")') ]]; then echo "local-discovery is not a default feature in any package."; else echo "local-discovery is a default feature in at least one package." && exit 1; fi + run: if [[ ! $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[].features.default[]? | select(. == "local")') ]]; then echo "local is not a default feature in any package."; else echo "local is a default feature in at least one package." && exit 1; fi - name: Clean out the target directory run: cargo clean @@ -224,7 +224,7 @@ jobs: UPLOAD_ADDRESS=$(rg "At address: ([0-9a-f]*)" -o -r '$1' ./upload_output) echo "UPLOAD_ADDRESS=$UPLOAD_ADDRESS" >> $GITHUB_ENV shell: bash - + - name: parse address (win) if: matrix.os == 'windows-latest' run: | @@ -237,10 +237,10 @@ jobs: env: SN_LOG: "v" timeout-minutes: 5 - + - name: Generate register signing key run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key - + - name: Create register (writeable by owner) run: ./target/release/autonomi_cli --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 env: @@ -253,7 +253,7 @@ jobs: REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_create_output) echo "REGISTER_ADDRESS=$REGISTER_ADDRESS" >> $GITHUB_ENV shell: bash - + - name: parse register address (win) if: matrix.os == 'windows-latest' run: | @@ -291,7 +291,7 @@ jobs: PUBLIC_REGISTER_ADDRESS=$(rg "Register created at address: ([0-9a-f]*)" -o -r '$1' ./register_public_create_output) echo "PUBLIC_REGISTER_ADDRESS=$PUBLIC_REGISTER_ADDRESS" >> $GITHUB_ENV shell: bash - + - name: parse public register address (win) if: matrix.os == 'windows-latest' run: | @@ -360,15 +360,15 @@ jobs: # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features=local-discovery --bin safenode + # run: cargo build --release --features=local --bin safenode # timeout-minutes: 30 # - name: Build faucet binary - # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # run: cargo build --release --bin faucet --features="local,gifting" # timeout-minutes: 30 # - name: Build testing executable - # run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run + # run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run # env: # # only set the target dir for windows to bypass the linker issue. # # happens if we build the node manager via testnet action @@ -396,21 +396,21 @@ jobs: # fi # - name: execute the sequential transfers tests - # run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 + # run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 # env: # SN_LOG: "all" # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} # timeout-minutes: 25 # - name: execute the storage payment tests - # run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 + # run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 # env: # SN_LOG: "all" # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} # timeout-minutes: 25 # - name: execute the double spend tests - # run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 + # run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 # env: # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} # timeout-minutes: 25 @@ -440,15 +440,15 @@ jobs: # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features=local-discovery --bin safenode + # run: cargo build --release --features=local --bin safenode # timeout-minutes: 30 # - name: Build faucet binary - # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # run: cargo build --release --bin faucet --features="local,gifting" # timeout-minutes: 30 # - name: Build testing executable - # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + # run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run # env: # # only set the target dir for windows to bypass the linker issue. # # happens if we build the node manager via testnet action @@ -477,7 +477,7 @@ jobs: # fi # - name: execute the spend simulation - # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture # env: # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} # timeout-minutes: 25 @@ -506,15 +506,15 @@ jobs: # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features=local-discovery,distribution --bin safenode + # run: cargo build --release --features=local,distribution --bin safenode # timeout-minutes: 35 # - name: Build faucet binary - # run: cargo build --release --features=local-discovery,distribution,gifting --bin faucet + # run: cargo build --release --features=local,distribution,gifting --bin faucet # timeout-minutes: 35 # - name: Build testing executable - # run: cargo test --release --features=local-discovery,distribution --no-run + # run: cargo test --release --features=local,distribution --no-run # env: # # only set the target dir for windows to bypass the linker issue. # # happens if we build the node manager via testnet action @@ -542,7 +542,7 @@ jobs: # fi # - name: execute token_distribution tests - # run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 + # run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 # env: # SN_LOG: "all" # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} @@ -580,15 +580,15 @@ jobs: # - uses: Swatinem/rust-cache@v2 # - name: Build binaries - # run: cargo build --release --features local-discovery --bin safenode + # run: cargo build --release --features local --bin safenode # timeout-minutes: 30 # - name: Build faucet binaries - # run: cargo build --release --features="local-discovery,gifting" --bin faucet + # run: cargo build --release --features="local,gifting" --bin faucet # timeout-minutes: 30 # - name: Build churn tests - # run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run + # run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run # env: # # only set the target dir for windows to bypass the linker issue. # # happens if we build the node manager via testnet action @@ -616,7 +616,7 @@ jobs: # fi # - name: Chunks data integrity during nodes churn - # run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture + # run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture # env: # TEST_DURATION_MINS: 5 # TEST_TOTAL_CHURN_CYCLES: 15 @@ -710,11 +710,11 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode + run: cargo build --release --features local --bin safenode timeout-minutes: 30 - name: Build data location and routing table tests - run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + run: cargo test --release -p sn_node --features=local --test verify_data_location --test verify_routing_table --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -745,13 +745,13 @@ jobs: fi - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture env: CHURN_COUNT: 6 SN_LOG: "all" @@ -759,7 +759,7 @@ jobs: timeout-minutes: 25 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 5 diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index de69269638..aac0ac9ad4 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -199,11 +199,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode --bin faucet + run: cargo build --release --features=local --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run + run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -221,21 +221,21 @@ jobs: build: true - name: execute the sequential transfers test - run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test sequential_transfers -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} SN_LOG: "all" timeout-minutes: 10 - name: execute the storage payment tests - run: cargo test --release -p sn_node --features="local-discovery" --test storage_payments -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test storage_payments -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} SN_LOG: "all" timeout-minutes: 10 - name: execute the double spend tests - run: cargo test --release -p sn_node --features="local-discovery" --test double_spend -- --nocapture --test-threads=1 + run: cargo test --release -p sn_node --features="local" --test double_spend -- --nocapture --test-threads=1 env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 25 @@ -277,11 +277,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode --bin faucet + run: cargo build --release --features=local --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + run: cargo test --release -p sn_node --features=local --test spend_simulation --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -300,7 +300,7 @@ jobs: build: true - name: execute the spend simulation test - run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + run: cargo test --release -p sn_node --features="local" --test spend_simulation -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 25 @@ -341,11 +341,11 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features=local-discovery,distribution --bin safenode --bin faucet + run: cargo build --release --features=local,distribution --bin safenode --bin faucet timeout-minutes: 30 - name: Build testing executable - run: cargo test --release --features=local-discovery,distribution --no-run + run: cargo test --release --features=local,distribution --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -373,7 +373,7 @@ jobs: fi - name: execute token_distribution tests - run: cargo test --release --features=local-discovery,distribution token_distribution -- --nocapture --test-threads=1 + run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} @@ -412,11 +412,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode --bin faucet timeout-minutes: 30 - name: Build churn tests - run: cargo test --release -p sn_node --features=local-discovery --test data_with_churn --no-run + run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -434,7 +434,7 @@ jobs: build: true - name: Chunks data integrity during nodes churn (during 10min) (in theory) - run: cargo test --release -p sn_node --features="local-discovery" --test data_with_churn -- --nocapture + run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture env: TEST_DURATION_MINS: 60 TEST_CHURN_CYCLES: 6 @@ -537,11 +537,11 @@ jobs: continue-on-error: true - name: Build binaries - run: cargo build --release --features local-discovery --bin safenode --bin faucet + run: cargo build --release --features local --bin safenode --bin faucet timeout-minutes: 30 - name: Build data location and routing table tests - run: cargo test --release -p sn_node --features=local-discovery --test verify_data_location --test verify_routing_table --no-run + run: cargo test --release -p sn_node --features=local --test verify_data_location --test verify_routing_table --no-run env: # only set the target dir for windows to bypass the linker issue. # happens if we build the node manager via testnet action @@ -559,20 +559,20 @@ jobs: build: true - name: Verify the Routing table of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 - name: Verify the location of the data on the network - run: cargo test --release -p sn_node --features="local-discovery" --test verify_data_location -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_data_location -- --nocapture env: SN_LOG: "all" CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} timeout-minutes: 90 - name: Verify the routing tables of the nodes - run: cargo test --release -p sn_node --features="local-discovery" --test verify_routing_table -- --nocapture + run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} timeout-minutes: 5 diff --git a/.github/workflows/nightly_wan.yml b/.github/workflows/nightly_wan.yml index 0ee6bc1ad3..9c84f58488 100644 --- a/.github/workflows/nightly_wan.yml +++ b/.github/workflows/nightly_wan.yml @@ -3,7 +3,7 @@ name: Nightly -- Full WAN Network Tests on: schedule: - cron: "0 0 * * *" -# enable as below for testing purpose. + # enable as below for testing purpose. # pull_request: # branches: ["*"] workflow_dispatch: @@ -60,14 +60,14 @@ jobs: - name: Check env variables shell: bash run: | - echo "Peer is $SAFE_PEERS" - echo "Deployment inventory is $SN_INVENTORY" + echo "Peer is $SAFE_PEERS" + echo "Deployment inventory is $SN_INVENTORY" - name: start faucet uses: maidsafe/sn-testnet-control-action/start-faucet@main with: - network-name: ${{ env.NETWORK_NAME }} - + network-name: ${{ env.NETWORK_NAME }} + - name: Obtain the funds from the faucet run: | set -e @@ -80,7 +80,7 @@ jobs: env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to carry out chunk actions run: | set -e @@ -88,7 +88,7 @@ jobs: env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to create a register run: | set -e @@ -96,7 +96,7 @@ jobs: env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to get a register run: | set -e @@ -104,7 +104,7 @@ jobs: env: SN_LOG: "all" timeout-minutes: 2 - + - name: Start a client to edit a register run: | set -e @@ -112,7 +112,7 @@ jobs: env: SN_LOG: "all" timeout-minutes: 2 - + # - name: Fetch network logs # uses: maidsafe/sn-testnet-control-action/fetch-logs@main # with: @@ -120,7 +120,7 @@ jobs: # rust-log: debug # provider: digital-ocean # network-name: ${{ env.NETWORK_NAME }} - + - name: Upload local logs if: always() uses: actions/upload-artifact@v4 @@ -154,17 +154,17 @@ jobs: # os: [ubuntu-latest] # steps: # - uses: actions/checkout@v4 - + # - name: Install Rust # uses: dtolnay/rust-toolchain@stable - + # - uses: Swatinem/rust-cache@v2 # continue-on-error: true - + # - name: Build testing executable - # run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run + # run: cargo test --release -p sn_node --features=local --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run # timeout-minutes: 40 - + # - name: setup testnet-deploy # uses: maidsafe/sn-testnet-control-action/init-testnet-deploy@main # with: @@ -188,38 +188,37 @@ jobs: # provider: digital-ocean # safe-network-branch: main # safe-network-user: maidsafe - + # - name: Check env variables # shell: bash # run: | # echo "Peer is $SAFE_PEERS" # echo "Deployment inventory is $SN_INVENTORY" - + # - name: execute the sequential transfers test # run: cargo test --release -p sn_node --test sequential_transfers -- --nocapture --test-threads=1 # env: # SN_LOG: "all" # timeout-minutes: 45 - + # - name: execute the storage payment tests # run: cargo test --release -p sn_node --test storage_payments -- --nocapture --test-threads=1 # env: # SN_LOG: "all" # timeout-minutes: 45 - + # - name: execute the double spend tests # run: cargo test --release -p sn_node --test 
double_spend -- --nocapture --test-threads=1 # timeout-minutes: 45 - - + # - name: execute the spend simulation tests # run: cargo test --release -p sn_node --test spend_simulation -- --nocapture --test-threads=1 # timeout-minutes: 45 - + # - name: Small wait to allow reward receipt # run: sleep 30 # timeout-minutes: 1 - + # - name: Fetch network logs # uses: ermineJose/sn-testnet-control-action/fetch-logs@feat-add_fetch-logs-action # with: @@ -237,13 +236,13 @@ jobs: # ~/.local/share/safe/node/*/logs/*.log* # ~/.local/share/safe/*/*/*.log* # ~/.local/share/safe/client/logs/*/*.log* - + # - name: destroy network # uses: maidsafe/sn-testnet-control-action/destroy-network@main # with: # network-name: ${{ env.NETWORK_NAME }} # provider: digital-ocean - + # - name: post notification to slack on failure # if: ${{ failure() }} # uses: bryannice/gitactions-slack-notification@2.0.0 @@ -251,7 +250,7 @@ jobs: # SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} # SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" # SLACK_TITLE: "Nightly Spend Test Run Failed" - + # churn: # name: Network churning tests # runs-on: ${{ matrix.os }} diff --git a/README.md b/README.md index 54f69bd62a..b3322d8cf6 100644 --- a/README.md +++ b/README.md @@ -165,13 +165,13 @@ Take note of the console output for the next step (`RPC URL`, `Payment token add `--rewards-address` _is the address where you will receive your node earnings on._ ```bash -cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-custom --rpc-url --payment-token-address --data-payments-address +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-custom --rpc-url --payment-token-address --data-payments-address ``` 4. Verify node status:
```bash -cargo run --bin safenode-manager --features local-discovery -- status +cargo run --bin safenode-manager --features local -- status ``` The node manager's `run` command starts the node processes. The `status` command should show twenty-five @@ -185,7 +185,7 @@ retrieving them. Upload a file or a directory: ```bash -cargo run --bin safe --features local-discovery -- files upload +cargo run --bin safe --features local -- files upload ``` The output will show that the upload costs some tokens. @@ -193,7 +193,7 @@ The output will show that the upload costs some tokens. Now download the files again: ```bash -cargo run --bin safe --features local-discovery -- files download +cargo run --bin safe --features local -- files download ``` ### Folders @@ -212,7 +212,7 @@ Initialise a directory to then be able to track changes made on it, and sync the network: ```bash -cargo run --bin safe --features local-discovery -- folders init +cargo run --bin safe --features local -- folders init ``` Make sure you made a backup copy of the "recovery secret" generated by the above command, or the @@ -223,7 +223,7 @@ folders are considered new since it has just been initalised for tracking), befo those changes to the network, we can get a report of the changes that have been made locally: ```bash -cargo run --bin safe --features local-discovery -- folders status +cargo run --bin safe --features local -- folders status ``` We can now push all local changes made to files and directories to the network, as well as pull any @@ -231,7 +231,7 @@ changes that could have been made to the version stored on the network since las with it: ```bash -cargo run --bin safe --features local-discovery -- folders sync +cargo run --bin safe --features local -- folders sync ``` Now that's all stored on the network, you can download the folders onto any other path by providing @@ -239,7 +239,7 @@ it as the target directory to the following command (you will be prompted to ent secret" you obtained when initialising the directory with `init` command): ```bash -cargo run --bin safe --features local-discovery -- folders download +cargo run --bin safe --features local -- folders download ``` ### Token Transfers @@ -261,7 +261,7 @@ cargo run --bin safe -- wallet balance Now to send some tokens to an address: ``` -cargo run --bin safe --features local-discovery -- wallet send 2 [address] +cargo run --bin safe --features local -- wallet send 2 [address] ``` This will output a transfer as a hex string, which should be sent to the recipient. @@ -269,7 +269,7 @@ This transfer is encrypted to the recipient so only the recipient can read and r To receive a transfer, simply paste it after the wallet receive command: ``` -cargo run --bin safe --features local-discovery -- wallet receive [transfer] +cargo run --bin safe --features local -- wallet receive [transfer] ``` #### Out of band transaction signing @@ -324,13 +324,13 @@ Steps on the online device/computer with the watch-only wallet: We can verify a spend, optionally going back to the genesis transaction: ``` -cargo run --bin safe --features local-discovery -- wallet verify [--genesis] [spend address] +cargo run --bin safe --features local -- wallet verify [--genesis] [spend address] ``` All spends from genesis can be audited: ``` -cargo run --bin safe --features local-discovery -- wallet audit +cargo run --bin safe --features local -- wallet audit ``` ### Registers @@ -341,7 +341,7 @@ their use by two users to exchange text messages in a crude chat application. 
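At a glance, the whole exchange is just this same example binary run in two terminals with a different `--user` for each participant: one creates the register by nickname, the other joins it by address. A minimal sketch of the flow (the address placeholder stands for whatever Alice's run actually prints; the detailed walkthrough with real output follows):

```
# Terminal 1: Alice creates (or re-opens) a register by nickname
cargo run --example registers --features=local -- --user alice --reg-nickname myregister

# Terminal 2: Bob joins the same register via the address printed in Alice's terminal
cargo run --example registers --features=local -- --user bob --reg-address <address-printed-by-alice>
```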
In the first terminal, using the registers example, Alice creates a register: ``` -cargo run --example registers --features=local-discovery -- --user alice --reg-nickname myregister +cargo run --example registers --features=local -- --user alice --reg-nickname myregister ``` Alice can now write a message to the register and see anything written by anyone else. For example @@ -384,7 +384,7 @@ message Alice has written, and he can write back by running this command with th from Alice. (Note that the command should all be on one line): ``` -cargo run --example registers --features=local-discovery -- --user bob --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d +cargo run --example registers --features=local -- --user bob --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d ``` After retrieving the register and displaying the message from Alice, Bob can reply and at any time, @@ -411,7 +411,7 @@ A second example, `register_inspect` allows you to view its structure and conten the above example you again provide the address of the register. For example: ``` -cargo run --example register_inspect --features=local-discovery -- --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d +cargo run --example register_inspect --features=local -- --reg-address 50f4c9d55aa1f4fc19149a86e023cd189e509519788b4ad8625a1ce62932d1938cf4242e029cada768e7af0123a98c25973804d84ad397ca65cb89d6580d04ff07e5b196ea86f882b925be6ade06fc8d ``` After printing a summary of the register, this example will display diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 416e580bd2..bc84942118 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -16,14 +16,16 @@ data = [] vault = ["data"] files = ["data"] fs = ["tokio/fs", "files"] -local-discovery = ["sn_networking/local-discovery", "test_utils/local-discovery"] +local = ["sn_networking/local", "test_utils/local"] registers = [] [dependencies] bip39 = "2.0.0" bls = { package = "blsttc", version = "8.0.1" } bytes = { version = "1.0.1", features = ["serde"] } -curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = ["num-bigint"] } +curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ + "num-bigint", +] } eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } const-hex = "1.12.0" evmlib = { path = "../evmlib", version = "0.1" } @@ -59,7 +61,7 @@ wasm-bindgen-test = "0.3.43" console_error_panic_hook = "0.1.7" evmlib = { path = "../evmlib", version = "0.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available -instant = { version = "0.1", features = [ "wasm-bindgen", "inaccurate" ] } +instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } tracing-web = "0.1.3" [lints] diff --git a/autonomi/README.md b/autonomi/README.md index a1468a209e..3b27c6b0f0 100644 --- a/autonomi/README.md +++ b/autonomi/README.md @@ -26,10 +26,10 @@ autonomi = { path = "../autonomi", version = "0.1.0" } cargo run --bin evm_testnet ``` -3. 
Run a local network with the `local-discovery` feature and use the local evm node. +3. Run a local network with the `local` feature and use the local evm node. ```sh -cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-local +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-local ``` 4. Then run the tests with the `local` feature and pass the EVM params again: @@ -37,7 +37,7 @@ cargo run --bin=safenode-manager --features=local-discovery -- local run --build ```sh $ EVM_NETWORK=local cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local-discovery -- --nocapture +$ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=local -- --nocapture ``` ### Using a live testnet or mainnet @@ -45,10 +45,10 @@ $ RUST_LOG=autonomi EVM_NETWORK=local cargo test --package=autonomi --features=l Using the hardcoded `Arbitrum One` option as an example, but you can also use the command flags of the steps above and point it to a live network. -1. Run a local network with the `local-discovery` feature: +1. Run a local network with the `local` feature: ```sh -cargo run --bin=safenode-manager --features=local-discovery -- local run --build --clean --rewards-address evm-arbitrum-one +cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-arbitrum-one ``` 2. Then run the tests with the `local` feature. Make sure that the wallet of the private key you pass has enough gas and @@ -57,7 +57,7 @@ cargo run --bin=safenode-manager --features=local-discovery -- local run --build ```sh $ EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local # Or with logs -$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local-discovery -- --nocapture +$ RUST_LOG=autonomi EVM_NETWORK=arbitrum-one EVM_PRIVATE_KEY= cargo test --package=autonomi --features=local -- --nocapture ``` ### WebAssembly diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml index 1bdf94792c..b06d90d67b 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi_cli/Cargo.toml @@ -5,12 +5,17 @@ edition = "2021" [features] default = ["metrics"] -local-discovery = ["sn_peers_acquisition/local-discovery"] +local = ["sn_peers_acquisition/local"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] [dependencies] -autonomi = { path = "../autonomi", version = "0.1.0", features = ["data", "files", "fs", "registers"] } +autonomi = { path = "../autonomi", version = "0.1.0", features = [ + "data", + "files", + "fs", + "registers", +] } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "~0.6" dirs-next = "~2.0.0" diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index d6717cdfd8..13a86e4b7e 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -11,10 +11,7 @@ readme = "README.md" [features] default = [] -local-discovery = [ - "sn_client/local-discovery", - "sn_peers_acquisition/local-discovery", -] +local = ["sn_client/local", "sn_peers_acquisition/local"] network-contacts = ["sn_peers_acquisition/network-contacts"] nightly = [] open-metrics = ["sn_client/open-metrics"] diff --git a/sn_auditor/README.md b/sn_auditor/README.md index e8291f9f3d..1d8f96d59f 100644 --- a/sn_auditor/README.md +++ b/sn_auditor/README.md @@ -13,7 +13,7 @@ Running an 
auditor instance: cargo run --release --peer "/ip4/" # on a local testnet -cargo run --release --features=local-discovery +cargo run --release --features=local ``` It can be run with the following flags: diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs index 1cbdaf2f58..c8a420be1f 100644 --- a/sn_auditor/src/main.rs +++ b/sn_auditor/src/main.rs @@ -194,7 +194,7 @@ async fn connect_to_network(peers_args: PeersArgs) -> Result { bootstrap_peers.len(), ); let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided + // empty vec is returned if `local` flag is provided None } else { Some(bootstrap_peers) diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index be2a42c323..f46f0f32ae 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -21,10 +21,7 @@ harness = false [features] default = ["metrics"] distribution = ["base64", "bitcoin"] -local-discovery = [ - "sn_client/local-discovery", - "sn_peers_acquisition/local-discovery", -] +local = ["sn_client/local", "sn_peers_acquisition/local"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] nightly = [] diff --git a/sn_cli/benches/files.rs b/sn_cli/benches/files.rs index 288801d980..cece183f5c 100644 --- a/sn_cli/benches/files.rs +++ b/sn_cli/benches/files.rs @@ -23,7 +23,7 @@ const SAMPLE_SIZE: usize = 20; // This procedure includes the client startup, which will be measured by criterion as well. // As normal user won't care much about initial client startup, // but be more alerted on communication speed during transmission. -// It will be better to execute bench test with `local-discovery`, +// It will be better to execute bench test with `local`, // to make the measurement results reflect speed improvement or regression more accurately. fn safe_files_upload(dir: &str) { let output = Command::new("./target/release/safe") diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs index d4c8cac1d0..2fa931f217 100644 --- a/sn_cli/src/bin/main.rs +++ b/sn_cli/src/bin/main.rs @@ -140,7 +140,7 @@ async fn main() -> Result<()> { ); let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided + // empty vec is returned if `local` flag is provided None } else { Some(bootstrap_peers) diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 51c0bb1df0..57e3b4041a 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -12,7 +12,7 @@ version = "0.110.2" [features] default = [] -local-discovery = ["sn_networking/local-discovery"] +local = ["sn_networking/local"] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] test-utils = ["sn_peers_acquisition", "eyre"] # required to pass on flag to node builds diff --git a/sn_client/README.md b/sn_client/README.md index 1e3d5a8259..48a4fe9cf9 100644 --- a/sn_client/README.md +++ b/sn_client/README.md @@ -37,10 +37,10 @@ let client = Client::new(signer, peers, req_response_timeout, custom_concurrency Prerequisites: * A running local network. Refer to [`safe_network/README.md`](../README.md) to run a local test network. 
-* `SAFE_PEERS` environment variable or running the tests with `--feature=local-discovery`: +* `SAFE_PEERS` environment variable or running the tests with `--feature=local`: ```bash -$ cargo test --package sn_client --release --tests --features=local-discovery +$ cargo test --package sn_client --release --tests --features=local ``` ## Contributing diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index 5e9035ce37..e13cdd21a0 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -252,7 +252,7 @@ impl Client { self.events_broadcaster.broadcast(ClientEvent::PeerAdded { max_peers_to_connect: CLOSE_GROUP_SIZE, }); - // In case client running in non-local-discovery mode, + // In case client running in non-local mode, // it may take some time to fill up the RT. // To avoid such delay may fail the query with RecordNotFound, // wait till certain amount of peers populated into RT diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs index bce997d510..5560b1e0b8 100644 --- a/sn_client/src/test_utils.rs +++ b/sn_client/src/test_utils.rs @@ -36,7 +36,7 @@ static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); /// Get a new Client for testing pub async fn get_new_client(owner_sk: SecretKey) -> Result { - let bootstrap_peers = if cfg!(feature = "local-discovery") { + let bootstrap_peers = if cfg!(feature = "local") { None } else { match std::env::var("SAFE_PEERS") { diff --git a/sn_faucet/src/faucet_server.rs b/sn_faucet/src/faucet_server.rs index ddd11d2f1f..0147b434e3 100644 --- a/sn_faucet/src/faucet_server.rs +++ b/sn_faucet/src/faucet_server.rs @@ -52,13 +52,13 @@ use tokio::{fs, io::AsyncWriteExt}; /// /// ```bash /// # run faucet server -/// cargo run --features="local-discovery" --bin faucet --release -- server +/// cargo run --features="local" --bin faucet --release -- server /// /// # query faucet server for money for our address `get local wallet address` -/// curl "localhost:8000/`cargo run --features="local-discovery" --bin safe --release wallet address | tail -n 1`" > transfer_hex +/// curl "localhost:8000/`cargo run --features="local" --bin safe --release wallet address | tail -n 1`" > transfer_hex /// /// # receive transfer with our wallet -/// cargo run --features="local-discovery" --bin safe --release wallet receive --file transfer_hex +/// cargo run --features="local" --bin safe --release wallet receive --file transfer_hex /// /// # balance should be updated /// ``` diff --git a/sn_faucet/src/main.rs b/sn_faucet/src/main.rs index e01aecf426..ad1bf336f9 100644 --- a/sn_faucet/src/main.rs +++ b/sn_faucet/src/main.rs @@ -62,7 +62,7 @@ async fn main() -> Result<()> { let bootstrap_peers = opt.peers.get_peers().await?; let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local-discovery` flag is provided + // empty vec is returned if `local` flag is provided None } else { Some(bootstrap_peers) diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index eb8de53126..251f16bf71 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -12,7 +12,7 @@ version = "0.18.3" [features] default = [] -local-discovery = ["libp2p/mdns"] +local = ["libp2p/mdns"] upnp = ["libp2p/upnp"] # tcp is automatically enabled when compiling for wasm32 websockets = ["libp2p/tcp"] @@ -54,7 +54,7 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.14" } +sn_build_info = { path = 
"../sn_build_info", version = "0.1.14" } sn_protocol = { path = "../sn_protocol", version = "0.17.10" } sn_transfers = { path = "../sn_transfers", version = "0.19.2" } sn_registers = { path = "../sn_registers", version = "0.3.20" } diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 2a56235bde..a01db526b2 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -30,7 +30,7 @@ use crate::{ use crate::{transport, NodeIssue}; use futures::future::Either; use futures::StreamExt; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; @@ -236,7 +236,7 @@ pub(super) struct NodeBehaviour { pub(super) blocklist: libp2p::allow_block_list::Behaviour, pub(super) identify: libp2p::identify::Behaviour, - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] pub(super) mdns: mdns::tokio::Behaviour, #[cfg(feature = "upnp")] pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle, @@ -584,7 +584,7 @@ impl NetworkBuilder { } }; - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] let mdns_config = mdns::Config { // lower query interval to speed up peer discovery // this increases traffic, but means we no longer have clients unable to connect @@ -593,7 +593,7 @@ impl NetworkBuilder { ..Default::default() }; - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] let mdns = mdns::tokio::Behaviour::new(mdns_config, peer_id)?; // Identify Behaviour @@ -639,7 +639,7 @@ impl NetworkBuilder { request_response, kademlia, identify, - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] mdns, }; diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index ee0d0bbbb2..c313c73d07 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -13,7 +13,7 @@ mod swarm; use crate::{driver::SwarmDriver, error::Result}; use core::fmt; use custom_debug::Debug as CustomDebug; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; use libp2p::{ kad::{Record, RecordKey, K_VALUE}, @@ -39,7 +39,7 @@ pub(super) enum NodeEvent { Upnp(libp2p::upnp::Event), MsgReceived(libp2p::request_response::Event), Kademlia(libp2p::kad::Event), - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] Mdns(Box), Identify(Box), RelayClient(Box), @@ -66,7 +66,7 @@ impl From for NodeEvent { } } -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] impl From for NodeEvent { fn from(event: mdns::Event) -> Self { NodeEvent::Mdns(Box::new(event)) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 2da2037ae2..ba35f2bf18 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -10,7 +10,7 @@ use crate::{ cmd::LocalSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; -#[cfg(feature = "local-discovery")] +#[cfg(feature = "local")] use libp2p::mdns; #[cfg(feature = "open-metrics")] use libp2p::metrics::Recorder; @@ -277,7 +277,7 @@ impl SwarmDriver { libp2p::identify::Event::Error { .. 
} => debug!("identify: {iden:?}"), } } - #[cfg(feature = "local-discovery")] + #[cfg(feature = "local")] SwarmEvent::Behaviour(NodeEvent::Mdns(mdns_event)) => { event_string = "mdns"; match *mdns_event { diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 144a7b86fe..99f523ca72 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -15,7 +15,7 @@ path = "src/bin/safenode/main.rs" [features] default = ["metrics", "upnp", "open-metrics", "encrypt-records"] -local-discovery = ["sn_networking/local-discovery", "test_utils/local-discovery"] +local = ["sn_networking/local", "test_utils/local"] otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 37ad21d0f2..4bb21c720c 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -401,7 +401,7 @@ impl Node { } NetworkEvent::NewListenAddr(_) => { event_header = "NewListenAddr"; - if !cfg!(feature = "local-discovery") { + if !cfg!(feature = "local") { let network = self.network().clone(); let peers = self.initial_peers().clone(); let _handle = spawn(async move { diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index b1bb531244..fcb1755371 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -20,7 +20,7 @@ path = "src/bin/daemon/main.rs" [features] chaos = [] default = ["quic"] -local-discovery = [] +local = [] network-contacts = [] nightly = [] open-metrics = [] diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 3e2598e676..81c780dc00 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -829,7 +829,7 @@ pub enum LocalSubCmd { metrics_port: Option, /// Path to a safenode binary. /// - /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. + /// Make sure to enable the local feature flag on the safenode when compiling the binary. /// /// The path and version arguments are mutually exclusive. #[clap(long, conflicts_with = "node_version")] @@ -953,7 +953,7 @@ pub enum LocalSubCmd { metrics_port: Option, /// Path to a safenode binary /// - /// Make sure to enable the local-discovery feature flag on the safenode when compiling the binary. + /// Make sure to enable the local feature flag on the safenode when compiling the binary. /// /// The path and version arguments are mutually exclusive. 
#[clap(long, conflicts_with = "node_version", conflicts_with = "build")] diff --git a/sn_node_manager/src/cmd/mod.rs b/sn_node_manager/src/cmd/mod.rs index a8cb0bde8f..9e6af9351d 100644 --- a/sn_node_manager/src/cmd/mod.rs +++ b/sn_node_manager/src/cmd/mod.rs @@ -177,8 +177,8 @@ fn build_binary(bin_type: &ReleaseType) -> Result { if cfg!(feature = "otlp") { args.extend(["--features", "otlp"]); } - if cfg!(feature = "local-discovery") { - args.extend(["--features", "local-discovery"]); + if cfg!(feature = "local") { + args.extend(["--features", "local"]); } if cfg!(feature = "network-contacts") { args.extend(["--features", "network-contacts"]); diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index a8c51425a3..34fccf19a2 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.5.2" [features] -local-discovery = [] +local = [] network-contacts = ["sn_protocol"] websockets = [] @@ -20,10 +20,12 @@ clap = { version = "4.2.1", features = ["derive", "env"] } lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" -reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.10", optional = true} +reqwest = { version = "0.12.2", default-features = false, features = [ + "rustls-tls", +] } +sn_protocol = { path = "../sn_protocol", version = "0.17.10", optional = true } thiserror = "1.0.23" -tokio = { version = "1.32.0", default-features = false} +tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } url = { version = "2.4.0" } diff --git a/sn_peers_acquisition/src/lib.rs b/sn_peers_acquisition/src/lib.rs index 65967bebaa..8c39764d96 100644 --- a/sn_peers_acquisition/src/lib.rs +++ b/sn_peers_acquisition/src/lib.rs @@ -55,7 +55,7 @@ pub struct PeersArgs { /// Specify the URL to fetch the network contacts from. /// - /// This argument will be overridden if the "peers" argument is set or if the `local-discovery` + /// This argument will be overridden if the "peers" argument is set or if the `local` /// feature flag is enabled. #[cfg(feature = "network-contacts")] #[clap(long, conflicts_with = "first")] @@ -70,7 +70,7 @@ impl PeersArgs { /// Otherwise, peers are obtained in the following order of precedence: /// * The `--peer` argument. /// * The `SAFE_PEERS` environment variable. - /// * Using the `local-discovery` feature, which will return an empty peer list. + /// * Using the `local` feature, which will return an empty peer list. /// * Using the `network-contacts` feature, which will download the peer list from a file on S3. /// /// Note: the current behaviour is that `--peer` and `SAFE_PEERS` will be combined. Some tests @@ -86,7 +86,7 @@ impl PeersArgs { /// Otherwise, peers are obtained in the following order of precedence: /// * The `--peer` argument. /// * The `SAFE_PEERS` environment variable. - /// * Using the `local-discovery` feature, which will return an empty peer list. + /// * Using the `local` feature, which will return an empty peer list. /// /// This will not fetch the peers from network-contacts even if the `network-contacts` feature is enabled. Use /// get_peers() instead. 
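For orientation on how this precedence is consumed: the binaries touched by this patch (sn_auditor, sn_cli, sn_faucet) all wrap `get_peers()` in the same pattern, treating an empty peer list as "the `local` feature is active, so discover peers over mDNS". A minimal sketch of that call site, with the return and error types assumed for illustration:

```rust
use libp2p::Multiaddr;
use sn_peers_acquisition::PeersArgs;

/// Hypothetical helper mirroring the call sites updated in this patch.
async fn bootstrap_peers(args: PeersArgs) -> color_eyre::Result<Option<Vec<Multiaddr>>> {
    let peers = args.get_peers().await?;
    // An empty Vec is returned when the `local` feature is enabled,
    // so `None` here means "discover peers through mDNS instead".
    Ok(if peers.is_empty() { None } else { Some(peers) })
}
```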
@@ -106,11 +106,9 @@ impl PeersArgs { let mut peers = if !self.peers.is_empty() { info!("Using peers supplied with the --peer argument(s) or SAFE_PEERS"); self.peers - } else if cfg!(feature = "local-discovery") { + } else if cfg!(feature = "local") { info!("No peers given"); - info!( - "The `local-discovery` feature is enabled, so peers will be discovered through mDNS." - ); + info!("The `local` feature is enabled, so peers will be discovered through mDNS."); return Ok(vec![]); } else if skip_network_contacts { info!("Skipping network contacts"); diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index ca708ccc85..37431d81fc 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -10,7 +10,7 @@ repository = "https://github.com/maidsafe/safe_network" version = "0.4.6" [features] -local-discovery = ["sn_peers_acquisition/local-discovery"] +local = ["sn_peers_acquisition/local"] [dependencies] bytes = { version = "1.0.1", features = ["serde"] } @@ -19,6 +19,6 @@ dirs-next = "~2.0.0" evmlib = { path = "../evmlib", version = "0.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" -serde = { version = "1.0.133", features = [ "derive"]} +serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } \ No newline at end of file +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index 75a3276071..3151878ade 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -26,7 +26,7 @@ pub fn gen_random_data(len: usize) -> Bytes { /// /// An empty `Vec` will be returned if the env var is not set or if local discovery is enabled. pub fn peers_from_env() -> Result> { - let bootstrap_peers = if cfg!(feature = "local-discovery") { + let bootstrap_peers = if cfg!(feature = "local") { Ok(vec![]) } else if let Ok(peers_str) = std::env::var("SAFE_PEERS") { peers_str.split(',').map(parse_peer_addr).collect() From 96f3cca14d64f6b139ed273ac280fe91ab0520fd Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 10 Oct 2024 15:18:15 +0900 Subject: [PATCH 146/255] [MERGE] main into evm-lib --- .github/workflows/release.yml | 11 + CHANGELOG.md | 57 ++ Cargo.lock | 650 +++++++++--------- Justfile | 6 +- README.md | 22 +- adr/libp2p/identify-interval.md | 2 +- autonomi/Cargo.toml | 12 +- autonomi/src/self_encryption.rs | 6 +- nat-detection/Cargo.toml | 8 +- node-launchpad/Cargo.toml | 12 +- .../src/components/popup/beta_programme.rs | 5 +- node-launchpad/src/components/status.rs | 2 +- node-launchpad/src/config.rs | 14 +- node-launchpad/src/node_mgmt.rs | 4 + release-cycle-info | 4 +- resources/scripts/find_prs.py | 132 ++++ sn_auditor/Cargo.toml | 12 +- sn_auditor/src/main.rs | 82 ++- sn_build_info/Cargo.toml | 2 +- sn_cli/Cargo.toml | 14 +- sn_client/Cargo.toml | 21 +- sn_client/src/chunks/pac_man.rs | 11 +- sn_client/src/files/download.rs | 5 +- sn_client/src/folders.rs | 2 +- sn_faucet/Cargo.toml | 16 +- sn_logging/Cargo.toml | 2 +- sn_logging/src/lib.rs | 20 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 +- sn_networking/src/driver.rs | 11 + sn_networking/src/event/mod.rs | 36 +- sn_networking/src/metrics/bad_node.rs | 396 ++++++++++- sn_networking/src/metrics/mod.rs | 68 +- sn_networking/src/record_store.rs | 58 +- sn_node/Cargo.toml | 24 +- sn_node/src/bin/safenode/main.rs | 24 +- sn_node/tests/data_with_churn.rs | 8 +- sn_node_manager/Cargo.toml | 14 +- 
sn_node_manager/src/add_services/config.rs | 14 +- sn_node_manager/src/add_services/mod.rs | 18 +- sn_node_manager/src/add_services/tests.rs | 352 +++++++++- sn_node_manager/src/bin/cli/main.rs | 15 + sn_node_manager/src/cmd/node.rs | 10 +- sn_node_manager/src/lib.rs | 381 ++++++++++ sn_node_manager/src/local.rs | 2 + sn_node_manager/src/rpc.rs | 6 + sn_node_rpc_client/Cargo.toml | 16 +- sn_peers_acquisition/Cargo.toml | 8 +- sn_protocol/Cargo.toml | 8 +- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 6 +- sn_service_management/src/node.rs | 10 + sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- token_supplies/Cargo.toml | 2 +- 55 files changed, 2009 insertions(+), 630 deletions(-) create mode 100755 resources/scripts/find_prs.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fd095472c0..a401a1483c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,6 +2,11 @@ name: release on: workflow_dispatch: + inputs: + chunk-size: + description: Specify the chunk size in bytes. If not used, the current default is 1048576. + type: number + required: false # The key variables also need to be passed to `cross`, which runs in a container and does not # inherit variables from the parent environment. The `cross` tool is used in the `build` @@ -49,6 +54,12 @@ jobs: - shell: bash run: cargo binstall --no-confirm just + - name: Set chunk size if applicable + if: ${{ inputs.chunk-size != '' }} + shell: bash + run: | + echo "MAX_CHUNK_SIZE=${{ inputs.chunk-size }}" >> $GITHUB_ENV + - name: build release artifacts shell: bash run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index d7eabe0d6d..070c3c661a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,63 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-10-08 + +### Network + +#### Changed + +- Optimize auditor tracking by not to re-attempt fetched spend. +- Optimize auditor tracking function by using DashMap and stream. + +## 2024-10-07 + +### Network + +#### Changed + +- Increase chunk size to 4MB with node size remaining at 32GB +- Bootstrap peer parsing in CI was changed to accommodate new log format in libp2p + +### Node Manager + +#### Added + +- The `add` command has new `--max-log-files` and `--max-archived-log-files` arguments to support + capping node log output + +#### Fixed + +- The Discord username on the `--owner` argument will always be converted to lower case + +#### Launchpad + +### Added + +- Increased logging related to app configuration. This could help solving issues on launchpad start + up. 
+ +## 2024-10-03 + +### Launchpad + +### Changed + +- Upgrade to `Ratatui` v0.28.1 +- Styling and layout fixes + +#### Added + +- Drives that don't have enough space are being shown and flagged +- Error handling and generic error popup +- New metrics in the `Status` section +- Confirmation needed when changing connection mode + +### Fixed + +- NAT mode only on first start in `Automatic Connection Mode` +- Force Discord username to be in lowercase + ## 2024-10-01 ### Launchpad diff --git a/Cargo.lock b/Cargo.lock index 7d1738c96a..5abb5c540a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.33" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db" +checksum = "94c225801d42099570d0674701dddd4142f0ef715282aeb5985042e2ec962df7" dependencies = [ "num_enum", "strum", @@ -187,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" +checksum = "eeb750349efda145ca6aada68d0336067f7f364d7d44ef09e2cf000b040c5e99" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -200,9 +200,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" +checksum = "f95d76a38cae906fd394a5afb0736aaceee5432efe76addfd71048e623e208af" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -268,9 +268,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" +checksum = "03c66eec1acdd96b39b995b8f5ee5239bc0c871d62c527ae1ac9fd1d7fecd455" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -345,18 +345,19 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +checksum = "8ecb848c43f6b06ae3de2e4a67496cbbabd78ae87db0f1248934f15d76192c6a" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", "derive_more", - "hashbrown 0.14.5", + "foldhash", + "hashbrown 0.15.0", "hex-literal", - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "k256", "keccak-asm", @@ -398,7 +399,7 @@ dependencies = [ "futures-utils-wasm", "lru", "pin-project", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", @@ -426,7 +427,7 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -441,7 +442,7 @@ dependencies = [ "alloy-transport-http", "futures", "pin-project", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "tokio", @@ -537,42 +538,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +checksum = "661c516eb1fa3294cc7f2fb8955b3b609d639c282ac81a4eedb14d3046db503a" dependencies = [ 
"alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +checksum = "ecbabb8fc3d75a0c2cea5215be22e7a267e3efde835b0f2a8922f5e3f5d47683" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +checksum = "16517f2af03064485150d89746b8ffdcdbc9b6eeb3d536fb66efd7c2846fbc75" dependencies = [ "alloy-json-abi", "const-hex", @@ -581,15 +582,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.77", + "syn 2.0.79", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" +checksum = "c07ebb0c1674ff8cbb08378d7c2e0e27919d2a2dae07ad3bca26174deda8d389" dependencies = [ "serde", "winnow", @@ -597,9 +598,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +checksum = "8e448d879903624863f608c552d10efb0e0905ddbee98b0049412799911eb062" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -636,7 +637,7 @@ checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde_json", "tower 0.5.1", "tracing", @@ -885,8 +886,8 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", - "synstructure 0.13.1", + "syn 2.0.79", + "synstructure", ] [[package]] @@ -897,7 +898,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -969,9 +970,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -980,24 +981,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" 
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1043,7 +1044,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1052,18 +1053,18 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", ] [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "autonomi" -version = "0.1.1" +version = "0.1.2" dependencies = [ "bip39", "blsttc", @@ -1078,7 +1079,7 @@ dependencies = [ "libp2p 0.54.1", "rand 0.8.5", "rmp-serde", - "self_encryption 0.29.2", + "self_encryption", "serde", "sn_bls_ckd", "sn_curv", @@ -1088,7 +1089,6 @@ dependencies = [ "sn_peers_acquisition", "sn_protocol", "sn_registers", - "sn_transfers", "test_utils", "thiserror", "tokio", @@ -1253,9 +1253,9 @@ dependencies = [ [[package]] name = "bip39" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" dependencies = [ "bitcoin_hashes", "serde", @@ -1277,11 +1277,21 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] [[package]] name = "bitflags" @@ -1460,7 +1470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.8", "serde", ] @@ -1612,9 +1622,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -1705,9 +1715,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -1715,9 +1725,9 @@ dependencies = [ [[package]] name = "clap-verbosity-flag" -version = "2.2.1" 
+version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63d19864d6b68464c59f7162c9914a0b569ddc2926b4a2d71afe62a9738eff53" +checksum = "e099138e1807662ff75e2cebe4ae2287add879245574489f9b1588eb5e5564ed" dependencies = [ "clap", "log", @@ -1725,9 +1735,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.17" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -1735,19 +1745,19 @@ dependencies = [ "strsim", "terminal_size", "unicase", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -1867,7 +1877,7 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width", + "unicode-width 0.1.14", "windows-sys 0.52.0", ] @@ -1883,9 +1893,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if", "cpufeatures", @@ -2207,7 +2217,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2229,8 +2239,8 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.77", - "synstructure 0.13.1", + "syn 2.0.79", + "synstructure", ] [[package]] @@ -2254,7 +2264,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2265,7 +2275,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2399,7 +2409,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "unicode-xid", ] @@ -2515,7 +2525,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2671,7 +2681,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -2690,20 +2700,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" -[[package]] -name = "err-derive" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c34a887c8df3ed90498c1c437ce21f211c8e27672921a8ffa293cb8d6d4caa9e" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn 1.0.109", - "synstructure 0.12.6", -] - [[package]] name = "errno" version = "0.3.9" @@ -2917,9 +2913,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" 
-version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide 0.8.0", @@ -2940,6 +2936,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2985,9 +2987,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -3010,9 +3012,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -3020,15 +3022,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -3038,9 +3040,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -3054,13 +3056,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3070,21 +3072,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = 
"e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-ticker" @@ -3109,9 +3111,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -3463,7 +3465,7 @@ checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -3713,8 +3715,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -3776,7 +3778,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util 0.7.12", @@ -3813,6 +3815,17 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", "serde", ] @@ -3891,6 +3904,12 @@ dependencies = [ "serde", ] +[[package]] +name = "hex-conservative" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" + [[package]] name = "hex-literal" version = "0.4.1" @@ -4055,9 +4074,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -4148,7 +4167,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -4170,9 +4189,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", @@ -4183,7 +4202,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -4295,7 +4313,7 @@ dependencies = [ "globset", "log", "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.8", "same-file", "walkdir", "winapi-util", @@ -4333,18 +4351,18 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", "hashbrown 0.12.3", ] [[package]] name = "indexmap" -version 
= "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -4359,7 +4377,7 @@ dependencies = [ "number_prefix", "portable-atomic", "tokio", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -4369,7 +4387,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.5.0", + "indexmap 2.6.0", "is-terminal", "itoa", "log", @@ -4397,7 +4415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -4426,9 +4444,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" @@ -4491,9 +4509,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -4549,9 +4567,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libm" @@ -5022,7 +5040,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.13", + "rustls 0.23.14", "socket2", "thiserror", "tokio", @@ -5133,7 +5151,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5165,7 +5183,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5271,7 +5289,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", "scopeguard", ] @@ -5283,11 +5301,11 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -5470,7 +5488,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5493,9 +5511,9 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.1" +version = "0.18.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" dependencies = [ "arrayref", "byteorder", @@ -5506,7 +5524,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.7.2", + "unsigned-varint 0.8.0", "url", ] @@ -5553,7 +5571,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.6" +version = "0.2.7" dependencies = [ "clap", "clap-verbosity-flag", @@ -5670,7 +5688,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.17" +version = "0.3.19" dependencies = [ "atty", "better-panic", @@ -5693,7 +5711,7 @@ dependencies = [ "pretty_assertions", "prometheus-parse", "ratatui", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "signal-hook", @@ -5765,7 +5783,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", "num-integer", "num-traits", ] @@ -5812,7 +5830,7 @@ version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", "libm", ] @@ -5843,7 +5861,7 @@ checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -5881,9 +5899,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -6212,7 +6230,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6233,27 +6251,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6290,9 +6308,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plist" @@ -6301,7 +6319,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42cf17e9a1800f5f396bc67d193dc9411b59012a5876445ef450d449881e1016" dependencies = [ "base64 0.22.1", - "indexmap 2.5.0", + "indexmap 2.6.0", "quick-xml 0.32.0", "serde", "time", @@ -6375,9 +6393,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -6489,30 +6507,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -6532,14 +6526,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -6570,7 +6564,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -6599,7 +6593,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift 0.3.0", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -6750,7 +6744,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.13", + "rustls 0.23.14", "socket2", "thiserror", "tokio", @@ -6767,7 +6761,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash", - "rustls 0.23.13", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", @@ -7030,7 +7024,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -7076,9 +7070,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -7096,14 +7090,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -7117,13 +7111,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -7134,9 +7128,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -7181,9 +7175,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -7203,8 +7197,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.13", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "serde_json", @@ -7475,9 +7469,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "once_cell", "ring 0.17.8", @@ -7498,19 +7492,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-webpki" @@ -7666,31 +7659,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "self_encryption" -version = "0.29.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "894da3241a9e426c16fb8cb28b19416eae5fafdc7742e4bc505c1821661c140f" -dependencies = [ - "aes", - "bincode", - "brotli", - "bytes", - "cbc", - "err-derive", - "hex 0.4.3", - "itertools 0.10.5", - "num_cpus", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "serde", - "tempfile", - "tiny-keccak", - "tokio", - "xor_name", -] - [[package]] name = "self_encryption" version = "0.30.0" @@ -7782,7 +7750,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -7799,9 +7767,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" 
dependencies = [ "serde", ] @@ -7833,7 +7801,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -8010,7 +7978,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.3.0", + "autocfg 1.4.0", ] [[package]] @@ -8021,7 +7989,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.5" +version = "0.10.6" dependencies = [ "assert_cmd", "assert_fs", @@ -8040,7 +8008,7 @@ dependencies = [ "predicates 3.1.2", "prost 0.9.0", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.8", "semver 1.0.23", "serde", "serde_json", @@ -8074,7 +8042,7 @@ dependencies = [ "flate2", "lazy_static", "regex", - "reqwest 0.12.7", + "reqwest 0.12.8", "semver 1.0.23", "serde_json", "tar", @@ -8097,7 +8065,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.14" +version = "0.1.15" dependencies = [ "chrono", "tracing", @@ -8161,7 +8129,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.35" +version = "0.2.36" dependencies = [ "chrono", "color-eyre", @@ -8186,7 +8154,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.15" +version = "0.1.16" dependencies = [ "clap", "color-eyre", @@ -8200,7 +8168,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.18.3" +version = "0.18.4" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8222,7 +8190,7 @@ dependencies = [ "rand 0.8.5", "rayon", "rmp-serde", - "self_encryption 0.30.0", + "self_encryption", "serde", "sn_build_info", "sn_evm", @@ -8245,7 +8213,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.111.3" +version = "0.111.4" dependencies = [ "assert_fs", "async-trait", @@ -8270,9 +8238,9 @@ dependencies = [ "prost 0.9.0", "rand 0.8.5", "rayon", - "reqwest 0.12.7", + "reqwest 0.12.8", "rmp-serde", - "self_encryption 0.29.2", + "self_encryption", "serde", "serde_json", "sn_build_info", @@ -8301,7 +8269,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.30" +version = "0.6.31" dependencies = [ "assert_fs", "async-trait", @@ -8328,13 +8296,13 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.2" +version = "0.5.3" dependencies = [ "clap", "lazy_static", "libp2p 0.54.1", "rand 0.8.5", - "reqwest 0.12.7", + "reqwest 0.12.8", "sn_protocol", "thiserror", "tokio", @@ -8344,7 +8312,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.10" +version = "0.17.11" dependencies = [ "blsttc", "bytes", @@ -8374,7 +8342,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.20" +version = "0.3.21" dependencies = [ "blsttc", "crdts", @@ -8391,7 +8359,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.13" +version = "0.3.14" dependencies = [ "async-trait", "dirs-next", @@ -8417,7 +8385,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.19.2" +version = "0.19.3" dependencies = [ "assert_fs", "blsttc", @@ -8574,7 +8542,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8585,9 +8553,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = 
"symbolic-common" -version = "12.11.1" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" +checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" dependencies = [ "debugid", "memmap2", @@ -8597,9 +8565,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.11.1" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" +checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -8619,9 +8587,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -8630,14 +8598,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" +checksum = "20e7b52ad118b2153644eea95c6fc740b6c1555b2344fdab763fc9de4075f665" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8655,18 +8623,6 @@ dependencies = [ "futures-core", ] -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "unicode-xid", -] - [[package]] name = "synstructure" version = "0.13.1" @@ -8675,7 +8631,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8722,9 +8678,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909" +checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" dependencies = [ "filetime", "libc", @@ -8733,9 +8689,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -8746,12 +8702,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -8762,7 +8718,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.6" +version = "0.4.7" dependencies = [ "bytes", "color-eyre", @@ -8777,22 +8733,22 
@@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8893,7 +8849,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.53" +version = "0.1.54" dependencies = [ "dirs-next", "reqwest 0.11.27", @@ -8939,7 +8895,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -8969,7 +8925,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -9048,11 +9004,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -9209,7 +9165,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9329,7 +9285,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9358,7 +9314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3e785f863a3af4c800a2a669d0b64c879b538738e352607e2624d03f868dc01" dependencies = [ "crossterm 0.27.0", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -9388,9 +9344,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "uint" @@ -9421,9 +9377,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-bom" @@ -9460,7 +9416,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ 
-9469,6 +9425,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -9687,9 +9649,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if", "once_cell", @@ -9698,24 +9660,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" dependencies = [ "cfg-if", "js-sys", @@ -9725,9 +9687,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9735,28 +9697,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "wasm-bindgen-test" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" +checksum = "a93d2a9ae98f1af8953f6415397299d808cce0a24f6d7c613d27bd83edf98da8" dependencies = [ "console_error_panic_hook", "js-sys", @@ -9769,13 +9731,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" +checksum = 
"fb8b294691f640bad8f2bb35a11bb28272701b1d687bd5fd661a27684e894d4d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -9794,9 +9756,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" dependencies = [ "js-sys", "wasm-bindgen", @@ -10116,9 +10078,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -10290,7 +10252,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] @@ -10310,7 +10272,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.79", ] [[package]] diff --git a/Justfile b/Justfile index 693929fcaf..fc0aadb9f1 100644 --- a/Justfile +++ b/Justfile @@ -108,8 +108,12 @@ build-release-artifacts arch nightly="false": mkdir artifacts cargo clean + if [ -n "$MAX_CHUNK_SIZE" ]; then + echo "Overriding chunk size to $MAX_CHUNK_SIZE bytes" + fi + echo "================" - echo "= Network Keys =" + echo " Network Keys " echo "================" echo "FOUNDATION_PK: $FOUNDATION_PK" echo "GENESIS_PK: $GENESIS_PK" diff --git a/README.md b/README.md index b3322d8cf6..6eff1e78fe 100644 --- a/README.md +++ b/README.md @@ -31,10 +31,10 @@ You should build from the `stable` branch, as follows: ``` git checkout stable -export GENESIS_PK=806b5c2eba70354ea92ba142c57587c9c5467ff69f0d43c482cda2313f9351e40c6120d76a2495cb3ca8367eee0a676f -export FOUNDATION_PK=8296d1f92f0bccaf1a90d57b65122f8c671172074000fc4db538dda4042360ea493a475ae548f1096adf83bd7e940727 -export NETWORK_ROYALTIES_PK=81b8f8a6bc51a6d4d6a2d7a7713ae86467ea0efd971c56d4c7c9380e0a7346b319a0a14178bd134cbb2fdb8571cbe15b -export PAYMENT_FORWARD_PK=8f97398fe7565a39ba83df42d18d253138d17493b4b64cb70282a562ee18a1c4bfe87405519f9c53e728753a103cd953 +export FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad +export GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c +export NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 +export PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f cargo build --release --features=network-contacts --bin safenode ``` @@ -53,10 +53,10 @@ a connecting node is compatible. 
For a client to connect to the current beta network, these keys must be set at build time: ``` -GENESIS_PK=8829ca178d6022de16fb8d3498411dd8a674a69c5f12e04d8b794a52ab056f1d419d12f690df1082dfa7efbbb10f62fa -FOUNDATION_PK=84418659a8581b510c40b12e57da239787fd0d3b323f102f09fae9daf2ac96907e0045b1653c301de45117d393d92678 -NETWORK_ROYALTIES_PK=8c027130571cea2387a0ceb37c14fec12849015be1573ea6d0a8e4d48da2c1fbe2907ae7503bb7c385b21e2d7ac9d6ff -PAYMENT_FORWARD_PK=8c2f406a52d48d48505e1a3fdbb0c19ab42cc7c4807e9ea19c1fff3e5148f3bbe53431ec5a07544aaeef764e073e4b2f +FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad +GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c +NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 +PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f ``` ##### Features @@ -103,7 +103,7 @@ The `websockets` feature is available for the `sn_networking` crate, and above, tcp over websockets. If building for `wasm32` then `websockets` are enabled by default as this is the only method -avilable to communicate with a network as things stand. (And that network must have `websockets` +available to communicate with a network as things stand. (And that network must have `websockets` enabled.) ##### Building for wasm32 @@ -133,7 +133,7 @@ YMMV until stabilised. - [Transfers](https://github.com/maidsafe/safe_network/blob/main/sn_transfers/README.md) The transfers crate, used to send and receive tokens on the network. - [Peers Acquisition](https://github.com/maidsafe/safe_network/blob/main/sn_peers_acquisition/README.md) - The peers peers acqisition crate, or: how the network layer discovers bootstrap peers. + The peers acquisition crate, or: how the network layer discovers bootstrap peers. - [Build Info](https://github.com/maidsafe/safe_network/blob/main/sn_build_info/README.md) Small helper used to get the build/commit versioning info for debug purposes. @@ -219,7 +219,7 @@ Make sure you made a backup copy of the "recovery secret" generated by the above one you have provided when prompted. If any changes are now made to files or directories within this folder (at this point all files and -folders are considered new since it has just been initalised for tracking), before trying to push +folders are considered new since it has just been initialised for tracking), before trying to push those changes to the network, we can get a report of the changes that have been made locally: ```bash diff --git a/adr/libp2p/identify-interval.md b/adr/libp2p/identify-interval.md index 59dd9db4c6..1b068c1637 100644 --- a/adr/libp2p/identify-interval.md +++ b/adr/libp2p/identify-interval.md @@ -8,7 +8,7 @@ Accepted Idle nodes in a network of moderate data have a high ongoing bandwidth. -This appears to be because of the identify polling of nodes, which occurs at the deafult libp2p rate, of once per 5 minutes. +This appears to be because of the identify polling of nodes, which occurs at the default libp2p rate, of once per 5 minutes. We see ~1mb/s traffic on nodes in a moderate network. 
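For context on the ADR hunk above: in rust-libp2p the identify poll frequency is set on `identify::Config`, so lengthening it is a one-line change. The following is a minimal sketch, assuming a node built on libp2p 0.54; the protocol string and the one-hour interval are illustrative assumptions, not values taken from this repository:

```rust
use std::time::Duration;

use libp2p::identify;
use libp2p::identity::Keypair;

// Sketch: build an identify config with a longer poll interval than the
// libp2p default (5 minutes), trading peer-info freshness for less idle
// bandwidth. The protocol string and interval are illustrative assumptions.
fn identify_config(local_key: &Keypair) -> identify::Config {
    identify::Config::new("/example/id/1.0.0".to_string(), local_key.public())
        .with_interval(Duration::from_secs(60 * 60)) // poll peers hourly
}
```

The trade-off is that peer records go stale for longer between polls, which is acceptable for idle nodes where the steady ~1mb/s identify traffic dominates.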
diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index bc84942118..4f03191d31 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.1.1" +version = "0.1.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -33,12 +33,12 @@ hex = "~0.4.3" libp2p = "0.54.1" rand = "0.8.5" rmp-serde = "1.1.1" -self_encryption = "~0.29.0" +self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.3" } -sn_protocol = { version = "0.17.10", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.3.20" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } +sn_networking = { path = "../sn_networking", version = "0.18.4" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { version = "0.17.11", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.3.21" } sn_evm = { path = "../sn_evm" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } diff --git a/autonomi/src/self_encryption.rs b/autonomi/src/self_encryption.rs index b43648f332..097dcb69ce 100644 --- a/autonomi/src/self_encryption.rs +++ b/autonomi/src/self_encryption.rs @@ -10,6 +10,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use self_encryption::{DataMap, MAX_CHUNK_SIZE}; use serde::{Deserialize, Serialize}; use sn_protocol::storage::Chunk; +use tracing::debug; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -54,14 +55,15 @@ fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec), Error> { let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?; let (data_map_chunk, additional_chunks) = loop { + debug!("Max chunk size: {}", *MAX_CHUNK_SIZE); let chunk = Chunk::new(chunk_content); // If datamap chunk is less than `MAX_CHUNK_SIZE` return it so it can be directly sent to the network. - if MAX_CHUNK_SIZE >= chunk.serialised_size() { + if *MAX_CHUNK_SIZE >= chunk.serialised_size() { chunks.reverse(); // Returns the last datamap, and all the chunks produced. 
break (chunk, chunks); } else { - let mut bytes = BytesMut::with_capacity(MAX_CHUNK_SIZE).writer(); + let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE).writer(); let mut serialiser = rmp_serde::Serializer::new(&mut bytes); chunk.serialize(&mut serialiser)?; let serialized_chunk = bytes.into_inner().freeze(); diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 6413c0fce6..d43c9fe407 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.6" +version = "0.2.7" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_networking = { path = "../sn_networking", version = "0.18.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_networking = { path = "../sn_networking", version = "0.18.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 050ee6b2cb..3b55815bfe 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.17" +version = "0.3.19" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,12 +51,12 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn-node-manager = { version = "0.10.5", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.2", path = "../sn_peers_acquisition" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn-node-manager = { version = "0.10.6", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.3", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.13", path = "../sn_service_management" } +sn_service_management = { version = "0.3.14", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 8f0a547fe9..615c20bcf4 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -58,14 +58,15 @@ impl BetaProgramme { fn capture_inputs(&mut self, key: KeyEvent) -> Vec { let send_back = match key.code { KeyCode::Enter => { - let username = self.discord_input_filed.value().to_string(); + let username = self.discord_input_filed.value().to_string().to_lowercase(); + self.discord_input_filed = username.clone().into(); debug!( "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", ); self.state = BetaProgrammeState::DiscordIdAlreadySet; vec![ - 
Action::StoreDiscordUserName(self.discord_input_filed.value().to_string()), + Action::StoreDiscordUserName(username.clone()), Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), Action::SwitchScene(Scene::Status), ] diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 7d31f1fc92..43e0970782 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -299,7 +299,7 @@ impl Component for Status { self.lock_registry = Some(LockRegistryState::ResettingNodes); info!("Resetting safenode services because the Discord Username was reset."); let action_sender = self.get_actions_sender()?; - reset_nodes(action_sender, true); + reset_nodes(action_sender, false); } } Action::StoreStorageDrive(ref drive_mountpoint, ref _drive_name) => { diff --git a/node-launchpad/src/config.rs b/node-launchpad/src/config.rs index 93b0bd60be..c7869eaf69 100644 --- a/node-launchpad/src/config.rs +++ b/node-launchpad/src/config.rs @@ -137,11 +137,15 @@ impl AppData { return Ok(Self::default()); } - let data = std::fs::read_to_string(&config_path) - .map_err(|_| color_eyre::eyre::eyre!("Failed to read app data file"))?; - - let app_data: AppData = serde_json::from_str(&data) - .map_err(|_| color_eyre::eyre::eyre!("Failed to parse app data"))?; + let data = std::fs::read_to_string(&config_path).map_err(|e| { + error!("Failed to read app data file: {}", e); + color_eyre::eyre::eyre!("Failed to read app data file: {}", e) + })?; + + let app_data: AppData = serde_json::from_str(&data).map_err(|e| { + error!("Failed to parse app data: {}", e); + color_eyre::eyre::eyre!("Failed to parse app data: {}", e) + })?; Ok(app_data) } diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 1b591e5a95..893523f245 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -297,6 +297,8 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) { None, None, None, + None, + None, None, // We don't care about the port, as we are scaling down config.owner.clone(), config.peers_args.clone(), @@ -368,6 +370,8 @@ async fn add_nodes( None, None, None, + None, + None, port_range, config.owner.clone(), config.peers_args.clone(), diff --git a/release-cycle-info b/release-cycle-info index 1bc2281ec8..2b83422132 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -14,5 +14,5 @@ # number for all the released binaries. release-year: 2024 release-month: 10 -release-cycle: 1 -release-cycle-counter: 2 +release-cycle: 2 +release-cycle-counter: 3 diff --git a/resources/scripts/find_prs.py b/resources/scripts/find_prs.py new file mode 100755 index 0000000000..dbfc3e8c03 --- /dev/null +++ b/resources/scripts/find_prs.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 + +import requests +import argparse +import os +from typing import List +from datetime import datetime + +class GitHubPRFinder: + def __init__(self, token: str): + self.owner = "maidsafe" + self.repo = "safe_network" + self.token = token + self.api_url = f"https://api.github.com/repos/{self.owner}/{self.repo}/commits" + + def get_pr_for_commit(self, commit_sha: str) -> List[dict]: + """ + Retrieves the list of pull requests that include the given commit SHA. + + Args: + commit_sha (str): The commit hash to search for. + + Returns: + List[dict]: A list of pull request data dictionaries. 
+ """ + headers = { + 'Accept': 'application/vnd.github.groot-preview+json', + 'Authorization': f'token {self.token}' + } + url = f"{self.api_url}/{commit_sha}/pulls" + response = requests.get(url, headers=headers) + + if response.status_code == 200: + return response.json() + else: + return [] + +def parse_arguments() -> argparse.Namespace: + """ + Parses command-line arguments. + + Returns: + argparse.Namespace: The parsed arguments. + """ + parser = argparse.ArgumentParser(description="Find merged PRs for commit hashes listed in a file.") + parser.add_argument('--path', required=True, help='Path to the file containing commit hashes, one per line.') + parser.add_argument('--token', help='GitHub personal access token. Can also be set via GITHUB_PAT_SAFE_NETWORK_PR_LIST environment variable.') + return parser.parse_args() + +def read_commits_from_file(file_path: str) -> List[str]: + """ + Reads commit hashes from a file, one per line. + + Args: + file_path (str): The path to the file containing commit hashes. + + Returns: + List[str]: A list of commit hashes. + """ + try: + with open(file_path, 'r') as file: + commits = [line.strip() for line in file if line.strip()] + return commits + except FileNotFoundError: + return [] + except Exception: + return [] + +def format_date(iso_date_str: str) -> str: + """ + Formats an ISO 8601 date string to 'YYYY-MM-DD'. + + Args: + iso_date_str (str): The ISO 8601 date string. + + Returns: + str: The formatted date string. + """ + try: + date_obj = datetime.strptime(iso_date_str, "%Y-%m-%dT%H:%M:%SZ") + return date_obj.strftime("%Y-%m-%d") + except ValueError: + return iso_date_str.split('T')[0] if 'T' in iso_date_str else iso_date_str + +def main(): + args = parse_arguments() + token = args.token or os.getenv('GITHUB_PAT_SAFE_NETWORK_PR_LIST') + if not token: + print("GitHub token not provided. 
Use --token argument or set GITHUB_PAT_SAFE_NETWORK_PR_LIST environment variable.") + return + + commits = read_commits_from_file(args.path) + if not commits: + print("No commit hashes to process.") + return + + finder = GitHubPRFinder(token=token) + + pr_entries = [] + no_pr_entries = [] + + for commit in commits: + prs = finder.get_pr_for_commit(commit) + if prs: + pr_found = False + for pr in prs: + merged_at = pr.get('merged_at') + if merged_at: + pr_found = True + formatted_date = format_date(merged_at) + pr_entry = { + 'date': formatted_date, + 'commit': commit, + 'pr_number': pr['number'], + 'pr_title': pr['title'] + } + pr_entries.append(pr_entry) + if not pr_found: + no_pr_entries.append(f"No merged PR found for commit {commit}.") + else: + no_pr_entries.append(f"No merged PR found for commit {commit}.") + + pr_entries_sorted = sorted(pr_entries, key=lambda x: x['date']) + + for entry in pr_entries_sorted: + print(f"{entry['date']} - {entry['commit']} #{entry['pr_number']}: {entry['pr_title']}") + + for entry in no_pr_entries: + print(entry) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index 13a86e4b7e..f89d345672 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.3.2" +version = "0.3.5" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -29,11 +29,11 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_client = { path = "../sn_client", version = "0.110.2" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_client = { path = "../sn_client", version = "0.110.4" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs index c8a420be1f..8a340d55fe 100644 --- a/sn_auditor/src/main.rs +++ b/sn_auditor/src/main.rs @@ -308,43 +308,59 @@ async fn initialize_background_spend_dag_collection( } async fn start_server(dag: SpendDagDb) -> Result<()> { - let server = Server::http("0.0.0.0:4242").expect("Failed to start server"); - info!("Starting dag-query server listening on port 4242..."); - for request in server.incoming_requests() { - info!( - "Received request! 
method: {:?}, url: {:?}", - request.method(), - request.url(), - ); - - // Dispatch the request to the appropriate handler - let response = match request.url() { - "/" => routes::spend_dag_svg(&dag), - s if s.starts_with("/spend/") => routes::spend(&dag, &request).await, - s if s.starts_with("/add-participant/") => { - routes::add_participant(&dag, &request).await - } - "/beta-rewards" => routes::beta_rewards(&dag).await, - _ => routes::not_found(), - }; + loop { + let server = Server::http("0.0.0.0:4242").expect("Failed to start server"); + info!("Starting dag-query server listening on port 4242..."); + for request in server.incoming_requests() { + info!( + "Received request! method: {:?}, url: {:?}", + request.method(), + request.url(), + ); - // Send a response to the client - match response { - Ok(res) => { - let _ = request - .respond(res) - .map_err(|err| eprintln!("Failed to send response: {err}")); - } - Err(e) => { - eprint!("Sending error to client: {e}"); - let res = Response::from_string(format!("Error: {e}")).with_status_code(500); - let _ = request - .respond(res) - .map_err(|err| eprintln!("Failed to send error response: {err}")); + // Dispatch the request to the appropriate handler + let response = match request.url() { + "/" => routes::spend_dag_svg(&dag), + s if s.starts_with("/spend/") => routes::spend(&dag, &request).await, + s if s.starts_with("/add-participant/") => { + routes::add_participant(&dag, &request).await + } + "/beta-rewards" => routes::beta_rewards(&dag).await, + "/restart" => { + info!("Restart auditor web service as to client's request"); + break; + } + "/terminate" => { + info!("Terminate auditor web service as to client's request"); + return Ok(()); + } + _ => routes::not_found(), + }; + + // Send a response to the client + match response { + Ok(res) => { + info!("Sending response to client"); + let _ = request.respond(res).map_err(|err| { + warn!("Failed to send response: {err}"); + eprintln!("Failed to send response: {err}") + }); + } + Err(e) => { + eprint!("Sending error to client: {e}"); + let res = Response::from_string(format!("Error: {e}")).with_status_code(500); + let _ = request.respond(res).map_err(|err| { + warn!("Failed to send error response: {err}"); + eprintln!("Failed to send error response: {err}") + }); + } } } + // Reaching this point indicates a restarting of auditor web service + // Sleep for a while to allowing OS cleanup and settlement. 
+ drop(server); + std::thread::sleep(std::time::Duration::from_secs(10)); } - Ok(()) } // get the data dir path for auditor diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index fb2eff313b..50a09650b8 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.14" +version = "0.1.15" build = "build.rs" [build-dependencies] diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index f46f0f32ae..0b130d77e4 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.95.2" +version = "0.95.3" [[bin]] path = "src/bin/main.rs" @@ -52,11 +52,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_client = { path = "../sn_client", version = "0.110.2" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_client = { path = "../sn_client", version = "0.110.4" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -78,7 +78,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.110.2", features = [ +sn_client = { path = "../sn_client", version = "0.110.4", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 57e3b4041a..ce0f2b5ee8 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.110.2" +version = "0.110.4" [features] default = [] @@ -48,18 +48,18 @@ prometheus-client = { version = "0.22", optional = true } rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" -self_encryption = "~0.29.0" +self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } -sn_registers = { path = "../sn_registers", version = "0.3.20" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } +sn_networking = { path = "../sn_networking", version = "0.18.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3", optional = true } eyre = { version = 
"0.6.8", optional = true } [dev-dependencies] @@ -67,9 +67,8 @@ assert_matches = "1.5.0" dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } -# sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_registers = { path = "../sn_registers", version = "0.3.20", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_registers = { path = "../sn_registers", version = "0.3.21", features = [ "test-utils", ] } @@ -84,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_client/src/chunks/pac_man.rs b/sn_client/src/chunks/pac_man.rs index 93d5785d9c..3cd368e320 100644 --- a/sn_client/src/chunks/pac_man.rs +++ b/sn_client/src/chunks/pac_man.rs @@ -50,8 +50,8 @@ pub(crate) fn encrypt_large( output_dir: &Path, ) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { let mut encryptor = StreamSelfEncryptor::encrypt_from_file( - Box::new(file_path.to_path_buf()), - Some(Box::new(output_dir.to_path_buf())), + file_path.to_path_buf(), + Some(output_dir.to_path_buf()), )?; let data_map; @@ -99,16 +99,17 @@ pub(crate) fn to_chunk(chunk_content: Bytes) -> Chunk { fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec)> { let mut chunks = vec![]; let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?; + debug!("Max chunk size: {} bytes", *MAX_CHUNK_SIZE); let (data_map_chunk, additional_chunks) = loop { let chunk = to_chunk(chunk_content); - // If datamap chunk is less than `MAX_CHUNK_SIZE` return it so it can be directly sent to the network. - if MAX_CHUNK_SIZE >= chunk.serialised_size() { + // If datamap chunk is less than or equal to MAX_CHUNK_SIZE return it so it can be directly sent to the network. + if chunk.serialised_size() <= *MAX_CHUNK_SIZE { chunks.reverse(); // Returns the last datamap, and all the chunks produced. break (chunk, chunks); } else { - let mut bytes = BytesMut::with_capacity(MAX_CHUNK_SIZE).writer(); + let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE).writer(); let mut serialiser = rmp_serde::Serializer::new(&mut bytes); chunk.serialize(&mut serialiser)?; let serialized_chunk = bytes.into_inner().freeze(); diff --git a/sn_client/src/files/download.rs b/sn_client/src/files/download.rs index a1f8de5f09..4444fab023 100644 --- a/sn_client/src/files/download.rs +++ b/sn_client/src/files/download.rs @@ -351,10 +351,7 @@ impl FilesDownload { let mut download_kind = { if let Some(path) = decrypted_file_path { - DownloadKind::FileSystem(StreamSelfDecryptor::decrypt_to_file( - Box::new(path), - &data_map, - )?) + DownloadKind::FileSystem(StreamSelfDecryptor::decrypt_to_file(path, &data_map)?) 
} else { DownloadKind::Memory(Vec::new()) } diff --git a/sn_client/src/folders.rs b/sn_client/src/folders.rs index 4e3152e33c..e2c94ef929 100644 --- a/sn_client/src/folders.rs +++ b/sn_client/src/folders.rs @@ -314,7 +314,7 @@ impl FoldersApi { children: &BTreeSet, encryption_pk: Option, ) -> Result<(EntryHash, XorName, Metadata)> { - let mut bytes = BytesMut::with_capacity(MAX_CHUNK_SIZE); + let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE); let serialised_metadata = rmp_serde::to_vec(&metadata)?; if let Some(pk) = encryption_pk { bytes.put( diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 404e467640..9fed6af601 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.2" +version = "0.5.3" [features] default = ["gifting"] @@ -38,13 +38,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_cli = { path = "../sn_cli", version = "0.95.2" } -sn_client = { path = "../sn_client", version = "0.110.2" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_cli = { path = "../sn_cli", version = "0.95.3" } +sn_client = { path = "../sn_client", version = "0.110.4" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 4ebde30490..bd73bb2773 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.35" +version = "0.2.36" [dependencies] chrono = "~0.4.19" diff --git a/sn_logging/src/lib.rs b/sn_logging/src/lib.rs index 464581d9e3..880de56e08 100644 --- a/sn_logging/src/lib.rs +++ b/sn_logging/src/lib.rs @@ -124,8 +124,8 @@ pub struct LogBuilder { default_logging_targets: Vec<(String, Level)>, output_dest: LogOutputDest, format: LogFormat, - max_uncompressed_log_files: Option, - max_compressed_log_files: Option, + max_log_files: Option, + max_archived_log_files: Option, /// Setting this would print the sn_logging related updates to stdout. 
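+    /// Defaults to `true` (set in `LogBuilder::new`).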
print_updates_to_stdout: bool, } @@ -140,8 +140,8 @@ impl LogBuilder { default_logging_targets, output_dest: LogOutputDest::Stderr, format: LogFormat::Default, - max_uncompressed_log_files: None, - max_compressed_log_files: None, + max_log_files: None, + max_archived_log_files: None, print_updates_to_stdout: true, } } @@ -157,13 +157,13 @@ impl LogBuilder { } /// The max number of uncompressed log files to store - pub fn max_uncompressed_log_files(&mut self, files: usize) { - self.max_uncompressed_log_files = Some(files); + pub fn max_log_files(&mut self, files: usize) { + self.max_log_files = Some(files); } /// The max number of compressed files to store - pub fn max_compressed_log_files(&mut self, files: usize) { - self.max_compressed_log_files = Some(files); + pub fn max_archived_log_files(&mut self, files: usize) { + self.max_archived_log_files = Some(files); } /// Setting this to false would prevent sn_logging from printing things to stdout. @@ -182,8 +182,8 @@ impl LogBuilder { self.default_logging_targets.clone(), &self.output_dest, self.format, - self.max_uncompressed_log_files, - self.max_compressed_log_files, + self.max_log_files, + self.max_archived_log_files, self.print_updates_to_stdout, )?; diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index f3b9b9df43..cd2ad4b26d 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.15" +version = "0.1.16" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 251f16bf71..0b03bb87b1 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.3" +version = "0.18.4" [features] default = [] @@ -54,10 +54,10 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } -sn_registers = { path = "../sn_registers", version = "0.3.20" } +sn_build_info = { path="../sn_build_info", version = "0.1.15" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_registers = { path = "../sn_registers", version = "0.3.21" } sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index a01db526b2..d440109764 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -666,6 +666,8 @@ impl NetworkBuilder { local: self.local, is_client, is_behind_home_network: self.is_behind_home_network, + #[cfg(feature = "open-metrics")] + close_group: Vec::with_capacity(CLOSE_GROUP_SIZE), peers_in_rt: 0, bootstrap, relay_manager, @@ -715,6 +717,8 @@ pub struct SwarmDriver { pub(crate) local: bool, pub(crate) is_client: bool, pub(crate) is_behind_home_network: bool, + #[cfg(feature = "open-metrics")] + pub(crate) close_group: Vec, pub(crate) peers_in_rt: usize, pub(crate) bootstrap: ContinuousBootstrap, pub(crate) external_address_manager: ExternalAddressManager, @@ -991,6 +995,13 @@ impl 
SwarmDriver {
             metrics_recorder.record_from_marker(marker)
         }
     }
+    #[cfg(feature = "open-metrics")]
+    /// Updates metrics that rely on our current close group.
+    pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec<PeerId>) {
+        if let Some(metrics_recorder) = self.metrics_recorder.as_ref() {
+            metrics_recorder.record_change_in_close_group(new_close_group);
+        }
+    }

     /// Listen on the provided address. Also records it within RelayManager
     pub(crate) fn listen_on(&mut self, addr: Multiaddr) -> Result<()> {
diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs
index c313c73d07..6585f4d61a 100644
--- a/sn_networking/src/event/mod.rs
+++ b/sn_networking/src/event/mod.rs
@@ -24,10 +24,10 @@ use libp2p::{
 use sn_evm::PaymentQuote;
 use sn_protocol::{
     messages::{Query, Request, Response},
-    NetworkAddress, PrettyPrintRecordKey,
+    NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
 };
 use std::{
-    collections::BTreeSet,
+    collections::{BTreeSet, HashSet},
     fmt::{Debug, Formatter},
 };
 use tokio::sync::oneshot;
@@ -216,6 +216,28 @@ impl Debug for NetworkEvent {
 }

 impl SwarmDriver {
+    /// Check for changes in our close group
+    #[cfg(feature = "open-metrics")]
+    pub(crate) fn check_for_change_in_our_close_group(&mut self) {
+        // this includes self
+        let closest_k_peers = self.get_closest_k_value_local_peers();
+
+        let new_closest_peers: Vec<_> =
+            closest_k_peers.into_iter().take(CLOSE_GROUP_SIZE).collect();
+
+        let old = self.close_group.iter().cloned().collect::<HashSet<_>>();
+        let new_members: Vec<_> = new_closest_peers
+            .iter()
+            .filter(|p| !old.contains(p))
+            .collect();
+        if !new_members.is_empty() {
+            debug!("The close group has been updated. The new members are {new_members:?}");
+            debug!("New close group: {new_closest_peers:?}");
+            self.close_group = new_closest_peers.clone();
+            self.record_change_in_close_group(new_closest_peers);
+        }
+    }
+
     /// Update state on addition of a peer to the routing table.
     pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId) {
         self.peers_in_rt = self.peers_in_rt.saturating_add(1);
@@ -226,6 +248,11 @@ impl SwarmDriver {
         self.log_kbuckets(&added_peer);
         self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt));

+        #[cfg(feature = "open-metrics")]
+        if self.metrics_recorder.is_some() {
+            self.check_for_change_in_our_close_group();
+        }
+
         #[cfg(feature = "open-metrics")]
         if let Some(metrics_recorder) = &self.metrics_recorder {
             metrics_recorder
@@ -244,6 +271,11 @@ impl SwarmDriver {
         self.log_kbuckets(&removed_peer);
         self.send_event(NetworkEvent::PeerRemoved(removed_peer, self.peers_in_rt));

+        #[cfg(feature = "open-metrics")]
+        if self.metrics_recorder.is_some() {
+            self.check_for_change_in_our_close_group();
+        }
+
         #[cfg(feature = "open-metrics")]
         if let Some(metrics_recorder) = &self.metrics_recorder {
             metrics_recorder
diff --git a/sn_networking/src/metrics/bad_node.rs b/sn_networking/src/metrics/bad_node.rs
index 578ba25cce..7b64e248ec 100644
--- a/sn_networking/src/metrics/bad_node.rs
+++ b/sn_networking/src/metrics/bad_node.rs
@@ -7,20 +7,52 @@
 // permissions and limitations relating to use of the SAFE Network Software.
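The close-group check above is essentially a set difference between the previously recorded group and the newly computed CLOSE_GROUP_SIZE closest peers. A minimal standalone sketch of the same idea (illustrative only; generic over the element type rather than tied to libp2p's PeerId):

    use std::collections::HashSet;
    use std::hash::Hash;

    /// Returns the elements of `new_group` that are absent from `old_group`.
    fn new_members<T: Eq + Hash + Clone>(old_group: &[T], new_group: &[T]) -> Vec<T> {
        // Index the old group once, then keep each new element not seen before.
        let old: HashSet<&T> = old_group.iter().collect();
        new_group
            .iter()
            .filter(|candidate| !old.contains(candidate))
            .cloned()
            .collect()
    }

If the returned list is non-empty the group has changed, which is exactly when the driver records the new group and notifies the metrics task.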
use crate::target_arch::interval;
-use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
-use prometheus_client::metrics::{family::Family, gauge::Gauge};
-use std::time::{Duration, Instant};
+use libp2p::PeerId;
+use prometheus_client::{
+    encoding::{EncodeLabelSet, EncodeLabelValue},
+    metrics::{family::Family, gauge::Gauge},
+};
+use sn_protocol::CLOSE_GROUP_SIZE;
+use std::{
+    collections::{HashSet, VecDeque},
+    time::{Duration, Instant},
+};
 use strum::IntoEnumIterator;

 const UPDATE_INTERVAL: Duration = Duration::from_secs(20);

+#[cfg(not(test))]
+const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = 5 * CLOSE_GROUP_SIZE;
+#[cfg(test)]
+const MAX_EVICTED_CLOSE_GROUP_PEERS: usize = CLOSE_GROUP_SIZE + 2;
+
+pub struct BadNodeMetrics {
+    shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames,
+    shunned_by_close_group: ShunnedByCloseGroup,
+}
+
+pub enum BadNodeMetricsMsg {
+    ShunnedByPeer(PeerId),
+    CloseGroupUpdated(Vec<PeerId>),
+}
+
+struct ShunnedByCloseGroup {
+    metric_current_group: Gauge,
+    metric_old_group: Gauge,
+
+    // trackers
+    close_group_peers: Vec<PeerId>,
+    old_close_group_peers: VecDeque<PeerId>,
+    old_new_group_shunned_list: HashSet<PeerId>,
+}
+
 /// A struct to record the number of reports against our node across different time frames.
-pub struct ShunnedCountAcrossTimeFrames {
+struct ShunnedCountAcrossTimeFrames {
     metric: Family<TimeFrame, Gauge>,
-    tracked_values: Vec<TrackedValue>,
+    shunned_report_tracker: Vec<ShunnedReportTracker>,
 }

-struct TrackedValue {
+struct ShunnedReportTracker {
     time: Instant,
     least_bucket_it_fits_in: TimeFrameType,
 }
@@ -77,38 +109,122 @@ impl TimeFrameType {
     }
 }

-impl ShunnedCountAcrossTimeFrames {
+impl BadNodeMetrics {
     pub fn spawn_background_task(
         time_based_shunned_count: Family<TimeFrame, Gauge>,
-    ) -> tokio::sync::mpsc::Sender<()> {
-        let (tx, mut rx) = tokio::sync::mpsc::channel(10);
+        shunned_by_close_group: Gauge,
+        shunned_by_old_close_group: Gauge,
+    ) -> tokio::sync::mpsc::Sender<BadNodeMetricsMsg> {
+        let mut bad_node_metrics = BadNodeMetrics {
+            shunned_count_across_time_frames: ShunnedCountAcrossTimeFrames {
+                metric: time_based_shunned_count,
+                shunned_report_tracker: Vec::new(),
+            },
+            shunned_by_close_group: ShunnedByCloseGroup {
+                metric_current_group: shunned_by_close_group,
+                metric_old_group: shunned_by_old_close_group,
+
+                close_group_peers: Vec::new(),
+                old_close_group_peers: VecDeque::new(),
+                // Shunned by old or new close group
+                old_new_group_shunned_list: HashSet::new(),
+            },
+        };
+        let (tx, mut rx) = tokio::sync::mpsc::channel(10);
         tokio::spawn(async move {
-            let mut shunned_metrics = ShunnedCountAcrossTimeFrames {
-                metric: time_based_shunned_count,
-                tracked_values: Vec::new(),
-            };
             let mut update_interval = interval(UPDATE_INTERVAL);
             update_interval.tick().await;

             loop {
                 tokio::select! {
-                    _ = rx.recv() => {
-                        shunned_metrics.record_shunned_metric();
+                    msg = rx.recv() => {
+                        match msg {
+                            Some(BadNodeMetricsMsg::ShunnedByPeer(peer)) => {
+                                bad_node_metrics.shunned_count_across_time_frames.record_shunned_metric();
+                                bad_node_metrics.shunned_by_close_group.record_shunned_metric(peer);
+                            }
+                            Some(BadNodeMetricsMsg::CloseGroupUpdated(new_closest_peers)) => {
+                                bad_node_metrics.shunned_by_close_group.update_close_group_peers(new_closest_peers);
+                            }
+                            None => break,
+                        }
+                    }
                     _ = update_interval.tick() => {
-                        shunned_metrics.update();
+                        bad_node_metrics.shunned_count_across_time_frames.try_update_state();
                     }
                 }
             }
         });
         tx
     }
+}

-    pub fn record_shunned_metric(&mut self) {
+impl ShunnedByCloseGroup {
+    pub(crate) fn record_shunned_metric(&mut self, peer: PeerId) {
+        // increment the metric if the peer is in the close group (new or old) and hasn't shunned us before
+        if !self.old_new_group_shunned_list.contains(&peer) {
+            if self.close_group_peers.contains(&peer) {
+                self.metric_current_group.inc();
+                self.old_new_group_shunned_list.insert(peer);
+            } else if self.old_close_group_peers.contains(&peer) {
+                self.metric_old_group.inc();
+                self.old_new_group_shunned_list.insert(peer);
+            }
+        }
+    }
+
+    pub(crate) fn update_close_group_peers(&mut self, new_closest_peers: Vec<PeerId>) {
+        let new_members: Vec<PeerId> = new_closest_peers
+            .iter()
+            .filter(|p| !self.close_group_peers.contains(p))
+            .cloned()
+            .collect();
+        let evicted_members: Vec<PeerId> = self
+            .close_group_peers
+            .iter()
+            .filter(|p| !new_closest_peers.contains(p))
+            .cloned()
+            .collect();
+        for new_member in &new_members {
+            // if it has shunned us before, update the metrics.
+            if self.old_new_group_shunned_list.contains(new_member) {
+                self.metric_old_group.dec();
+                self.metric_current_group.inc();
+            }
+        }
+
+        for evicted_member in &evicted_members {
+            self.old_close_group_peers.push_back(*evicted_member);
+            // if it has shunned us before, update the metrics.
+            if self.old_new_group_shunned_list.contains(evicted_member) {
+                self.metric_current_group.dec();
+                self.metric_old_group.inc();
+            }
+        }
+
+        if !new_members.is_empty() {
+            debug!("The close group has been updated. The new members are {new_members:?}.
The evicted members are {evicted_members:?}"); + self.close_group_peers = new_closest_peers; + + while self.old_close_group_peers.len() > MAX_EVICTED_CLOSE_GROUP_PEERS { + if let Some(removed_peer) = self.old_close_group_peers.pop_front() { + if self.old_new_group_shunned_list.remove(&removed_peer) { + self.metric_old_group.dec(); + } + } + } + } + } +} + +impl ShunnedCountAcrossTimeFrames { + fn record_shunned_metric(&mut self) { let now = Instant::now(); - self.tracked_values.push(TrackedValue { + self.shunned_report_tracker.push(ShunnedReportTracker { time: now, least_bucket_it_fits_in: TimeFrameType::LastTenMinutes, }); @@ -121,11 +237,11 @@ impl ShunnedCountAcrossTimeFrames { } } - pub fn update(&mut self) { + fn try_update_state(&mut self) { let now = Instant::now(); let mut idx_to_remove = Vec::new(); - for (idx, tracked_value) in self.tracked_values.iter_mut().enumerate() { + for (idx, tracked_value) in self.shunned_report_tracker.iter_mut().enumerate() { let time_elapsed_since_adding = now.duration_since(tracked_value.time).as_secs(); if time_elapsed_since_adding > tracked_value.least_bucket_it_fits_in.get_duration_sec() @@ -145,7 +261,7 @@ impl ShunnedCountAcrossTimeFrames { } // remove the ones that are now indefinite for idx in idx_to_remove { - self.tracked_values.remove(idx); + self.shunned_report_tracker.remove(idx); } } } @@ -153,16 +269,17 @@ impl ShunnedCountAcrossTimeFrames { #[cfg(test)] mod tests { use super::*; + use eyre::Result; #[test] - fn update_should_move_to_next_state() -> eyre::Result<()> { + fn update_should_move_to_next_timeframe() -> Result<()> { let mut shunned_metrics = ShunnedCountAcrossTimeFrames { metric: Family::default(), - tracked_values: Vec::new(), + shunned_report_tracker: Vec::new(), }; shunned_metrics.record_shunned_metric(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastTenMinutes)); // all the counters should be 1 for variant in TimeFrameType::iter() { @@ -179,8 +296,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastHour)); // all the counters except LastTenMinutes should be 1 for variant in TimeFrameType::iter() { @@ -201,8 +318,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastSixHours)); // all the counters except LastTenMinutes and LastHour should be 1 for variant in TimeFrameType::iter() { @@ -223,8 +340,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, 
TimeFrameType::LastDay)); // all the counters except LastTenMinutes, LastHour and LastSixHours should be 1 for variant in TimeFrameType::iter() { @@ -248,8 +365,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - let current_state = shunned_metrics.tracked_values[0].least_bucket_it_fits_in; + shunned_metrics.try_update_state(); + let current_state = shunned_metrics.shunned_report_tracker[0].least_bucket_it_fits_in; assert!(matches!(current_state, TimeFrameType::LastWeek)); // all the counters except LastTenMinutes, LastHour, LastSixHours and LastDay should be 1 for variant in TimeFrameType::iter() { @@ -274,8 +391,8 @@ mod tests { std::thread::sleep(std::time::Duration::from_secs( current_state.get_duration_sec() + 1, )); - shunned_metrics.update(); - assert_eq!(shunned_metrics.tracked_values.len(), 0); + shunned_metrics.try_update_state(); + assert_eq!(shunned_metrics.shunned_report_tracker.len(), 0); // all the counters except Indefinite should be 0 for variant in TimeFrameType::iter() { let time_frame = TimeFrame { @@ -290,4 +407,215 @@ mod tests { Ok(()) } + + #[test] + fn metrics_should_not_be_updated_if_close_group_is_not_set() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + + close_group_shunned.record_shunned_metric(PeerId::random()); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn close_group_shunned_metric_should_be_updated_on_new_report() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + // report by a peer in the close group should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by same peer should not increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[0]); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a different peer should increment the metric + close_group_shunned.record_shunned_metric(close_group_shunned.close_group_peers[1]); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // report by a peer that is not in the close group should not increment the metric + close_group_shunned.record_shunned_metric(PeerId::random()); + assert_eq!(close_group_shunned.metric_current_group.get(), 2); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn change_in_close_group_should_update_the_metrics() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + 
metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + let old_member = close_group_shunned.close_group_peers[0]; + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + // update close group + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the peer that shunned us before should now be in the old group + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // report by the old member should not increment the metric + close_group_shunned.record_shunned_metric(old_member); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + + // update close group with old member + close_group_shunned.update_close_group_peers(vec![ + old_member, + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + ]); + + // the metrics of current_group and old_group should be updated + assert_eq!(close_group_shunned.metric_current_group.get(), 1); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + + Ok(()) + } + + #[test] + fn update_close_group_metrics_on_reaching_max_evicted_peer_count() -> Result<()> { + let mut close_group_shunned = ShunnedByCloseGroup { + metric_current_group: Gauge::default(), + metric_old_group: Gauge::default(), + + close_group_peers: Vec::new(), + old_close_group_peers: VecDeque::new(), + old_new_group_shunned_list: HashSet::new(), + }; + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // evict 1 members + let old_member_1 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // evict 1 members + let old_member_2 = close_group_shunned.close_group_peers[0]; + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // report by the evicted members should increment the old group metric + close_group_shunned.record_shunned_metric(old_member_1); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + close_group_shunned.record_shunned_metric(old_member_2); + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict all the members + close_group_shunned.update_close_group_peers(vec![ + PeerId::random(), + 
PeerId::random(), + PeerId::random(), + PeerId::random(), + PeerId::random(), + ]); + + // the metrics should still remain the same + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 2); + + // evict 1 more members to cross the threshold + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_1 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 1); + assert!(!close_group_shunned + .old_close_group_peers + .contains(&old_member_1)); + assert!(close_group_shunned + .old_close_group_peers + .contains(&old_member_2)); + + // evict 1 more member + close_group_shunned.update_close_group_peers(vec![ + close_group_shunned.close_group_peers[1], + close_group_shunned.close_group_peers[2], + close_group_shunned.close_group_peers[3], + close_group_shunned.close_group_peers[4], + PeerId::random(), + ]); + + // the metric from the member_2 should be removed + assert_eq!(close_group_shunned.metric_current_group.get(), 0); + assert_eq!(close_group_shunned.metric_old_group.get(), 0); + assert!(!close_group_shunned + .old_close_group_peers + .contains(&old_member_1)); + + Ok(()) + } } diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index e6511e7d99..b4837d35fa 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -12,11 +12,13 @@ pub mod service; #[cfg(feature = "upnp")] mod upnp; -#[cfg(feature = "open-metrics")] use crate::MetricsRegistries; use crate::{log_markers::Marker, target_arch::sleep}; -use bad_node::{ShunnedCountAcrossTimeFrames, TimeFrame}; -use libp2p::metrics::{Metrics as Libp2pMetrics, Recorder}; +use bad_node::{BadNodeMetrics, BadNodeMetricsMsg, TimeFrame}; +use libp2p::{ + metrics::{Metrics as Libp2pMetrics, Recorder}, + PeerId, +}; use prometheus_client::{ metrics::family::Family, metrics::{counter::Counter, gauge::Gauge}, @@ -52,16 +54,20 @@ pub(crate) struct NetworkMetricsRecorder { // bad node metrics bad_peers_count: Counter, - #[allow(dead_code)] // This is updated by the background task - shunned_across_time_frames: Family, shunned_count: Counter, + #[allow(dead_code)] // updated by background task + shunned_count_across_time_frames: Family, + #[allow(dead_code)] + shunned_by_close_group: Gauge, + #[allow(dead_code)] + shunned_by_old_close_group: Gauge, // system info process_memory_used_mb: Gauge, process_cpu_usage_percentage: Gauge, // helpers - shunned_report_notifier: tokio::sync::mpsc::Sender<()>, + bad_nodes_notifier: tokio::sync::mpsc::Sender, } impl NetworkMetricsRecorder { @@ -181,13 +187,29 @@ impl NetworkMetricsRecorder { .extended_metrics .sub_registry_with_prefix("sn_networking"); let shunned_count_across_time_frames = Family::default(); - let shunned_report_notifier = ShunnedCountAcrossTimeFrames::spawn_background_task( + extended_metrics_sub_registry.register( + "shunned_count_across_time_frames", + "The number of times our node has been shunned by other nodes across different time frames", shunned_count_across_time_frames.clone(), ); + + let shunned_by_close_group = Gauge::default(); extended_metrics_sub_registry.register( - "shunned_count_across_time_frames", - "The number of peers that have been shunned across 
different time frames", + "shunned_by_close_group", + "The number of close group peers that have shunned our node", + shunned_by_close_group.clone(), + ); + + let shunned_by_old_close_group = Gauge::default(); + extended_metrics_sub_registry.register( + "shunned_by_old_close_group", + "The number of close group peers that have shunned our node. This contains the peers that were once in our close group but have since been evicted.", + shunned_by_old_close_group.clone(), + ); + let bad_nodes_notifier = BadNodeMetrics::spawn_background_task( shunned_count_across_time_frames.clone(), + shunned_by_close_group.clone(), + shunned_by_old_close_group.clone(), ); let network_metrics = Self { @@ -207,13 +229,15 @@ impl NetworkMetricsRecorder { live_time, bad_peers_count, - shunned_across_time_frames: shunned_count_across_time_frames, + shunned_count_across_time_frames, shunned_count, + shunned_by_close_group, + shunned_by_old_close_group, process_memory_used_mb, process_cpu_usage_percentage, - shunned_report_notifier, + bad_nodes_notifier, }; network_metrics.system_metrics_recorder_task(); @@ -255,11 +279,15 @@ impl NetworkMetricsRecorder { Marker::PeerConsideredAsBad { .. } => { let _ = self.bad_peers_count.inc(); } - Marker::FlaggedAsBadNode { .. } => { + Marker::FlaggedAsBadNode { flagged_by } => { let _ = self.shunned_count.inc(); - let shunned_report_notifier = self.shunned_report_notifier.clone(); + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + let flagged_by = *flagged_by; crate::target_arch::spawn(async move { - if let Err(err) = shunned_report_notifier.send(()).await { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::ShunnedByPeer(flagged_by)) + .await + { error!("Failed to send shunned report via notifier: {err:?}"); } }); @@ -287,6 +315,18 @@ impl NetworkMetricsRecorder { _ => {} } } + + pub(crate) fn record_change_in_close_group(&self, new_close_group: Vec) { + let bad_nodes_notifier = self.bad_nodes_notifier.clone(); + crate::target_arch::spawn(async move { + if let Err(err) = bad_nodes_notifier + .send(BadNodeMetricsMsg::CloseGroupUpdated(new_close_group)) + .await + { + error!("Failed to send shunned report via notifier: {err:?}"); + } + }); + } } /// Impl the Recorder traits again for our struct. diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index d65a97acde..599dee835b 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -18,7 +18,6 @@ use aes_gcm_siv::{ }; use itertools::Itertools; -use lazy_static::lazy_static; use libp2p::{ identity::PeerId, kad::{ @@ -30,7 +29,6 @@ use libp2p::{ use prometheus_client::metrics::gauge::Gauge; use rand::RngCore; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; -use self_encryption::MAX_CHUNK_SIZE; use serde::{Deserialize, Serialize}; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{ @@ -50,27 +48,13 @@ use tokio::sync::mpsc; use walkdir::{DirEntry, WalkDir}; use xor_name::XorName; -/// The default value of targted max storage space is to be 32GB. -const DEFAULT_MAX_STORAGE_SPACE: u64 = 32 * 1024 * 1024 * 1024; - -lazy_static! { - /// The max storage space for the records. - /// A `node size` is to be defined as this plus the logging space assigned. - pub static ref MAX_STORAGE_SPACE: u64 = std::option_env!("MAX_STORAGE_SPACE") - .unwrap_or(&DEFAULT_MAX_STORAGE_SPACE.to_string()) - .parse::() - .unwrap_or(DEFAULT_MAX_STORAGE_SPACE); - - // A spend record is at the size of 2KB roughly. 
-    // During Beta phase, it's almost one spend per chunk,
-    // which makes the average record size is to be half of the MAX_CHUNK_SIZE
-    static ref MAX_RECORDS_COUNT: usize = {
-        let records_count: usize = ((*MAX_STORAGE_SPACE as f64 / *MAX_CHUNK_SIZE as f64) * 2.0) as usize;
-        info!("MAX_STORAGE_SPACE is {}, MAX_CHUNK_SIZE is {}, MAX_RECORDS_COUNT is {records_count}",
-            *MAX_STORAGE_SPACE, *MAX_CHUNK_SIZE);
-        records_count
-    };
-}
+// A spend record is roughly 4KB in size,
+// while a chunk record is capped at 4MB.
+// During the Beta phase it's almost one spend per chunk,
+// which puts the average record size at around 2MB.
+// Given we are targeting a node size of 32GB,
+// this allows around 16K records.
+const MAX_RECORDS_COUNT: usize = 16 * 1024;

 /// The maximum number of records to cache in memory.
 const MAX_RECORDS_CACHE_SIZE: usize = 100;
@@ -143,7 +127,7 @@ impl Default for NodeRecordStoreConfig {
         Self {
             storage_dir: historic_quote_dir.clone(),
             historic_quote_dir,
-            max_records: *MAX_RECORDS_COUNT,
+            max_records: MAX_RECORDS_COUNT,
             max_value_bytes: MAX_PACKET_SIZE,
             records_cache_size: MAX_RECORDS_CACHE_SIZE,
         }
     }
@@ -477,7 +461,7 @@ impl NodeRecordStore {
     // result in mis-calculation of relevant records.
     pub fn cleanup_unrelevant_records(&mut self) {
         let accumulated_records = self.records.len();
-        if accumulated_records < *MAX_RECORDS_COUNT * 6 / 10 {
+        if accumulated_records < MAX_RECORDS_COUNT * 6 / 10 {
             return;
         }
@@ -948,7 +932,7 @@ impl RecordStore for ClientRecordStore {
 pub fn calculate_cost_for_records(records_stored: usize) -> u64 {
     use std::cmp::{max, min};

-    let max_records = *MAX_RECORDS_COUNT;
+    let max_records = MAX_RECORDS_COUNT;

     let ori_cost = positive_input_0_1_sigmoid(records_stored as f64 / max_records as f64)
         * MAX_STORE_COST as f64;
@@ -1025,13 +1009,13 @@ mod tests {
     #[test]
     fn test_calculate_max_cost_for_records() {
-        let sut = calculate_cost_for_records(*MAX_RECORDS_COUNT + 1);
+        let sut = calculate_cost_for_records(MAX_RECORDS_COUNT + 1);
         assert_eq!(sut, MAX_STORE_COST - 1);
     }

     #[test]
     fn test_calculate_50_percent_cost_for_records() {
-        let percent = *MAX_RECORDS_COUNT * 50 / 100;
+        let percent = MAX_RECORDS_COUNT * 50 / 100;
         let sut = calculate_cost_for_records(percent);
         // at this point we should be at max cost
     }
     #[test]
     fn test_calculate_60_percent_cost_for_records() {
-        let percent = *MAX_RECORDS_COUNT * 60 / 100;
+        let percent = MAX_RECORDS_COUNT * 60 / 100;
         let sut = calculate_cost_for_records(percent);
         // at this point we should be at max cost
-        assert_eq!(sut, 952561);
+        assert_eq!(sut, 952541);
     }
     #[test]
     fn test_calculate_65_percent_cost_for_records() {
-        let percent = *MAX_RECORDS_COUNT * 65 / 100;
+        let percent = MAX_RECORDS_COUNT * 65 / 100;
         let sut = calculate_cost_for_records(percent);
         // at this point we should be at max cost
-        assert_eq!(sut, 989011);
+        assert_eq!(sut, 989001);
     }
     #[test]
     fn test_calculate_70_percent_cost_for_records() {
-        let percent = *MAX_RECORDS_COUNT * 70 / 100;
+        let percent = MAX_RECORDS_COUNT * 70 / 100;
         let sut = calculate_cost_for_records(percent);
         // at this point we should be at max cost
-        assert_eq!(sut, 997527);
+        assert_eq!(sut, 997523);
     }
     #[test]
     fn test_calculate_80_percent_cost_for_records() {
-        let percent = *MAX_RECORDS_COUNT * 80 / 100;
+        let percent = MAX_RECORDS_COUNT * 80 / 100;
         let sut = calculate_cost_for_records(percent);
         // at this point we should be at max cost
@@ -1075,7 +1059,7 @@
     #[test]
     fn test_calculate_90_percent_cost_for_records()
{ - let percent = *MAX_RECORDS_COUNT * 90 / 100; + let percent = MAX_RECORDS_COUNT * 90 / 100; let sut = calculate_cost_for_records(percent); // at this point we should be at max cost assert_eq!(sut, 999993); @@ -1797,7 +1781,7 @@ mod tests { timestamp: std::time::SystemTime::now(), quoting_metrics: QuotingMetrics { close_records_stored: peer.records_stored.load(Ordering::Relaxed), - max_records: *MAX_RECORDS_COUNT, + max_records: MAX_RECORDS_COUNT, received_payment_count: 1, // unimportant for cost calc live_time: 0, // unimportant for cost calc }, diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 99f523ca72..980d1efaa3 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.111.3" +version = "0.111.4" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -49,16 +49,16 @@ tonic = { version = "0.6.2" } rand = { version = "~0.8.5", features = ["small_rng"] } rmp-serde = "1.1.1" rayon = "1.8.0" -self_encryption = "~0.29.0" +self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_networking = { path = "../sn_networking", version = "0.18.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } -sn_registers = { path = "../sn_registers", version = "0.3.20" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } -sn_service_management = { path = "../sn_service_management", version = "0.3.13" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_networking = { path = "../sn_networking", version = "0.18.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_service_management = { path = "../sn_service_management", version = "0.3.14" } sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -87,10 +87,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.10", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.2", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.19.3", features = [ "test-utils", ] } sn_evm = { path = "../sn_evm", version = "0.1.0" } diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 96a1087183..dacbfad45d 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -114,16 +114,16 @@ struct Opt { /// /// After reaching this limit, the older files are archived to save space. /// You can also specify the maximum number of archived log files to keep. - #[clap(long = "max_log_files", verbatim_doc_comment)] - max_uncompressed_log_files: Option, + #[clap(long, verbatim_doc_comment)] + max_log_files: Option, /// Specify the maximum number of archived log files to store. 
/// /// This argument is ignored if `log_output_dest` is set to "stdout" /// /// After reaching this limit, the older archived files are deleted. - #[clap(long = "max_archived_log_files", verbatim_doc_comment)] - max_compressed_log_files: Option, + #[clap(long, verbatim_doc_comment)] + max_archived_log_files: Option, /// Specify the rewards address. /// The rewards address is the address that will receive the rewards for the node. @@ -517,11 +517,11 @@ fn init_logging(opt: &Opt, peer_id: PeerId) -> Result<(String, ReloadHandle, Opt let mut log_builder = sn_logging::LogBuilder::new(logging_targets); log_builder.output_dest(output_dest.clone()); log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); - if let Some(files) = opt.max_uncompressed_log_files { - log_builder.max_uncompressed_log_files(files); + if let Some(files) = opt.max_log_files { + log_builder.max_log_files(files); } - if let Some(files) = opt.max_compressed_log_files { - log_builder.max_compressed_log_files(files); + if let Some(files) = opt.max_archived_log_files { + log_builder.max_archived_log_files(files); } log_builder.initialize()? @@ -535,11 +535,11 @@ fn init_logging(opt: &Opt, peer_id: PeerId) -> Result<(String, ReloadHandle, Opt let mut log_builder = sn_logging::LogBuilder::new(logging_targets); log_builder.output_dest(output_dest.clone()); log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); - if let Some(files) = opt.max_uncompressed_log_files { - log_builder.max_uncompressed_log_files(files); + if let Some(files) = opt.max_log_files { + log_builder.max_log_files(files); } - if let Some(files) = opt.max_compressed_log_files { - log_builder.max_compressed_log_files(files); + if let Some(files) = opt.max_archived_log_files { + log_builder.max_archived_log_files(files); } log_builder.initialize() })?; diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index ac8caf8b66..4d9b71974b 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -22,7 +22,7 @@ use std::{ collections::{BTreeMap, VecDeque}, fmt, fs::create_dir_all, - sync::Arc, + sync::{Arc, LazyLock}, time::{Duration, Instant}, }; use tempfile::tempdir; @@ -36,7 +36,7 @@ const CHURN_CYCLES: u32 = 2; const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; -const DATA_SIZE: usize = MAX_CHUNK_SIZE / 3; +static DATA_SIZE: LazyLock = LazyLock::new(|| *MAX_CHUNK_SIZE / 3); const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; @@ -273,7 +273,7 @@ fn create_registers_task( loop { let owner = Client::register_generate_key(); let random_name = XorName(rand::random()).to_string(); - let random_data = gen_random_data(DATA_SIZE); + let random_data = gen_random_data(*DATA_SIZE); sleep(delay).await; @@ -313,7 +313,7 @@ fn store_chunks_task( let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; loop { - let random_data = gen_random_data(DATA_SIZE); + let random_data = gen_random_data(*DATA_SIZE); let data_map = client.put(random_data, &wallet).await.inspect_err(|err| { println!("Error to put chunk: {err:?}"); diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index fcb1755371..217f9c531d 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.5" +version = "0.10.6" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = 
"1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10" } -sn_service_management = { path = "../sn_service_management", version = "0.3.13" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_service_management = { path = "../sn_service_management", version = "0.3.14" } sn-releases = "0.2.6" sn_evm = { path = "../sn_evm", version = "0.1" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_manager/src/add_services/config.rs b/sn_node_manager/src/add_services/config.rs index ba0873d82c..2d5cac69dc 100644 --- a/sn_node_manager/src/add_services/config.rs +++ b/sn_node_manager/src/add_services/config.rs @@ -78,6 +78,8 @@ pub struct InstallNodeServiceCtxBuilder { pub log_dir_path: PathBuf, pub log_format: Option, pub name: String, + pub max_archived_log_files: Option, + pub max_log_files: Option, pub metrics_port: Option, pub node_ip: Option, pub node_port: Option, @@ -132,6 +134,14 @@ impl InstallNodeServiceCtxBuilder { args.push(OsString::from("--owner")); args.push(OsString::from(owner)); } + if let Some(log_files) = self.max_archived_log_files { + args.push(OsString::from("--max-archived-log-files")); + args.push(OsString::from(log_files.to_string())); + } + if let Some(log_files) = self.max_log_files { + args.push(OsString::from("--max-log-files")); + args.push(OsString::from(log_files.to_string())); + } if !self.bootstrap_peers.is_empty() { let peers_str = self @@ -169,10 +179,12 @@ pub struct AddNodeServiceOptions { pub home_network: bool, pub local: bool, pub log_format: Option, + pub max_archived_log_files: Option, + pub max_log_files: Option, pub metrics_port: Option, - pub owner: Option, pub node_ip: Option, pub node_port: Option, + pub owner: Option, pub rpc_address: Option, pub rpc_port: Option, pub safenode_src_path: PathBuf, diff --git a/sn_node_manager/src/add_services/mod.rs b/sn_node_manager/src/add_services/mod.rs index bb9b75541a..86137d881d 100644 --- a/sn_node_manager/src/add_services/mod.rs +++ b/sn_node_manager/src/add_services/mod.rs @@ -78,6 +78,16 @@ pub async fn add_node( check_port_availability(port_option, &node_registry.nodes)?; } + let owner = match &options.owner { + Some(owner) => { + if owner.chars().any(|c| c.is_uppercase()) { + warn!("Owner name ({owner}) contains uppercase characters and will be converted to lowercase"); + } + Some(owner.to_lowercase()) + } + None => None, + }; + let safenode_file_name = options .safenode_src_path .file_name() @@ -217,11 +227,13 @@ pub async fn add_node( local: options.local, log_dir_path: service_log_dir_path.clone(), log_format: options.log_format, + max_archived_log_files: options.max_archived_log_files, + max_log_files: options.max_log_files, metrics_port: metrics_free_port, name: service_name.clone(), node_ip: options.node_ip, node_port, - owner: options.owner.clone(), + owner: owner.clone(), 
rpc_socket_addr, safenode_path: service_safenode_path.clone(), service_user: options.user.clone(), @@ -250,13 +262,15 @@ pub async fn add_node( local: options.local, log_dir_path: service_log_dir_path.clone(), log_format: options.log_format, + max_archived_log_files: options.max_archived_log_files, + max_log_files: options.max_log_files, metrics_port: metrics_free_port, node_ip: options.node_ip, node_port, number: node_number, reward_balance: None, rpc_socket_addr, - owner: options.owner.clone(), + owner: owner.clone(), peer_id: None, pid: None, safenode_path: service_safenode_path, diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index ab0ba5fd03..34a572ffce 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -120,6 +120,8 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res local: true, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -154,6 +156,8 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res home_network: false, local: true, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -227,6 +231,8 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -272,6 +278,8 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n home_network: false, local: true, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -341,6 +349,8 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> home_network: false, local: true, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -416,6 +426,8 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -454,6 +466,8 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode2"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode2".to_string(), node_ip: None, @@ -492,6 +506,8 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( local: false, log_format: None, log_dir_path: node_logs_dir.to_path_buf().join("safenode3"), + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode3".to_string(), node_ip: None, @@ -527,6 +543,8 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -651,6 +669,8 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re local: false, log_dir_path: 
node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -685,6 +705,8 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re genesis: false, home_network: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -783,6 +805,8 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -817,6 +841,8 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -889,6 +915,8 @@ async fn add_new_node_should_add_another_service() -> Result<()> { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -934,6 +962,8 @@ async fn add_new_node_should_add_another_service() -> Result<()> { local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode2"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode2".to_string(), node_ip: None, @@ -969,6 +999,8 @@ async fn add_new_node_should_add_another_service() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1105,6 +1137,8 @@ async fn add_node_should_use_custom_ip() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: Some(custom_ip), @@ -1181,6 +1215,8 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -1216,6 +1252,8 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1435,6 +1473,8 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1487,6 +1527,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R local: false, log_format: None, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: Some(12000), @@ -1530,6 +1572,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1580,6 +1624,8 @@ async fn 
add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us local: false, log_format: None, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: Some(12000), @@ -1623,6 +1669,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1690,6 +1738,8 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1762,6 +1812,8 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1887,6 +1939,8 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -1912,6 +1966,246 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> Ok(()) } +#[tokio::test] +async fn add_node_should_set_max_archived_log_files() -> Result<()> { + let tmp_data_dir = assert_fs::TempDir::new()?; + let node_reg_path = tmp_data_dir.child("node_reg.json"); + + let mut mock_service_control = MockServiceControl::new(); + + let mut node_registry = NodeRegistry { + auditor: None, + faucet: None, + save_path: node_reg_path.to_path_buf(), + nat_status: None, + nodes: vec![], + bootstrap_peers: vec![], + environment_variables: None, + daemon: None, + }; + + let latest_version = "0.96.4"; + let temp_dir = assert_fs::TempDir::new()?; + let node_data_dir = temp_dir.child("data"); + node_data_dir.create_dir_all()?; + let node_logs_dir = temp_dir.child("logs"); + node_logs_dir.create_dir_all()?; + let safenode_download_path = temp_dir.child(SAFENODE_FILE_NAME); + safenode_download_path.write_binary(b"fake safenode bin")?; + + let mut seq = Sequence::new(); + + // Expected calls for first installation + mock_service_control + .expect_get_available_port() + .times(1) + .returning(|| Ok(8081)) + .in_sequence(&mut seq); + + mock_service_control + .expect_install() + .times(1) + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from( + node_data_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--log-output-dest"), + OsString::from( + node_logs_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--max-archived-log-files"), + OsString::from("20"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: node_data_dir + .to_path_buf() + .join("safenode1") + .join(SAFENODE_FILE_NAME), + username: Some(get_username()), + working_directory: None, + }), + eq(false), + ) + .returning(|_, _| Ok(())) + .in_sequence(&mut seq); + + add_node( + AddNodeServiceOptions { + auto_restart: false, + auto_set_nat_flags: false, + bootstrap_peers: vec![], + count: Some(1), + delete_safenode_src: false, + enable_metrics_server: false, + 
env_variables: None, + genesis: false, + home_network: false, + local: false, + log_format: None, + max_archived_log_files: Some(20), + max_log_files: None, + metrics_port: None, + owner: None, + node_ip: None, + node_port: None, + rpc_address: None, + rpc_port: None, + safenode_dir_path: temp_dir.to_path_buf(), + safenode_src_path: safenode_download_path.to_path_buf(), + service_data_dir_path: node_data_dir.to_path_buf(), + service_log_dir_path: node_logs_dir.to_path_buf(), + upnp: false, + user: Some(get_username()), + user_mode: false, + version: latest_version.to_string(), + }, + &mut node_registry, + &mock_service_control, + VerbosityLevel::Normal, + ) + .await?; + + assert_matches!(node_registry.nodes[0].max_archived_log_files, Some(20)); + + Ok(()) +} + +#[tokio::test] +async fn add_node_should_set_max_log_files() -> Result<()> { + let tmp_data_dir = assert_fs::TempDir::new()?; + let node_reg_path = tmp_data_dir.child("node_reg.json"); + + let mut mock_service_control = MockServiceControl::new(); + + let mut node_registry = NodeRegistry { + auditor: None, + faucet: None, + save_path: node_reg_path.to_path_buf(), + nat_status: None, + nodes: vec![], + bootstrap_peers: vec![], + environment_variables: None, + daemon: None, + }; + + let latest_version = "0.96.4"; + let temp_dir = assert_fs::TempDir::new()?; + let node_data_dir = temp_dir.child("data"); + node_data_dir.create_dir_all()?; + let node_logs_dir = temp_dir.child("logs"); + node_logs_dir.create_dir_all()?; + let safenode_download_path = temp_dir.child(SAFENODE_FILE_NAME); + safenode_download_path.write_binary(b"fake safenode bin")?; + + let mut seq = Sequence::new(); + + // Expected calls for first installation + mock_service_control + .expect_get_available_port() + .times(1) + .returning(|| Ok(8081)) + .in_sequence(&mut seq); + + mock_service_control + .expect_install() + .times(1) + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from( + node_data_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--log-output-dest"), + OsString::from( + node_logs_dir + .to_path_buf() + .join("safenode1") + .to_string_lossy() + .to_string(), + ), + OsString::from("--max-log-files"), + OsString::from("20"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: node_data_dir + .to_path_buf() + .join("safenode1") + .join(SAFENODE_FILE_NAME), + username: Some(get_username()), + working_directory: None, + }), + eq(false), + ) + .returning(|_, _| Ok(())) + .in_sequence(&mut seq); + + add_node( + AddNodeServiceOptions { + auto_restart: false, + auto_set_nat_flags: false, + bootstrap_peers: vec![], + count: Some(1), + delete_safenode_src: false, + enable_metrics_server: false, + env_variables: None, + genesis: false, + home_network: false, + local: false, + log_format: None, + max_archived_log_files: None, + max_log_files: Some(20), + metrics_port: None, + owner: None, + node_ip: None, + node_port: None, + rpc_address: None, + rpc_port: None, + safenode_dir_path: temp_dir.to_path_buf(), + safenode_src_path: safenode_download_path.to_path_buf(), + service_data_dir_path: node_data_dir.to_path_buf(), + service_log_dir_path: node_logs_dir.to_path_buf(), + upnp: false, + user: Some(get_username()), + user_mode: false, + version: latest_version.to_string(), + }, + &mut node_registry, + &mock_service_control, + VerbosityLevel::Normal, + ) + .await?; + 
+ assert_matches!(node_registry.nodes[0].max_log_files, Some(20)); + + Ok(()) +} + #[tokio::test] async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result<()> { let tmp_data_dir = assert_fs::TempDir::new()?; @@ -2100,6 +2394,8 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(PortRange::Range(12000, 12002)), owner: None, node_ip: None, @@ -2149,6 +2445,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(12000), node_ip: None, node_port: None, @@ -2192,6 +2490,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(PortRange::Single(12000)), owner: None, node_ip: None, @@ -2243,6 +2543,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(12000), node_ip: None, node_port: None, @@ -2286,6 +2588,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(PortRange::Range(12000, 12002)), owner: None, node_ip: None, @@ -2483,6 +2787,8 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -2543,6 +2849,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2586,6 +2894,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -2637,6 +2947,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2680,6 +2992,8 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -2754,6 +3068,8 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -2788,6 +3104,8 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() genesis: false, home_network: true, 
log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -2859,6 +3177,8 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -2893,6 +3213,8 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { genesis: false, home_network: true, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -2964,6 +3286,8 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -2998,6 +3322,8 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul genesis: false, home_network: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -3073,6 +3399,8 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ genesis: false, home_network: true, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -3685,6 +4013,8 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -3720,6 +4050,8 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -3792,6 +4124,8 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -3827,6 +4161,8 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( home_network: true, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -3899,6 +4235,8 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -3934,6 +4272,8 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { home_network: true, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -4003,6 +4343,8 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { local: false, log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, name: "safenode1".to_string(), node_ip: None, @@ -4038,6 
+4380,8 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { home_network: true, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: None, node_ip: None, @@ -4066,7 +4410,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { } #[tokio::test] -async fn add_node_should_assign_an_owner() -> Result<()> { +async fn add_node_should_assign_an_owner_in_lowercase() -> Result<()> { let tmp_data_dir = assert_fs::TempDir::new()?; let node_reg_path = tmp_data_dir.child("node_reg.json"); @@ -4154,8 +4498,10 @@ async fn add_node_should_assign_an_owner() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, - owner: Some("discord_username".to_string()), + owner: Some("Discord_Username".to_string()), node_ip: None, node_port: None, rpc_address: None, @@ -4272,6 +4618,8 @@ async fn add_node_should_auto_restart() -> Result<()> { home_network: false, local: false, log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, owner: Some("discord_username".to_string()), node_ip: None, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 81c780dc00..5165eefae1 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -150,6 +150,17 @@ pub enum SubCmd { /// If the argument is not used, the default format will be applied. #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)] log_format: Option, + /// Specify the maximum number of uncompressed log files to store. + /// + /// After reaching this limit, the older files are archived to save space. + /// You can also specify the maximum number of archived log files to keep. + #[clap(long, verbatim_doc_comment)] + max_log_files: Option, + /// Specify the maximum number of archived log files to store. + /// + /// After reaching this limit, the older archived files are deleted. + #[clap(long, verbatim_doc_comment)] + max_archived_log_files: Option, /// Specify a port for the open metrics server. 
/// /// If you're passing the compiled safenode via --node-path, make sure to enable the open-metrics feature @@ -1075,6 +1086,8 @@ async fn main() -> Result<()> { local, log_dir_path, log_format, + max_archived_log_files, + max_log_files, metrics_port, node_ip, node_port, @@ -1099,6 +1112,8 @@ async fn main() -> Result<()> { local, log_dir_path, log_format, + max_archived_log_files, + max_log_files, metrics_port, node_ip, node_port, diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 8a7ba87435..ea30532c45 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -46,6 +46,8 @@ pub async fn add( local: bool, log_dir_path: Option, log_format: Option, + max_archived_log_files: Option, + max_log_files: Option, metrics_port: Option, node_ip: Option, node_port: Option, @@ -145,10 +147,12 @@ pub async fn add( home_network, local, log_format, + max_archived_log_files, + max_log_files, metrics_port, - owner, node_ip, node_port, + owner, rpc_address, rpc_port, safenode_src_path, @@ -605,6 +609,8 @@ pub async fn maintain_n_running_nodes( local: bool, log_dir_path: Option, log_format: Option, + max_archived_log_files: Option, + max_log_files: Option, metrics_port: Option, node_ip: Option, node_port: Option, @@ -706,6 +712,8 @@ pub async fn maintain_n_running_nodes( local, log_dir_path.clone(), log_format, + max_archived_log_files, + max_log_files, metrics_port.clone(), node_ip, Some(PortRange::Single(port)), diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 5ee8d4c5d7..64c32bb9bb 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -774,6 +774,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -876,6 +878,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -943,6 +947,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1053,6 +1059,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1133,6 +1141,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1223,6 +1233,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1312,6 +1324,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1371,6 +1385,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1420,6 +1436,8 @@ mod tests { local: 
false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1467,6 +1485,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1517,6 +1537,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1580,6 +1602,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1706,6 +1730,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1794,6 +1820,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -1927,6 +1955,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2072,6 +2102,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2212,6 +2244,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2353,6 +2387,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2524,6 +2560,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2678,6 +2716,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: Some(LogFormat::Json), + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2835,6 +2875,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -2989,6 +3031,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, number: 1, node_ip: Some(Ipv4Addr::new(192, 168, 1, 1)), @@ -3146,6 +3190,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, number: 1, node_ip: None, @@ -3190,6 +3236,321 @@ mod tests { Ok(()) } + #[tokio::test] + 
async fn upgrade_should_retain_max_archived_log_files() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control + .expect_uninstall() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--max-archived-log-files"), + OsString::from("20"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: false, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + max_archived_log_files: Some(20), + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + number: 1, + owner: None, + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + reward_balance: Some(AttoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: 
"safenode1".to_string(), + status: ServiceStatus::Running, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: false, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + assert_matches!( + service_manager.service.service_data.max_archived_log_files, + Some(20) + ); + + Ok(()) + } + + #[tokio::test] + async fn upgrade_should_retain_max_log_files() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control + .expect_uninstall() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--max-log-files"), + OsString::from("20"), + ], + autostart: false, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut 
service_data = NodeServiceData { + auto_restart: false, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + max_archived_log_files: None, + max_log_files: Some(20), + metrics_port: None, + node_ip: None, + node_port: None, + number: 1, + owner: None, + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + reward_balance: Some(AttoTokens::zero()), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: "safenode1".to_string(), + status: ServiceStatus::Running, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: false, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + assert_matches!(service_manager.service.service_data.max_log_files, Some(20)); + + Ok(()) + } + #[tokio::test] async fn upgrade_should_retain_custom_metrics_ports() -> Result<()> { let current_version = "0.1.0"; @@ -3300,6 +3661,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(12000), node_ip: None, node_port: None, @@ -3457,6 +3820,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: Some(12000), node_ip: None, node_port: None, @@ -3614,6 +3979,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -3771,6 +4138,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -3927,6 +4296,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -3999,6 +4370,8 @@ mod tests { local: false, log_dir_path: log_dir.to_path_buf(), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -4056,6 +4429,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -4129,6 +4504,8 @@ mod tests { local: false, log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -4194,6 +4571,8 @@ mod tests { local: 
false, log_dir_path: log_dir.to_path_buf(), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, @@ -4257,6 +4636,8 @@ mod tests { local: false, log_dir_path: log_dir.to_path_buf(), log_format: None, + max_archived_log_files: None, + max_log_files: None, metrics_port: None, node_ip: None, node_port: None, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index 68d7ba7e56..e718d3dad6 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -498,6 +498,8 @@ pub async fn run_node( local: true, log_dir_path: node_info.log_path, log_format: run_options.log_format, + max_archived_log_files: None, + max_log_files: None, metrics_port: run_options.metrics_port, node_ip: None, node_port: run_options.node_port, diff --git a/sn_node_manager/src/rpc.rs b/sn_node_manager/src/rpc.rs index 2c8f15a88b..b9fc50ced8 100644 --- a/sn_node_manager/src/rpc.rs +++ b/sn_node_manager/src/rpc.rs @@ -71,6 +71,8 @@ pub async fn restart_node_service( local: current_node_clone.local, log_dir_path: current_node_clone.log_dir_path.clone(), log_format: current_node_clone.log_format, + max_archived_log_files: current_node_clone.max_archived_log_files, + max_log_files: current_node_clone.max_log_files, metrics_port: None, owner: current_node_clone.owner.clone(), name: current_node_clone.service_name.clone(), @@ -188,6 +190,8 @@ pub async fn restart_node_service( log_dir_path: log_dir_path.clone(), log_format: current_node_clone.log_format, name: new_service_name.clone(), + max_archived_log_files: current_node_clone.max_archived_log_files, + max_log_files: current_node_clone.max_log_files, metrics_port: None, node_ip: current_node_clone.node_ip, node_port: None, @@ -212,6 +216,8 @@ pub async fn restart_node_service( local: current_node_clone.local, log_dir_path, log_format: current_node_clone.log_format, + max_archived_log_files: current_node_clone.max_archived_log_files, + max_log_files: current_node_clone.max_log_files, metrics_port: None, node_ip: current_node_clone.node_ip, node_port: None, diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index daed9e08ab..d53bb4627a 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.30" +version = "0.6.31" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_node = { path = "../sn_node", version = "0.111.3" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.13" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_node = { path = "../sn_node", version = "0.111.4" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11", features=["rpc"] } +sn_service_management = { path = 
"../sn_service_management", version = "0.3.14" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 34fccf19a2..f1bfe2d4d7 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.2" +version = "0.5.3" [features] local = [] @@ -20,10 +20,8 @@ clap = { version = "4.2.1", features = ["derive", "env"] } lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" -reqwest = { version = "0.12.2", default-features = false, features = [ - "rustls-tls", -] } -sn_protocol = { path = "../sn_protocol", version = "0.17.10", optional = true } +reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } +sn_protocol = { path = "../sn_protocol", version = "0.17.11", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 15854dfbff..284a8c6216 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.10" +version = "0.17.11" [features] default = [] @@ -28,9 +28,9 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.14" } -sn_transfers = { path = "../sn_transfers", version = "0.19.2" } -sn_registers = { path = "../sn_registers", version = "0.3.20" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_registers = { path = "../sn_registers", version = "0.3.21" } sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index dbbcf90564..cfdaaccc5f 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.20" +version = "0.3.21" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index ab312a35cb..11863f7a4d 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.13" +version = "0.3.14" [dependencies] async-trait = "0.1" @@ -19,8 +19,8 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.35" } -sn_protocol = { path = "../sn_protocol", version = "0.17.10", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.36" } +sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ "rpc", ] } sn_evm = { path = "../sn_evm", version = "0.1.0" } diff --git 
a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index 2cc7060d33..ee109cc15c 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -101,6 +101,14 @@ impl<'a> ServiceStateActions for NodeService<'a> { args.push(OsString::from("--metrics-server-port")); args.push(OsString::from(metrics_port.to_string())); } + if let Some(max_archived_log_files) = self.service_data.max_archived_log_files { + args.push(OsString::from("--max-archived-log-files")); + args.push(OsString::from(max_archived_log_files.to_string())); + } + if let Some(max_log_files) = self.service_data.max_log_files { + args.push(OsString::from("--max-log-files")); + args.push(OsString::from(max_log_files.to_string())); + } if let Some(owner) = &self.service_data.owner { args.push(OsString::from("--owner")); @@ -267,6 +275,8 @@ pub struct NodeServiceData { pub local: bool, pub log_dir_path: PathBuf, pub log_format: Option, + pub max_archived_log_files: Option, + pub max_log_files: Option, #[serde(default)] pub metrics_port: Option, #[serde(default)] diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 79ee0bff96..a530dd1c8a 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.2" +version = "0.19.3" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 37431d81fc..697c43a69c 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.6" +version = "0.4.7" [features] local = ["sn_peers_acquisition/local"] diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index a98ec803c4..b040667397 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.53" +version = "0.1.54" [dependencies] From 8eb7df8a05d32594cd3f2f3d955c5b8a30d2708b Mon Sep 17 00:00:00 2001 From: grumbach Date: Thu, 10 Oct 2024 18:40:38 +0900 Subject: [PATCH 147/255] feat: docs and useability improvements for merge --- README.md | 262 ++++++----------------------- autonomi_cli/src/access/keys.rs | 2 +- autonomi_cli/src/access/network.rs | 23 ++- evm_testnet/src/main.rs | 36 ++-- evmlib/src/utils.rs | 42 +++-- sn_evm/src/evm.rs | 3 + sn_node/src/bin/safenode/main.rs | 5 + 7 files changed, 124 insertions(+), 249 deletions(-) diff --git a/README.md b/README.md index 6eff1e78fe..ee5f1573bd 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,27 @@ -# The Safe Network +# The Autonomi Network (previously Safe Network) -[SafenetForum.org](https://safenetforum.org/) +[Autonomi.com](https://autonomi.com/) Own your data. Share your disk space. Get paid for doing so.
-The Data on the Safe Network is Decentralised, Autonomous, and built atop of Kademlia and
+The Data on the Autonomi Network is Decentralised, Autonomous, and built atop Kademlia and
Libp2p.
## Table of Contents -- [For Users](#for-Users) +- [For Users](#for-users) - [For Developers](#for-developers) - [For the Technical](#for-the-technical) -- [Using a Local Network](#Using-a-local-network) +- [Using a Local Network](#using-a-local-network) - [Metrics Dashboard](#metrics-dashboard) ### For Users -- [CLI](https://github.com/maidsafe/safe_network/blob/main/sn_cli/README.md) The Command Line +- [CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line Interface, allowing users to interact with the network from their terminal. - [Node](https://github.com/maidsafe//safe_network/blob/main/sn_node/README.md) The backbone of the safe network. Nodes can be run on commodity hardware and provide storage space and validation of transactions to the network. +- Web App: Coming Soon! #### Building the Node from Source @@ -31,35 +32,22 @@ You should build from the `stable` branch, as follows: ``` git checkout stable -export FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad -export GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c -export NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 -export PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f cargo build --release --features=network-contacts --bin safenode ``` -For more information about the keys, please refer to the [Keys](#keys) section below. +#### Running the Node -### For Developers - -#### Connecting to the Beta Network - -##### Keys - -Various keys in the network control where initial funds are distributed and how ongoing fees and -royalties are collected. They are also used as part of the node version string, to determine whether -a connecting node is compatible. - -For a client to connect to the current beta network, these keys must be set at build time: +To run a node and receive rewards, you need to specify your Ethereum address as a parameter. Rewards are paid to the specified address. ``` -FOUNDATION_PK=8c89a2230096d07b3013089ffd594b23f468e72f19672c3dc1e50747c4c954fbf54ef8e98809e2d2253b14321e2123ad -GENESIS_PK=93fa3c5e1b68ace4fb02845f89f1eb5ff42c64cd31ee1b908d7c3bbb236d009ae4ae9a1a16d42bc7586e88db1248494c -NETWORK_ROYALTIES_PK=87ec7d1e5e0252d29b1b0ac42a04d7e8daf7dd9f212a23dbc4a9ca5a6442fdab74196ef7cca150ecd6f9848d49148ed4 -PAYMENT_FORWARD_PK=887c9371cb9a1467cd45b6f31367520ab44cc40281d52039acfd6f967bdb0c3e214bb81f2a0adcf683bd6608980a7b5f +cargo run --release --bin safenode --features=network-contacts -- --rewards-address ``` -##### Features +More options about EVM Network below. + +### For Developers + +#### Build You should also build `safe` with the `network-contacts` and `distribution` features enabled: @@ -73,25 +61,16 @@ For `safenode`, only the `network-contacts` feature should be required: cargo build --release --features=network-contacts --bin safenode ``` -#### Utility Scripts - -When you start a network there are a few scripts to aid with basic processes: +#### Main Crates -- `resources/scripts/claim-genesis.sh` which will claim the genesis tokens for a wallet on a launched network (if you - have set up the foundation wallet locally by adding a `client/account_secret` and regenerating the wallet or directly - adding the `client/wallet/main_secret_key` itself). 
-- `resources/scripts/make-wallets.sh` which if you have a wallet with a balance will create a number of wallets with - another balance. eg `resources/scripts/make-wallets.sh 5 1` will make 5 wallets with 1 token. -- `resources/scripts/upload-random-data` will use the existing `client` to upload random data to the network. - -- [Client](https://github.com/maidsafe/safe_network/blob/main/sn_client/README.md) The client APIs - allowing use of the SafeNetwork to users and developers. -- [Registers](https://github.com/maidsafe/safe_network/blob/main/sn_registers/README.md) The CRDT - registers structures available on the network. +- [Autonomi API](https://github.com/maidsafe/safe_network/blob/main/autonomi/README.md) The client APIs + allowing use of the Autonomi Network to users and developers. +- [Autonomi CLI](https://github.com/maidsafe/safe_network/blob/main/autonomi_cli/README.md) The Command Line + Interface, allowing users to interact with the network from their terminal. +- [Node](https://github.com/maidsafe/safe_network/blob/main/sn_node/README.md) The backbone of the + autonomi network. Nodes can be run on commodity hardware and run the Network. - [Node Manager](https://github.com/maidsafe/safe_network/blob/main/sn_node_manager/README.md) Use to create a local network for development and testing. -- [Faucet](https://github.com/maidsafe/safe_network/blob/main/sn_faucet/README.md) The local faucet - server, used to claim genesis and request tokens from the network. - [Node RPC](https://github.com/maidsafe/safe_network/blob/main/sn_node_rpc_client/README.md) The RPC server used by the nodes to expose API calls to the outside world. @@ -106,19 +85,9 @@ If building for `wasm32` then `websockets` are enabled by default as this is the available to communicate with a network as things stand. (And that network must have `websockets` enabled.) -##### Building for wasm32 +#### Building for wasm32 -- Install [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/) -- `cd sn_client && wasm-pack build` - -You can then pull this package into a web app eg, to use it. - -eg `await safe.get_data("/ip4/127.0.0.1/tcp/59324/ws/p2p/12D3KooWG6kyBwLVHj5hYK2SqGkP4GqrCz5gfwsvPBYic4c4TeUz","9d7e115061066126482a229822e6d68737bd67d826c269762c0f64ce87af6b4c")` - -#### Browser usage - -Browser usage is highly experimental, but the wasm32 target for `sn_client` _should_ work here. -YMMV until stabilised. +WASM support for the autonomi API is currently under active development. More docs coming soon. ### For the Technical @@ -131,44 +100,49 @@ YMMV until stabilised. - [Protocol](https://github.com/maidsafe/safe_network/blob/main/sn_protocol/README.md) The protocol used by the safe network. - [Transfers](https://github.com/maidsafe/safe_network/blob/main/sn_transfers/README.md) The - transfers crate, used to send and receive tokens on the network. + transfers crate, used to send and receive tokens Native to the network. +- [Registers](https://github.com/maidsafe/safe_network/blob/main/sn_registers/README.md) The + registers crate, used for the Register CRDT data type on the network. - [Peers Acquisition](https://github.com/maidsafe/safe_network/blob/main/sn_peers_acquisition/README.md) The peers acquisition crate, or: how the network layer discovers bootstrap peers. - [Build Info](https://github.com/maidsafe/safe_network/blob/main/sn_build_info/README.md) Small helper used to get the build/commit versioning info for debug purposes. 
-## Using a Local Network
+### Using a Local Network

We can explore the network's features by using multiple node processes to form a local network. We
also need to run a local EVM network for our nodes and client to connect to.

-The latest version of [Rust](https://www.rust-lang.org/learn/get-started) should be installed. If
-you already have an installation, use `rustup update` to get the latest version.
+Follow these steps to create a local network:
+
+##### 1. Prerequisites
+
+The latest version of [Rust](https://www.rust-lang.org/learn/get-started) should be installed. If you already have an installation, use `rustup update` to get the latest version.
+
+Run all the commands from the root of this repository.

-### Run the Network
+If you haven't already, install Foundry. We need to have access to Anvil, which is packaged with Foundry, to run an EVM node: https://book.getfoundry.sh/getting-started/installation
+
+To collect rewards for your nodes, you will need an EVM address; you can create one using [metamask](https://metamask.io/).

-Follow these steps to create a local network:
-
-1. If you haven't already, install Foundry. We need to have access to Anvil, which is packaged with
-   Foundry, to run an EVM node: https://book.getfoundry.sh/getting-started/installation
-2. Run a local EVM node:
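Before moving on to step 2, a quick way to sanity-check the prerequisites, assuming Foundry's installer has put `anvil` on your `PATH`:

```bash
# Confirm the Rust toolchain and Anvil (bundled with Foundry) are available
rustc --version
anvil --version
```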
+##### 2. Run a local EVM node ```sh cargo run --bin evm_testnet ``` -Take note of the console output for the next step (`RPC URL`, `Payment token address` & `Chunk payments address`). +This creates a CSV file with the EVM network params in your data directory. -3. Create the test network and pass the EVM params:
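The `evm_testnet` process also prints a `SECRET_KEY` for a pre-funded account, which the upload step later needs (see step 5). One way to keep it handy, sketched here with a placeholder value:

```bash
# Placeholder value; paste the key actually printed by evm_testnet
export SECRET_KEY=0x0000000000000000000000000000000000000000000000000000000000000000
```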
+##### 3. Create the test network and pass the EVM params
+
+`--rewards-address` _is the address where you will receive your node earnings._

```bash
-cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address evm-custom --rpc-url --payment-token-address --data-payments-address
+cargo run --bin=safenode-manager --features=local -- local run --build --clean --rewards-address
```

+The EVM Network parameters are loaded from the CSV file in your data directory automatically when the `local` feature flag is enabled (`--features=local`).

-4. Verify node status:
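As a concrete sketch of the command above, with the zero address standing in for a real rewards address (use an EVM address you control):

```bash
# The address below is only a placeholder; earnings go to whatever address you pass
cargo run --bin=safenode-manager --features=local -- local run --build --clean \
    --rewards-address 0x0000000000000000000000000000000000000000
```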
+The EVM Network parameters are loaded from the CSV file in your data directory automatically when the `local` feature flag is enabled (`--features=local`). + +##### 4. Verify node status ```bash cargo run --bin safenode-manager --features local -- status @@ -177,160 +151,22 @@ cargo run --bin safenode-manager --features local -- status The node manager's `run` command starts the node processes. The `status` command should show twenty-five running nodes. -### Files +##### 5. Uploading and Downloading Data -The file storage capability can be demonstrated by uploading files to the local network, then -retrieving them. +To upload a file or a directory, you need to set the `SECRET_KEY` environment variable to your EVM secret key: -Upload a file or a directory: +> When running a local network, you can use the `SECRET_KEY` printed by the `evm_testnet` command [step 2](#2-run-a-local-evm-node) as it has all the money. ```bash -cargo run --bin safe --features local -- files upload +SECRET_KEY= cargo run --bin autonomi_cli --features local -- file upload ``` -The output will show that the upload costs some tokens. +The output will print out the address at which the content was uploaded. -Now download the files again: +Now to download the files again: ```bash -cargo run --bin safe --features local -- files download -``` - -### Folders - -The folders storage capability can be demonstrated by storing folders on the network, making -changes and syncing them with the stored version on the network, as well as downloading the entire -folders hierarchy onto a local directory. - -All the following commands act on the current directory by default, but since we are building the -CLI binary to run it, we will have to always provide the directory we want them to act as a path -argument. -When otherwise running directly an already built CLI binary, we can simply make sure we are located -at the directory we want to act on without the need of providing the path as argument. - -Initialise a directory to then be able to track changes made on it, and sync them up with the -network: - -```bash -cargo run --bin safe --features local -- folders init -``` - -Make sure you made a backup copy of the "recovery secret" generated by the above command, or the -one you have provided when prompted. - -If any changes are now made to files or directories within this folder (at this point all files and -folders are considered new since it has just been initialised for tracking), before trying to push -those changes to the network, we can get a report of the changes that have been made locally: - -```bash -cargo run --bin safe --features local -- folders status -``` - -We can now push all local changes made to files and directories to the network, as well as pull any -changes that could have been made to the version stored on the network since last time we synced -with it: - -```bash -cargo run --bin safe --features local -- folders sync -``` - -Now that's all stored on the network, you can download the folders onto any other path by providing -it as the target directory to the following command (you will be prompted to enter the "recovery -secret" you obtained when initialising the directory with `init` command): - -```bash -cargo run --bin safe --features local -- folders download -``` - -### Token Transfers - -Use your local wallet to demonstrate sending tokens and receiving transfers. 
- -First, get your wallet address, this address can be safely shared publicly - -``` -cargo run --bin safe -- wallet address -``` - -You can also get your balance with: - -``` -cargo run --bin safe -- wallet balance -``` - -Now to send some tokens to an address: - -``` -cargo run --bin safe --features local -- wallet send 2 [address] -``` - -This will output a transfer as a hex string, which should be sent to the recipient. -This transfer is encrypted to the recipient so only the recipient can read and redeem it. -To receive a transfer, simply paste it after the wallet receive command: - -``` -cargo run --bin safe --features local -- wallet receive [transfer] -``` - -#### Out of band transaction signing - -When you want to transfer tokens from a cold storage or hardware wallet, you can create and sign -the transaction offline. This is done to prevent the private key from being exposed to any online -threats. -For this type of scenarios you can create a watch-only wallet (it holds only a public key) on the -online device, while using a hot-wallet (which holds the secret key) on a device that is offline. -The following steps are a simple guide for performing such an operation. - -Steps on the online device/computer with a watch-only wallet: - -1. Create a watch-only wallet using the hex-encoded public key: - `cargo run --release --bin safe -- wowallet create ` - -2. Deposit a cash-note, owned by the public key used above when creating, into the watch-only - wallet: - `cargo run --release --bin safe -- wowallet deposit --cash-note ` - -3. Build an unsigned transaction: - `cargo run --release --bin safe -- wowallet transaction ` - -4. Copy the built unsigned Tx generated by the above command, and send it out-of-band to the - desired device where the hot-wallet can be loaded. - -Steps on the offline device/computer with the corresponding hot-wallet: - -5. If you still don't have a hot-wallet created, which owns the cash-notes used to build the - unsigned transaction, create it with the corresponding secret key: - `cargo run --release --bin safe -- wallet create --key ` - -6. Use the hot-wallet to sign the built transaction: - `cargo run --release --bin safe -- wallet sign ` - -7. Copy the signed Tx generated by the above command, and send it out-of-band back to the online - device. - -Steps on the online device/computer with the watch-only wallet: - -8. Broadcast the signed transaction to the network using the watch-only wallet: - `cargo run --release --bin safe -- wowallet broadcast ` - -9. Deposit the change cash-note to the watch-only wallet: - `cargo run --release --bin safe -- wowallet deposit ` - -10. Send/share the output cash-note generated by the above command at step #8 to/with the - recipient. 
- -### Auditing - -We can verify a spend, optionally going back to the genesis transaction: - -``` -cargo run --bin safe --features local -- wallet verify [--genesis] [spend address] -``` - -All spends from genesis can be audited: - -``` -cargo run --bin safe --features local -- wallet audit +cargo run --bin autonomi_cli --features local -- file download ``` ### Registers diff --git a/autonomi_cli/src/access/keys.rs b/autonomi_cli/src/access/keys.rs index 9fca310124..ef06e8c4e1 100644 --- a/autonomi_cli/src/access/keys.rs +++ b/autonomi_cli/src/access/keys.rs @@ -24,7 +24,7 @@ const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key"; pub fn load_evm_wallet() -> Result { let secret_key = get_secret_key().wrap_err("The secret key is required to perform this action")?; - let network = crate::network::get_evm_network_from_env(); + let network = crate::network::get_evm_network_from_env()?; let wallet = Wallet::new_from_private_key(network, &secret_key) .wrap_err("Failed to load EVM wallet from key")?; Ok(wallet) diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs index 65e2495377..0502988ddf 100644 --- a/autonomi_cli/src/access/network.rs +++ b/autonomi_cli/src/access/network.rs @@ -22,11 +22,22 @@ pub async fn get_peers(peers: PeersArgs) -> Result> { .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere") } -pub fn get_evm_network_from_env() -> EvmNetwork { - let network = autonomi::evm::network_from_env(); - if matches!(network, EvmNetwork::Custom(_)) { - println!("Using custom EVM network found from environment variables"); - info!("Using custom EVM network found from environment variables {network:?}"); +pub fn get_evm_network_from_env() -> Result { + #[cfg(feature = "local")] + { + println!("Getting EVM network from local CSV as the local feature is enabled"); + let network = autonomi::evm::local_evm_network_from_csv() + .wrap_err("Failed to get EVM network from local CSV") + .with_suggestion(|| "make sure you've set up the local EVM network by running `cargo run --bin evm_testnet`")?; + Ok(network) + } + #[cfg(not(feature = "local"))] + { + let network = autonomi::evm::network_from_env(); + if matches!(network, EvmNetwork::Custom(_)) { + println!("Using custom EVM network found from environment variables"); + info!("Using custom EVM network found from environment variables {network:?}"); + } + Ok(network) } - network } diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs index d3e4d93ec0..1e9ddca501 100644 --- a/evm_testnet/src/main.rs +++ b/evm_testnet/src/main.rs @@ -44,8 +44,8 @@ async fn start_node(genesis_wallet: Option
) {
     testnet_data.print();
     keep_alive(testnet).await;
 
-    TestnetData::remove_csv();
     println!("Ethereum node stopped.");
+    TestnetData::remove_csv();
 }
 
 async fn transfer_funds(testnet: &Testnet, genesis_wallet: Address) {
@@ -128,7 +128,7 @@ impl TestnetData {
 
         println!();
         println!("--------------");
-        println!("Run the CLI or Node with the following env vars set to use this network:");
+        println!("Run the CLI or Node with the following env vars set to manually connect to this network:");
         println!(
             "{}=\"{}\" {}=\"{}\" {}=\"{}\"",
             sn_evm::evm::RPC_URL,
@@ -139,33 +139,39 @@ impl TestnetData {
             self.data_payments_address
         );
         println!("--------------");
+        println!("For CLI operations that require a payment, use the deployer secret key by providing this env var:");
+        println!("SECRET_KEY=\"{}\"", self.deployer_wallet_private_key);
+        println!("--------------");
         println!();
     }
 
     fn save_csv(&self) {
-        let path = dirs_next::data_dir()
-            .expect("Could not get data_dir to save evm testnet data")
-            .join("safe");
+        let csv_path = evmlib::utils::get_evm_testnet_csv_path()
+            .expect("Could not get data_dir to save evm testnet data");
+        let path = csv_path
+            .parent()
+            .expect("Could not get parent dir of csv_path");
 
         if !path.exists() {
-            std::fs::create_dir_all(&path).expect("Could not create safe directory");
+            std::fs::create_dir_all(path).expect("Could not create safe directory");
         }
 
-        let path = path.join("evm_testnet_data.csv");
         let csv = format!(
             "{},{},{}",
             self.rpc_url, self.payment_token_address, self.data_payments_address
         );
-        std::fs::write(&path, csv).expect("Could not write to evm_testnet_data.csv file");
-        println!("EVM testnet data saved to: {path:?}");
+        std::fs::write(&csv_path, csv).expect("Could not write to evm_testnet_data.csv file");
+        println!("EVM testnet data saved to: {csv_path:?}");
+        println!("When running the Node or CLI with --features=local, it will automatically use this network by loading the EVM Network's info from the CSV file.");
+        println!();
     }
 
     fn remove_csv() {
-        let path = dirs_next::data_dir()
-            .expect("Could not get data_dir to remove evm testnet data")
-            .join("safe")
-            .join("evm_testnet_data.csv");
-        if path.exists() {
-            std::fs::remove_file(&path).expect("Could not remove evm_testnet_data.csv file");
+        let csv_path = evmlib::utils::get_evm_testnet_csv_path()
+            .expect("Could not get data_dir to remove evm testnet data");
+        if csv_path.exists() {
+            std::fs::remove_file(&csv_path).expect("Could not remove evm_testnet_data.csv file");
+        } else {
+            eprintln!("No EVM testnet data CSV file found to remove");
         }
     }
 }
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index e878c83a9f..f037f03967 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -3,6 +3,7 @@ use crate::{CustomNetwork, Network};
 use dirs_next::data_dir;
 use rand::Rng;
 use std::env;
+use std::path::PathBuf;
 
 pub const EVM_TESTNET_CSV_FILENAME: &str = "evm_testnet_data.csv";
 
@@ -16,8 +17,8 @@ const DATA_PAYMENTS_ADDRESS_BUILD_TIME_VAL: Option<&str> = option_env!("DATA_PAY
 
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
-    #[error("Failed to get EVM network")]
-    FailedToGetEvmNetwork,
+    #[error("Failed to get EVM network: {0}")]
+    FailedToGetEvmNetwork(String),
 }
 
 /// Generate a random Address.
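Since `save_csv` and `remove_csv` both resolve the file through the shared `evmlib::utils::get_evm_testnet_csv_path()` helper (added in the next hunk), other tooling can locate the same file. A minimal sketch, assuming `evmlib` as a dependency (the helper name `local_testnet_csv_present` is hypothetical):

```rust
/// Hypothetical check: has a local EVM testnet written its
/// `evm_testnet_data.csv` into the platform data directory?
fn local_testnet_csv_present() -> bool {
    evmlib::utils::get_evm_testnet_csv_path()
        .map(|path| path.exists())
        .unwrap_or(false)
}
```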
@@ -44,7 +45,11 @@ pub fn evm_network_from_env() -> Result { .or_else(|| DATA_PAYMENTS_ADDRESS_BUILD_TIME_VAL.map(|s| s.to_string())), ] .into_iter() - .map(|var| var.ok_or(Error::FailedToGetEvmNetwork)) + .map(|var| { + var.ok_or(Error::FailedToGetEvmNetwork(format!( + "missing env var, make sure to set all of: {RPC_URL}, {PAYMENT_TOKEN_ADDRESS}, {DATA_PAYMENTS_ADDRESS}" + ))) + }) .collect::, Error>>(); let use_local_evm = std::env::var("EVM_NETWORK") @@ -69,24 +74,31 @@ pub fn evm_network_from_env() -> Result { } } +pub fn get_evm_testnet_csv_path() -> Result { + let file = data_dir() + .ok_or(Error::FailedToGetEvmNetwork( + "failed to get data dir when fetching evm testnet CSV file".to_string(), + ))? + .join("safe") + .join(EVM_TESTNET_CSV_FILENAME); + Ok(file) +} + /// Get the `Network::Custom` from the local EVM testnet CSV file pub fn local_evm_network_from_csv() -> Result { // load the csv - let csv_path = data_dir() - .ok_or(Error::FailedToGetEvmNetwork) - .inspect_err(|_| error!("Failed to get data dir when fetching evm testnet CSV file"))? - .join("safe") - .join(EVM_TESTNET_CSV_FILENAME); + let csv_path = get_evm_testnet_csv_path()?; if !csv_path.exists() { error!("evm data csv path does not exist {:?}", csv_path); - return Err(Error::FailedToGetEvmNetwork) - .inspect_err(|_| error!("Missing evm testnet CSV file"))?; + return Err(Error::FailedToGetEvmNetwork(format!( + "evm data csv path does not exist {csv_path:?}" + ))); } - let csv = std::fs::read_to_string(&csv_path) - .map_err(|_| Error::FailedToGetEvmNetwork) - .inspect_err(|_| error!("Failed to read evm testnet CSV file"))?; + let csv = std::fs::read_to_string(&csv_path).map_err(|_| { + Error::FailedToGetEvmNetwork(format!("failed to read evm testnet CSV file {csv_path:?}")) + })?; let parts: Vec<&str> = csv.split(',').collect(); match parts.as_slice() { [rpc_url, payment_token_address, chunk_payments_address] => Ok(Network::Custom( @@ -94,7 +106,9 @@ pub fn local_evm_network_from_csv() -> Result { )), _ => { error!("Invalid data in evm testnet CSV file"); - Err(Error::FailedToGetEvmNetwork) + Err(Error::FailedToGetEvmNetwork( + "invalid data in evm testnet CSV file".to_string(), + )) } } } diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs index 13b3ca9500..ee82824e53 100644 --- a/sn_evm/src/evm.rs +++ b/sn_evm/src/evm.rs @@ -20,3 +20,6 @@ pub fn network_from_env() -> EvmNetwork { } } } + +/// Load the evm network from local CSV +pub use evmlib::utils::local_evm_network_from_csv; diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index dacbfad45d..5e541198de 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -252,6 +252,10 @@ fn main() -> Result<()> { println!("Package version: {}", sn_build_info::package_version()); return Ok(()); } + + #[cfg(feature = "local")] + let evm_network = sn_evm::utils::local_evm_network_from_csv()?; + #[cfg(not(feature = "local"))] let evm_network: EvmNetwork = opt .evm_network .as_ref() @@ -302,6 +306,7 @@ fn main() -> Result<()> { bootstrap_peers, opt.local, root_dir, + #[cfg(feature = "upnp")] opt.upnp, ); node_builder.is_behind_home_network = opt.home_network; From c214a4d0bb7454f2e872ae224978af2b261b4e61 Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 24 Sep 2024 23:07:09 +0800 Subject: [PATCH 148/255] feat(register)!: network only store ops list BREAKING CHANGE! 
The CRDT MerkleReg is now only stored locally on the client;
the node side only stores the ops list (signed by the owner).
---
 autonomi/src/client/registers.rs      | 158 +++--
 sn_client/src/api.rs                  |  97 ++-
 sn_client/src/register.rs             | 267 +++----
 sn_client/src/uploader/mod.rs         |   4 +-
 sn_client/src/uploader/tests/setup.rs |  22 +-
 sn_client/src/uploader/upload.rs      |  27 +-
 sn_networking/src/driver.rs           |   4 +-
 sn_node/tests/storage_payments.rs     |  21 +
 sn_registers/src/lib.rs               |   1 +
 sn_registers/src/reg_crdt.rs          | 103 ++-
 sn_registers/src/register.rs          | 972 ++++----------------
 sn_registers/src/register_op.rs       |   2 +-
 12 files changed, 519 insertions(+), 1159 deletions(-)

diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index ad279837e6..6e65389c2a 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -10,27 +10,22 @@
 pub use bls::SecretKey as RegisterSecretKey;
 use sn_evm::Amount;
 use sn_evm::AttoTokens;
-use sn_networking::GetRecordError;
 use sn_networking::VerificationKind;
 use sn_protocol::storage::RetryStrategy;
 pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress};
-use tracing::warn;
 
 use crate::client::data::PayError;
 use crate::client::Client;
 use bytes::Bytes;
 use evmlib::wallet::Wallet;
 use libp2p::kad::{Quorum, Record};
-use sn_networking::GetRecordCfg;
-use sn_networking::NetworkError;
-use sn_networking::PutRecordCfg;
+use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg};
 use sn_protocol::storage::try_deserialize_record;
 use sn_protocol::storage::try_serialize_record;
 use sn_protocol::storage::RecordKind;
 use sn_protocol::NetworkAddress;
-use sn_registers::Register as ClientRegister;
-use sn_registers::SignedRegister;
-use sn_registers::{EntryHash, Permissions};
+use sn_registers::Register as BaseRegister;
+use sn_registers::{Permissions, RegisterCrdt, RegisterOp, SignedRegister};
 use std::collections::BTreeSet;
 use xor_name::XorName;
 
@@ -56,27 +51,67 @@ pub enum RegisterError {
 
 #[derive(Clone, Debug)]
 pub struct Register {
-    pub(crate) inner: SignedRegister,
+    signed_reg: SignedRegister,
+    crdt_reg: RegisterCrdt,
 }
 
 impl Register {
     pub fn address(&self) -> &RegisterAddress {
-        self.inner.address()
+        self.signed_reg.address()
     }
 
     /// Retrieve the current values of the register. There can be multiple values
     /// in case a register was updated concurrently. This is because of the nature
     /// of registers, which allows for network concurrency.
pub fn values(&self) -> Vec { - self.inner - .clone() - .register() - .expect("register to be valid") + self.crdt_reg .read() .into_iter() .map(|(_hash, value)| value.into()) .collect() } + + fn new( + initial_value: Option, + name: XorName, + owner: RegisterSecretKey, + permissions: RegisterPermissions, + ) -> Result { + let pk = owner.public_key(); + + let base_register = BaseRegister::new(pk, name, permissions); + + let signature = owner.sign(base_register.bytes().map_err(RegisterError::Write)?); + let signed_reg = SignedRegister::new(base_register, signature, BTreeSet::new()); + + let crdt_reg = RegisterCrdt::new(*signed_reg.address()); + + let mut register = Register { + signed_reg, + crdt_reg, + }; + + if let Some(value) = initial_value { + register.write_atop(&value, &owner)?; + } + + Ok(register) + } + + fn write_atop(&mut self, entry: &[u8], owner: &RegisterSecretKey) -> Result<(), RegisterError> { + let children: BTreeSet<_> = self.crdt_reg.read().iter().map(|(hash, _)| *hash).collect(); + + let (_hash, address, crdt_op) = self + .crdt_reg + .write(entry.to_vec(), &children) + .map_err(RegisterError::Write)?; + + let op = RegisterOp::new(address, crdt_op, owner); + + let _ = self.signed_reg.add_op(op); + + Ok(()) + } } impl Client { @@ -99,9 +134,11 @@ impl Client { is_register: true, }; - let register = match self.network.get_record_from_network(key, &get_cfg).await { + let signed_reg = match self.network.get_record_from_network(key, &get_cfg).await { Ok(record) => { - try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)? + let signed_reg: SignedRegister = + try_deserialize_record(&record).map_err(|_| RegisterError::Serialization)?; + signed_reg } // manage forked register case Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { @@ -128,58 +165,33 @@ impl Client { }; // Make sure the fetched record contains valid CRDT operations - register.verify().map_err(|err| { - error!("Failed to verify register {address:?} with error: {err}"); - RegisterError::FailedVerification - })?; + signed_reg + .verify() + .map_err(|_| RegisterError::FailedVerification)?; + + let mut crdt_reg = RegisterCrdt::new(*signed_reg.address()); + for op in signed_reg.ops() { + if let Err(err) = crdt_reg.apply_op(op.clone()) { + return Err(RegisterError::Write(err)); + } + } - Ok(Register { inner: register }) + Ok(Register { + signed_reg, + crdt_reg, + }) } /// Updates a Register on the network with a new value. This will overwrite existing value(s). pub async fn register_update( &self, - register: Register, + mut register: Register, new_value: Bytes, owner: RegisterSecretKey, ) -> Result<(), RegisterError> { - // Fetch the current register - let mut signed_register = register.inner; - let mut register = signed_register - .clone() - .register() - .map_err(|err| { - error!( - "Failed to get register from signed register as it failed verification: {err}" - ); - RegisterError::FailedVerification - })? 
- .clone(); + register.write_atop(&new_value, &owner)?; - info!("Updating register at addr: {}", register.address()); - - // Get all current branches - let children: BTreeSet = register.read().into_iter().map(|(e, _)| e).collect(); - - // Write the new value to all branches - let (_, op) = register - .write(new_value.into(), &children, &owner) - .map_err(|err| { - error!( - "Failed to write to register at addr: {} : {err}", - register.address() - ); - RegisterError::Write(err) - })?; - - // Apply the operation to the register - signed_register.add_op(op.clone()).map_err(|err| { - error!( - "Failed to add op to register at addr: {} : {err}", - register.address() - ); - RegisterError::Write(err) - })?; + let signed_register = register.signed_reg.clone(); // Prepare the record for network storage let record = Record { @@ -230,7 +242,7 @@ impl Client { let pk = owner.public_key(); let name = XorName::from_content_parts(&[name.as_bytes()]); let permissions = Permissions::new_with([pk]); - let register = ClientRegister::new(pk, name, permissions); + let register = Register::new(None, name, owner, permissions)?; let reg_xor = register.address().xorname(); // get cost to store register @@ -281,23 +293,14 @@ impl Client { permissions: RegisterPermissions, wallet: &Wallet, ) -> Result { - let pk = owner.public_key(); + info!("Creating register with name: {name}"); let name = XorName::from_content_parts(&[name.as_bytes()]); // Owner can write to the register. - let mut register = ClientRegister::new(pk, name, permissions); - let address = NetworkAddress::from_register_address(*register.address()); - - info!("Creating register at address: {address}"); + let register = Register::new(Some(value), name, owner, permissions)?; + let address = register.address(); - let entries = register - .read() - .into_iter() - .map(|(entry_hash, _value)| entry_hash) - .collect(); - - let _ = register.write(value.into(), &entries, &owner); - let reg_xor = register.address().xorname(); + let reg_xor = address.xorname(); debug!("Paying for register at address: {address}"); let (payment_proofs, _skipped) = self .pay(std::iter::once(reg_xor), wallet) @@ -317,13 +320,10 @@ impl Client { .to_peer_id_payee() .ok_or(RegisterError::InvalidQuote) .inspect_err(|err| error!("Failed to get payee from payment proof: {err}"))?; - let signed_register = register.clone().into_signed(&owner).map_err(|err| { - error!("Failed to sign register at address: {address} : {err}"); - RegisterError::CouldNotSign(err) - })?; + let signed_register = register.signed_reg.clone(); let record = Record { - key: address.to_record_key(), + key: NetworkAddress::from_register_address(*address).to_record_key(), value: try_serialize_record( &(proof, &signed_register), RecordKind::RegisterWithPayment, @@ -356,8 +356,6 @@ impl Client { error!("Failed to put record - register {address} to the network: {err}") })?; - Ok(Register { - inner: signed_register, - }) + Ok(register) } } diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index e13cdd21a0..54bf53f8a2 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -314,37 +314,6 @@ impl Client { /// /// [Signature] /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::callsite::register; - /// use xor_name::XorName; - /// use sn_registers::Register; - /// use sn_protocol::messages::RegisterCmd; - /// let client = Client::new(SecretKey::random(), None, None, 
None).await?; - /// - /// // Set up register prerequisites - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let owner_sk = SecretKey::random(); - /// let owner_pk = owner_sk.public_key(); - /// - /// // set up register - /// let mut register = Register::new(owner_pk, xorname, Default::default()); - /// let mut register_clone = register.clone(); - /// - /// // Use of client.sign() with register through RegisterCmd::Create - /// let cmd = RegisterCmd::Create { - /// register, - /// signature: client.sign(register_clone.bytes()?), - /// }; - /// # Ok(()) - /// # } - /// ``` pub fn sign>(&self, data: T) -> Signature { self.signer.sign(data) } @@ -1105,10 +1074,27 @@ fn merge_register_records( mod tests { use std::collections::BTreeSet; - use sn_registers::Register; + use sn_registers::{Register, RegisterCrdt, RegisterOp}; use super::*; + fn write_atop( + signed_reg: &mut SignedRegister, + crdt_reg: &mut RegisterCrdt, + entry: &[u8], + owner: &SecretKey, + ) -> eyre::Result<()> { + let children: BTreeSet<_> = crdt_reg.read().iter().map(|(hash, _)| *hash).collect(); + + let (_hash, address, crdt_op) = crdt_reg.write(entry.to_vec(), &children)?; + + let op = RegisterOp::new(address, crdt_op, owner); + + signed_reg.add_op(op)?; + + Ok(()) + } + #[test] fn test_merge_register_records() -> eyre::Result<()> { let mut rng = rand::thread_rng(); @@ -1117,28 +1103,33 @@ mod tests { let owner_pk = owner_sk.public_key(); let address = RegisterAddress::new(meta, owner_pk); + let base_register = Register::new(owner_pk, meta, Default::default()); + let signature = owner_sk.sign(base_register.bytes()?); + // prepare registers - let mut register_root = Register::new(owner_pk, meta, Default::default()); - let (root_hash, _) = - register_root.write(b"root_entry".to_vec(), &BTreeSet::default(), &owner_sk)?; - let root = BTreeSet::from_iter(vec![root_hash]); - let signed_root = register_root.clone().into_signed(&owner_sk)?; - - let mut register1 = register_root.clone(); - let (_hash, op1) = register1.write(b"entry1".to_vec(), &root, &owner_sk)?; - let mut signed_register1 = signed_root.clone(); - signed_register1.add_op(op1)?; - - let mut register2 = register_root.clone(); - let (_hash, op2) = register2.write(b"entry2".to_vec(), &root, &owner_sk)?; - let mut signed_register2 = signed_root; - signed_register2.add_op(op2)?; - - let mut register_bad = Register::new(owner_pk, meta, Default::default()); - let (_hash, _op_bad) = - register_bad.write(b"bad_root".to_vec(), &BTreeSet::default(), &owner_sk)?; - let invalid_sig = register2.sign(&owner_sk)?; // steal sig from something else - let signed_register_bad = SignedRegister::new(register_bad, invalid_sig); + let mut register_root = SignedRegister::new(base_register, signature, BTreeSet::new()); + let mut crdt_reg_root = RegisterCrdt::new(address); + + write_atop( + &mut register_root, + &mut crdt_reg_root, + b"root_entry", + &owner_sk, + )?; + + let mut signed_register1 = register_root.clone(); + let mut crdt_reg1 = crdt_reg_root.clone(); + write_atop(&mut signed_register1, &mut crdt_reg1, b"entry1", &owner_sk)?; + + let mut signed_register2 = register_root.clone(); + let mut crdt_reg2 = crdt_reg_root.clone(); + write_atop(&mut signed_register2, &mut crdt_reg2, b"entry2", &owner_sk)?; + + let base_register_bad = Register::new(owner_pk, meta, Default::default()); + let bad_sk = SecretKey::random(); + let signature_bad = bad_sk.sign(base_register_bad.bytes()?); + let signed_register_bad = + 
SignedRegister::new(base_register_bad, signature_bad, BTreeSet::new()); // prepare records let record1 = Record { diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs index 1b164a2f71..f657898bf6 100644 --- a/sn_client/src/register.rs +++ b/sn_client/src/register.rs @@ -15,36 +15,41 @@ use libp2p::{ }; use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; use sn_protocol::{ - error::Error as ProtocolError, - messages::RegisterCmd, storage::{try_serialize_record, RecordKind, RetryStrategy}, NetworkAddress, }; -use sn_registers::{Entry, EntryHash, Permissions, Register, RegisterAddress, SignedRegister}; +use sn_registers::{ + Entry, EntryHash, Error as RegisterError, Permissions, Register, RegisterAddress, RegisterCrdt, + RegisterOp, SignedRegister, +}; use sn_transfers::{NanoTokens, Payment}; -use std::collections::{BTreeSet, HashSet, LinkedList}; +use std::collections::{BTreeSet, HashSet}; use xor_name::XorName; -/// Cached operations made to an offline Register instance are applied locally only, +/// Cached operations made to an offline RegisterCrdt instance are applied locally only, /// and accumulated until the user explicitly calls 'sync'. The user can /// switch back to sync with the network for every op by invoking `online` API. #[derive(Clone, custom_debug::Debug)] pub struct ClientRegister { #[debug(skip)] client: Client, - pub register: Register, - pub ops: LinkedList, // Cached operations. + register: Register, + /// CRDT data of the Register + crdt: RegisterCrdt, + /// Cached operations. + ops: BTreeSet, } impl ClientRegister { - fn create_register(client: Client, meta: XorName, perms: Permissions) -> Self { - let public_key = client.signer_pk(); - - let register = Register::new(public_key, meta, perms); + /// Create with specified meta and permission + pub fn create_register(client: Client, meta: XorName, perms: Permissions) -> Self { + let register = Register::new(client.signer_pk(), meta, perms); + let crdt = RegisterCrdt::new(*register.address()); Self { client, register, - ops: LinkedList::new(), + crdt, + ops: BTreeSet::new(), } } @@ -95,10 +100,12 @@ impl ClientRegister { /// ``` pub fn create_with_addr(client: Client, addr: RegisterAddress) -> Self { let register = Register::new(addr.owner(), addr.meta(), Permissions::default()); + let crdt = RegisterCrdt::new(addr); Self { client, register, - ops: LinkedList::new(), + crdt, + ops: BTreeSet::new(), } } @@ -158,13 +165,12 @@ impl ClientRegister { /// Retrieve a Register from the network to work on it offline. pub(super) async fn retrieve(client: Client, address: RegisterAddress) -> Result { - let register = Self::get_register_from_network(&client, address).await?; + let signed_register = Self::get_register_from_network(&client, address).await?; - Ok(Self { - client, - register, - ops: LinkedList::new(), - }) + let mut register = Self::create_with_addr(client, address); + register.merge(&signed_register); + + Ok(register) } /// Return type: [RegisterAddress] @@ -303,14 +309,17 @@ impl ClientRegister { /// # } /// ``` pub fn size(&self) -> u64 { - self.register.size() + self.crdt.size() } /// Return a value corresponding to the provided 'hash', if present. 
// No usages found in All Places pub fn get(&self, hash: EntryHash) -> Result<&Entry> { - let entry = self.register.get(hash)?; - Ok(entry) + if let Some(entry) = self.crdt.get(hash) { + Ok(entry) + } else { + Err(RegisterError::NoSuchEntry(hash).into()) + } } /// Read the last entry, or entries when there are branches, if the register is not empty. @@ -333,7 +342,7 @@ impl ClientRegister { /// # } /// ``` pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { - self.register.read() + self.crdt.read() } /// Write a new value onto the Register atop latest value. @@ -360,7 +369,7 @@ impl ClientRegister { /// # } /// ``` pub fn write(&mut self, entry: &[u8]) -> Result { - let children = self.register.read(); + let children = self.crdt.read(); if children.len() > 1 { return Err(Error::ContentBranchDetected(children)); } @@ -395,12 +404,8 @@ impl ClientRegister { /// # } /// ``` pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result { - let children: BTreeSet = self - .register - .read() - .into_iter() - .map(|(hash, _)| hash) - .collect(); + let children: BTreeSet = + self.crdt.read().into_iter().map(|(hash, _)| hash).collect(); self.write_atop(entry, &children) } @@ -440,14 +445,13 @@ impl ClientRegister { let public_key = self.client.signer_pk(); self.register.check_user_permissions(public_key)?; - let (entry_hash, op) = self - .register - .write(entry.into(), children, self.client.signer())?; - let cmd = RegisterCmd::Edit(op); + let (hash, address, crdt_op) = self.crdt.write(entry.to_vec(), children)?; - self.ops.push_front(cmd); + let op = RegisterOp::new(address, crdt_op, self.client.signer()); - Ok(entry_hash) + let _ = self.ops.insert(op); + + Ok(hash) } // ********* Online methods ********* @@ -500,8 +504,7 @@ impl ClientRegister { let mut royalties_fees = NanoTokens::zero(); let reg_result = if verify_store { debug!("VERIFYING REGISTER STORED {:?}", self.address()); - - let res = if payment_info.is_some() { + if payment_info.is_some() { // we expect this to be a _fresh_ register. // It still could have been PUT previously, but we'll do a quick verification // instead of thorough one. @@ -510,28 +513,21 @@ impl ClientRegister { .await } else { self.client.verify_register_stored(*self.address()).await - }; - - // we need to keep the error here if verifying, so we can retry and pay for storage - // once more below - match res { - Ok(r) => Ok(r.register()?), - Err(error) => Err(error), } } else { Self::get_register_from_network(&self.client, addr).await }; - let remote_replica = match reg_result { - Ok(r) => r, + + match reg_result { + Ok(remote_replica) => { + self.merge(&remote_replica); + self.push(verify_store).await?; + } // any error here will result in a repayment of the register // TODO: be smart about this and only pay for storage if we need to Err(err) => { debug!("Failed to get register: {err:?}"); debug!("Creating Register as it doesn't exist at {addr:?}!"); - let cmd = RegisterCmd::Create { - register: self.register.clone(), - signature: self.client.sign(self.register.bytes()?), - }; // Let's check if the user has already paid for this address first if payment_info.is_none() { @@ -546,13 +542,11 @@ impl ClientRegister { payment_info = Some((payment, payee)); } - Self::publish_register(self.client.clone(), cmd, payment_info, verify_store) - .await?; - self.register.clone() + // The `creation register` has to come with `payment`. + // Hence it needs to be `published` to network separately. 
+ self.publish_register(payment_info, verify_store).await?; } - }; - self.register.merge(&remote_replica)?; - self.push(verify_store).await?; + } Ok((storage_cost, royalties_fees)) } @@ -581,27 +575,14 @@ impl ClientRegister { /// ``` pub async fn push(&mut self, verify_store: bool) -> Result<()> { let ops_len = self.ops.len(); + let address = *self.address(); if ops_len > 0 { - let address = *self.address(); - debug!("Pushing {ops_len} cached Register cmds at {address}!"); - - // TODO: send them all concurrently - while let Some(cmd) = self.ops.pop_back() { - // We don't need to send the payment proofs here since - // these are all Register mutation cmds which don't require payment. - let result = - Self::publish_register(self.client.clone(), cmd.clone(), None, verify_store) - .await; - - if let Err(err) = result { - warn!("Did not push Register cmd on all nodes in the close group!: {err}"); - // We keep the cmd for next sync to retry - self.ops.push_back(cmd); - return Err(err); - } + if let Err(err) = self.publish_register(None, verify_store).await { + warn!("Failed to push register {address:?} to network!: {err}"); + return Err(err); } - debug!("Successfully pushed {ops_len} Register cmds at {address}!"); + debug!("Successfully pushed register {address:?} to network!"); } Ok(()) @@ -674,51 +655,20 @@ impl ClientRegister { self.push(verify_store).await } - /// Write a new value onto the Register atop the set of branches/entries - /// referenced by the provided list to their corresponding entry hash. - /// Note you can use `write_merging_branches` API if you - /// want to write atop of all exiting branches/entries instead. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'children' - [BTreeSet]<[EntryHash]> - /// * 'verify_store' - Boolean - /// - /// Return type: - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// let mut rng = rand::thread_rng(); - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let entry = "Entry".as_bytes(); - /// let tree_set = BTreeSet::new(); - /// // Use of the 'write_atop_online': - /// let mut binding = ClientRegister::create(client, address); - /// let mut register = binding.write_atop_online(entry,&tree_set,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_atop_online( - &mut self, - entry: &[u8], - children: &BTreeSet, - verify_store: bool, - ) -> Result<()> { - self.write_atop(entry, children)?; - self.push(verify_store).await - } - /// Access the underlying MerkleReg (e.g. for access to history) /// NOTE: This API is unstable and may be removed in the future pub fn merkle_reg(&self) -> &MerkleReg { - self.register.merkle_reg() + self.crdt.merkle_reg() + } + + /// Returns the local ops list + pub fn ops_list(&self) -> &BTreeSet { + &self.ops + } + + /// Log the crdt DAG in tree structured view + pub fn log_update_history(&self) -> String { + self.crdt.log_update_history() } // ********* Private helpers ********* @@ -761,44 +711,22 @@ impl ClientRegister { /// Publish a `Register` command on the network. /// If `verify_store` is true, it will verify the Register was stored on the network. /// Optionally contains the Payment and the PeerId that we paid to. 
- pub(crate) async fn publish_register( - client: Client, - cmd: RegisterCmd, + pub async fn publish_register( + &self, payment: Option<(Payment, PeerId)>, verify_store: bool, ) -> Result<()> { - let cmd_dst = cmd.dst(); - debug!("Querying existing Register for cmd: {cmd_dst:?}"); - let network_reg = client.get_signed_register_from_network(cmd.dst()).await; - - debug!("Publishing Register cmd: {cmd_dst:?}"); - let register = match cmd { - RegisterCmd::Create { - register, - signature, - } => { - if let Ok(existing_reg) = network_reg { - if existing_reg.owner() != register.owner() { - return Err(ProtocolError::RegisterAlreadyClaimed(existing_reg.owner()))?; - } - } - SignedRegister::new(register, signature) - } - RegisterCmd::Edit(op) => { - let mut reg = network_reg?; - reg.add_op(op)?; - reg - } - }; + let client = self.client.clone(); + let signed_reg = self.get_signed_reg()?; - let network_address = NetworkAddress::from_register_address(*register.address()); + let network_address = NetworkAddress::from_register_address(*self.register.address()); let key = network_address.to_record_key(); let (record, payee) = match payment { Some((payment, payee)) => { let record = Record { key: key.clone(), value: try_serialize_record( - &(payment, ®ister), + &(payment, &signed_reg), RecordKind::RegisterWithPayment, )? .to_vec(), @@ -810,7 +738,7 @@ impl ClientRegister { None => { let record = Record { key: key.clone(), - value: try_serialize_record(®ister, RecordKind::Register)?.to_vec(), + value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), publisher: None, expires: None, }; @@ -829,7 +757,7 @@ impl ClientRegister { ( Some(Record { key, - value: try_serialize_record(®ister, RecordKind::Register)?.to_vec(), + value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), publisher: None, expires: None, }), @@ -858,13 +786,48 @@ impl ClientRegister { } /// Retrieve a `Register` from the Network. - async fn get_register_from_network( + pub async fn get_register_from_network( client: &Client, address: RegisterAddress, - ) -> Result { + ) -> Result { debug!("Retrieving Register from: {address}"); - let reg = client.get_signed_register_from_network(address).await?; - reg.verify_with_address(address)?; - Ok(reg.register()?) + let signed_reg = client.get_signed_register_from_network(address).await?; + signed_reg.verify_with_address(address)?; + Ok(signed_reg) + } + + /// Merge a network fetched copy with the local one. 
+ /// Note the `get_register_from_network` already verified + /// * the fetched register is the same (address) as to the local one + /// * the ops of the fetched copy are all signed by the owner + pub fn merge(&mut self, signed_reg: &SignedRegister) { + debug!("Merging Register of: {:?}", self.register.address()); + + // Take out the difference between local ops and fetched ops + // note the `difference` functions gives entry that: in a but not in b + let diff: Vec<_> = signed_reg.ops().difference(&self.ops).cloned().collect(); + + // Apply the new ops to local + for op in diff { + // in case of deploying error, record then continue to next + if let Err(err) = self.crdt.apply_op(op.clone()) { + error!( + "Apply op to local Register {:?} failed with {err:?}", + self.register.address() + ); + } else { + let _ = self.ops.insert(op); + } + } + } + + /// Generate SignedRegister from local copy, so that can be published to network + fn get_signed_reg(&self) -> Result { + let signature = self.client.sign(self.register.bytes()?); + Ok(SignedRegister::new( + self.register.clone(), + signature, + self.ops.clone(), + )) } } diff --git a/sn_client/src/uploader/mod.rs b/sn_client/src/uploader/mod.rs index 8b8d6005fa..c3495b99ab 100644 --- a/sn_client/src/uploader/mod.rs +++ b/sn_client/src/uploader/mod.rs @@ -18,7 +18,7 @@ use sn_protocol::{ storage::{Chunk, ChunkAddress, RetryStrategy}, NetworkAddress, }; -use sn_registers::{Register, RegisterAddress}; +use sn_registers::{RegisterAddress, SignedRegister}; use sn_transfers::{NanoTokens, WalletApi}; use std::{ collections::{BTreeMap, BTreeSet}, @@ -420,7 +420,7 @@ impl UploadItem { #[derive(Debug)] enum TaskResult { GetRegisterFromNetworkOk { - remote_register: Register, + remote_register: SignedRegister, }, GetRegisterFromNetworkErr(XorName), PushRegisterOk { diff --git a/sn_client/src/uploader/tests/setup.rs b/sn_client/src/uploader/tests/setup.rs index 328489c24d..59f9005c4a 100644 --- a/sn_client/src/uploader/tests/setup.rs +++ b/sn_client/src/uploader/tests/setup.rs @@ -22,7 +22,7 @@ use libp2p_identity::Keypair; use rand::thread_rng; use sn_networking::{NetworkBuilder, PayeeQuote}; use sn_protocol::{storage::RetryStrategy, NetworkAddress}; -use sn_registers::{Register, RegisterAddress}; +use sn_registers::{Permissions, RegisterAddress, SignedRegister}; use sn_transfers::{MainSecretKey, NanoTokens, PaymentQuote, WalletApi}; use std::{ collections::{BTreeMap, VecDeque}, @@ -50,7 +50,7 @@ impl UploaderInterface for TestUploader { fn submit_get_register_task( &mut self, - _client: Client, + client: Client, reg_addr: RegisterAddress, _task_result_sender: mpsc::Sender, ) { @@ -67,12 +67,10 @@ impl UploaderInterface for TestUploader { match step { TestSteps::GetRegisterOk => { handle.spawn(async move { - let reg = Register::test_new_from_address(reg_addr); - + let remote_register = + SignedRegister::test_new_from_address(reg_addr, client.signer()); task_result_sender - .send(TaskResult::GetRegisterFromNetworkOk { - remote_register: reg, - }) + .send(TaskResult::GetRegisterFromNetworkOk { remote_register }) .await .expect("Failed to send task result"); }); @@ -449,9 +447,13 @@ pub fn get_dummy_registers(num: usize, client: Client) -> Vec { let mut rng = thread_rng(); let mut registers = Vec::with_capacity(num); for _ in 0..num { - let mut client_reg = ClientRegister::create(client.clone(), XorName::random(&mut rng)); - // test_new_from_address that is used during get_register, uses AnyoneCanWrite permission, so use the same here - client_reg.register = 
Register::test_new_from_address(*client_reg.address()); + // test_new_from_address that is used during get_register, + // uses AnyoneCanWrite permission, so use the same here + let client_reg = ClientRegister::create_register( + client.clone(), + XorName::random(&mut rng), + Permissions::AnyoneCanWrite, + ); registers.push(client_reg); } diff --git a/sn_client/src/uploader/upload.rs b/sn_client/src/uploader/upload.rs index 0fdc4280de..857c9fc31c 100644 --- a/sn_client/src/uploader/upload.rs +++ b/sn_client/src/uploader/upload.rs @@ -20,11 +20,10 @@ use itertools::Either; use libp2p::PeerId; use sn_networking::PayeeQuote; use sn_protocol::{ - messages::RegisterCmd, storage::{Chunk, RetryStrategy}, NetworkAddress, }; -use sn_registers::{Register, RegisterAddress}; +use sn_registers::{RegisterAddress, SignedRegister}; use sn_transfers::{NanoTokens, WalletApi}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, @@ -275,8 +274,7 @@ pub(super) async fn start_upload( .get_mut(&xorname) .ok_or(ClientError::UploadableItemNotFound(xorname))?; if let UploadItem::Register { reg, .. } = reg { - // todo: not error out here - reg.register.merge(&remote_register)?; + reg.merge(&remote_register); uploader.pending_to_push_register.push(xorname); } } @@ -938,10 +936,8 @@ impl InnerUploader { // ====== Logic ====== - async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result { - let reg = client.verify_register_stored(reg_addr).await?; - let reg = reg.register()?; - Ok(reg) + async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result { + client.verify_register_stored(reg_addr).await } async fn push_register(upload_item: UploadItem, verify_store: bool) -> Result { @@ -1029,19 +1025,8 @@ impl InnerUploader { trace!("Client upload completed for chunk: {xorname:?}"); } UploadItem::Register { address: _, reg } => { - let signature = client.sign(reg.register.bytes()?); - trace!("Client upload started for register: {xorname:?}"); - - ClientRegister::publish_register( - client, - RegisterCmd::Create { - register: reg.register, - signature, - }, - Some((payment, payee)), - verify_store, - ) - .await?; + reg.publish_register(Some((payment, payee)), verify_store) + .await?; trace!("Client upload completed for register: {xorname:?}"); } } diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index d440109764..ec716cb4df 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -167,8 +167,8 @@ impl GetRecordCfg { } }; - // Only compare root values of the register - target_register.base_register().read() == fetched_register.base_register().read() + target_register.base_register() == fetched_register.base_register() + && target_register.ops() == fetched_register.ops() } else { target_record == record } diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 6e11295cbd..23fe9c53b0 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -374,6 +374,21 @@ // ), // ); +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); + +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops_list().len() +// ); +// println!( +// "current local cached ops length is {}", +// register.ops_list().len() +// ); + // // TODO adapt to evm // // let _ = wallet_client // // .mut_wallet() @@ -391,6 +406,12 @@ // 
Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address // )); +// println!("Current fetched register is {:?}", retrieved_reg.address()); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.log_update_history() +// ); + // let random_entry = rng.gen::<[u8; 32]>().to_vec(); // register.write(&random_entry)?; diff --git a/sn_registers/src/lib.rs b/sn_registers/src/lib.rs index 2fb85cd71f..e9cc34e4f0 100644 --- a/sn_registers/src/lib.rs +++ b/sn_registers/src/lib.rs @@ -19,6 +19,7 @@ pub use self::{ error::Error, metadata::{Entry, EntryHash}, permissions::Permissions, + reg_crdt::RegisterCrdt, register::{Register, SignedRegister}, register_op::RegisterOp, }; diff --git a/sn_registers/src/reg_crdt.rs b/sn_registers/src/reg_crdt.rs index 844b3bfce3..f93002aefc 100644 --- a/sn_registers/src/reg_crdt.rs +++ b/sn_registers/src/reg_crdt.rs @@ -9,17 +9,21 @@ use crate::{error::Result, Entry, EntryHash, Error, RegisterAddress, RegisterOp}; use crdts::merkle_reg::Node as MerkleDagEntry; -use crdts::{merkle_reg::MerkleReg, CmRDT, CvRDT}; +use crdts::{ + merkle_reg::{Hash as CrdtHash, MerkleReg}, + CmRDT, CvRDT, +}; use serde::{Deserialize, Serialize}; use std::{ - collections::BTreeSet, + collections::{BTreeSet, HashSet}, fmt::{self, Debug, Display, Formatter}, hash::Hash, }; +use xor_name::XorName; /// Register data type as a CRDT with Access Control #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, PartialOrd)] -pub(crate) struct RegisterCrdt { +pub struct RegisterCrdt { /// Address on the network of this piece of data address: RegisterAddress, /// CRDT to store the actual data, i.e. the items of the Register. @@ -41,7 +45,7 @@ impl Display for RegisterCrdt { impl RegisterCrdt { /// Constructs a new '`RegisterCrdtImpl`'. - pub(crate) fn new(address: RegisterAddress) -> Self { + pub fn new(address: RegisterAddress) -> Self { Self { address, data: MerkleReg::new(), @@ -49,23 +53,23 @@ impl RegisterCrdt { } /// Returns the address. - pub(crate) fn address(&self) -> &RegisterAddress { + pub fn address(&self) -> &RegisterAddress { &self.address } /// Merge another register into this one. - pub(crate) fn merge(&mut self, other: Self) { + pub fn merge(&mut self, other: Self) { self.data.merge(other.data); } /// Returns total number of items in the register. - pub(crate) fn size(&self) -> u64 { + pub fn size(&self) -> u64 { (self.data.num_nodes() + self.data.num_orphans()) as u64 } /// Write a new entry to the `RegisterCrdt`, returning the hash /// of the entry and the CRDT operation without a signature - pub(crate) fn write( + pub fn write( &mut self, entry: Entry, children: &BTreeSet, @@ -81,7 +85,7 @@ impl RegisterCrdt { } /// Apply a remote data CRDT operation to this replica of the `RegisterCrdtImpl`. - pub(crate) fn apply_op(&mut self, op: RegisterOp) -> Result<()> { + pub fn apply_op(&mut self, op: RegisterOp) -> Result<()> { // Let's first check the op is validly signed. // Note: Perms and valid sig for the op are checked at the upper Register layer. @@ -100,12 +104,12 @@ impl RegisterCrdt { } /// Get the entry corresponding to the provided `hash` if it exists. - pub(crate) fn get(&self, hash: EntryHash) -> Option<&Entry> { + pub fn get(&self, hash: EntryHash) -> Option<&Entry> { self.data.node(hash.0).map(|node| &node.value) } /// Read current entries (multiple entries occur on concurrent writes). 
- pub(crate) fn read(&self) -> BTreeSet<(EntryHash, Entry)> { + pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { self.data .read() .hashes_and_nodes() @@ -124,9 +128,84 @@ impl RegisterCrdt { /// Access the underlying MerkleReg (e.g. for access to history) /// NOTE: This API is unstable and may be removed in the future - pub(crate) fn merkle_reg(&self) -> &MerkleReg { + pub fn merkle_reg(&self) -> &MerkleReg { &self.data } + + /// Log the structure of the MerkleReg as a tree view. + /// This is actually being the `update history` of the register. + pub fn log_update_history(&self) -> String { + let mut output = "MerkleReg Structure:\n".to_string(); + output = format!( + "{output}Total entries: {}\n", + self.data.num_nodes() + self.data.num_orphans() + ); + + // Find root nodes (entries with no parents) + let roots: Vec<_> = self.data.read().hashes().into_iter().collect(); + + // Print the tree starting from each root + for (i, root) in roots.iter().enumerate() { + let mut visited = HashSet::new(); + Self::print_tree( + root, + &self.data, + &mut output, + "", + i == roots.len() - 1, + &mut visited, + ); + } + + output + } + + // Helper function to recursively print the MerkleReg tree + fn print_tree( + hash: &CrdtHash, + merkle_reg: &MerkleReg, + output: &mut String, + prefix: &str, + is_last: bool, + visited: &mut HashSet, + ) { + let pretty_hash = format!("{}", XorName::from_content(hash)); + if !visited.insert(*hash) { + *output = format!( + "{}{prefix}{}* {pretty_hash} (cycle detected)\n", + output, + if is_last { "└── " } else { "├── " }, + ); + return; + } + + let entry = if let Some(node) = merkle_reg.node(*hash) { + format!("value: {}", XorName::from_content(&node.value)) + } else { + "value: None".to_string() + }; + *output = format!( + "{}{prefix}{}{pretty_hash}: {entry}\n", + output, + if is_last { "└── " } else { "├── " }, + ); + + let children: Vec<_> = merkle_reg.children(*hash).hashes().into_iter().collect(); + let new_prefix = format!("{prefix}{} ", if is_last { " " } else { "│" }); + + for (i, child) in children.iter().enumerate() { + Self::print_tree( + child, + merkle_reg, + output, + &new_prefix, + i == children.len() - 1, + visited, + ); + } + + visited.remove(hash); + } } #[cfg(test)] diff --git a/sn_registers/src/register.rs b/sn_registers/src/register.rs index 366f73ef0e..2bfda88aa3 100644 --- a/sn_registers/src/register.rs +++ b/sn_registers/src/register.rs @@ -6,15 +6,12 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - error::Result, reg_crdt::RegisterCrdt, Entry, EntryHash, Error, Permissions, RegisterAddress, - RegisterOp, -}; - -use bls::{PublicKey, SecretKey, Signature}; -use crdts::merkle_reg::{Hash, MerkleReg}; +use crate::{error::Result, Error, Permissions, RegisterAddress, RegisterOp}; +#[cfg(feature = "test-utils")] +use bls::SecretKey; +use bls::{PublicKey, Signature}; use serde::{Deserialize, Serialize}; -use std::collections::{BTreeSet, HashSet}; +use std::collections::BTreeSet; use xor_name::XorName; /// Arbitrary maximum size of a register entry. 
@@ -26,8 +23,8 @@ const MAX_REG_NUM_ENTRIES: u16 = 1024; /// A Register on the SAFE Network #[derive(Clone, Eq, PartialEq, PartialOrd, Hash, Serialize, Deserialize, Debug)] pub struct Register { - /// CRDT data of the Register - crdt: RegisterCrdt, + /// contains the info of meta (XorName) and owner (PublicKey) + address: RegisterAddress, /// Permissions of the Register /// Depending on the permissions, the owner can allow other users to write to the register /// Everyone can always read the Register because all data is public @@ -39,8 +36,8 @@ pub struct Register { #[derive(Clone, Debug, Serialize, Deserialize, PartialOrd, PartialEq, Eq, Hash)] pub struct SignedRegister { /// the base register we had at creation - base_register: Register, - /// signature over the above by the owner + register: Register, + /// signature over the above register by the owner signature: Signature, /// operations to apply on this register, /// they contain a signature of the writer @@ -49,24 +46,29 @@ pub struct SignedRegister { impl SignedRegister { /// Create a new SignedRegister - pub fn new(base_register: Register, signature: Signature) -> Self { + pub fn new(register: Register, signature: Signature, ops: BTreeSet) -> Self { Self { - base_register, + register, signature, - ops: BTreeSet::new(), + ops, } } /// Return the base register. This is the register before any operations have been applied. pub fn base_register(&self) -> &Register { - &self.base_register + &self.register } /// Verfies a SignedRegister pub fn verify(&self) -> Result<()> { - let bytes = self.base_register.bytes()?; + let reg_size = self.ops.len(); + if reg_size >= MAX_REG_NUM_ENTRIES as usize { + return Err(Error::TooManyEntries(reg_size)); + } + + let bytes = self.register.bytes()?; if !self - .base_register + .register .owner() .verify(&self.signature, bytes.as_slice()) { @@ -74,13 +76,20 @@ impl SignedRegister { } for op in &self.ops { - self.base_register.check_register_op(op)?; + self.register.check_register_op(op)?; + let size = op.crdt_op.value.len(); + if size > MAX_REG_ENTRY_SIZE { + return Err(Error::EntryTooBig { + size, + max: MAX_REG_ENTRY_SIZE, + }); + } } Ok(()) } pub fn verify_with_address(&self, address: RegisterAddress) -> Result<()> { - if self.base_register.address() != &address { + if self.register.address() != &address { return Err(Error::InvalidRegisterAddress { requested: Box::new(address), got: Box::new(*self.address()), @@ -89,19 +98,9 @@ impl SignedRegister { self.verify() } - /// Return the Register after applying all the operations - pub fn register(self) -> Result { - let mut register = self.base_register; - for op in self.ops { - register.apply_op(op)?; - } - Ok(register) - } - /// Merge two SignedRegisters pub fn merge(&mut self, other: &Self) -> Result<()> { - self.base_register - .verify_is_mergeable(&other.base_register)?; + self.register.verify_is_mergeable(&other.register)?; self.ops.extend(other.ops.clone()); Ok(()) } @@ -109,8 +108,7 @@ impl SignedRegister { /// Merge two SignedRegisters but verify the incoming content /// Significantly slower than merge, use when you want to trust but verify the `other` pub fn verified_merge(&mut self, other: &Self) -> Result<()> { - self.base_register - .verify_is_mergeable(&other.base_register)?; + self.register.verify_is_mergeable(&other.register)?; other.verify()?; self.ops.extend(other.ops.clone()); Ok(()) @@ -118,89 +116,80 @@ impl SignedRegister { /// Return the address. 
    pub fn address(&self) -> &RegisterAddress {
-        self.base_register.address()
+        self.register.address()
     }
 
     /// Return the owner of the data.
     pub fn owner(&self) -> PublicKey {
-        self.base_register.owner()
+        self.register.owner()
     }
 
     /// Check and add an Op to the SignedRegister
     pub fn add_op(&mut self, op: RegisterOp) -> Result<()> {
-        self.base_register.check_register_op(&op)?;
+        let reg_size = self.ops.len();
+        if reg_size >= MAX_REG_NUM_ENTRIES as usize {
+            return Err(Error::TooManyEntries(reg_size));
+        }
+
+        let size = op.crdt_op.value.len();
+        if size > MAX_REG_ENTRY_SIZE {
+            return Err(Error::EntryTooBig {
+                size,
+                max: MAX_REG_ENTRY_SIZE,
+            });
+        }
+
+        self.register.check_register_op(&op)?;
         self.ops.insert(op);
         Ok(())
     }
 
-    /// Access the underlying MerkleReg (e.g. for access to history)
-    /// NOTE: This API is unstable and may be removed in the future
-    pub fn merkle_reg(&self) -> &MerkleReg<Entry> {
-        self.base_register.merkle_reg()
+    /// Returns a reference to the ops list
+    pub fn ops(&self) -> &BTreeSet<RegisterOp> {
+        &self.ops
+    }
+
+    /// Used in tests.
+    #[cfg(feature = "test-utils")]
+    pub fn test_new_from_address(address: RegisterAddress, owner: &SecretKey) -> Self {
+        let base_register = Register {
+            address,
+            permissions: Permissions::AnyoneCanWrite,
+        };
+        let bytes = if let Ok(bytes) = base_register.bytes() {
+            bytes
+        } else {
+            panic!("Failed to serialize register {base_register:?}");
+        };
+        let signature = owner.sign(bytes);
+        Self::new(base_register, signature, BTreeSet::new())
    }
 }
 
 impl Register {
     /// Create a new Register
     pub fn new(owner: PublicKey, meta: XorName, mut permissions: Permissions) -> Self {
-        let address = RegisterAddress { meta, owner };
         permissions.add_writer(owner);
         Self {
-            crdt: RegisterCrdt::new(address),
+            address: RegisterAddress { meta, owner },
             permissions,
         }
     }
 
-    /// Sign a Register and return the signature, makes sure the signer is the owner in the process
-    pub fn sign(&self, secret_key: &SecretKey) -> Result<Signature> {
-        if self.owner() != secret_key.public_key() {
-            return Err(Error::InvalidSecretKey);
-        }
-        let bytes = self.bytes()?;
-        let signature = secret_key.sign(bytes);
-        Ok(signature)
-    }
-
     /// Returns a bytes version of the Register used for signing
     /// Use this API when you want to sign a Register without providing a secret key to the Register API
     pub fn bytes(&self) -> Result<Vec<u8>> {
         rmp_serde::to_vec(self).map_err(|_| Error::SerialisationFailed)
     }
 
-    /// Sign a Register into a SignedRegister
-    pub fn into_signed(self, secret_key: &SecretKey) -> Result<SignedRegister> {
-        let signature = self.sign(secret_key)?;
-        Ok(SignedRegister::new(self, signature))
-    }
-
     /// Return the address.
     pub fn address(&self) -> &RegisterAddress {
-        self.crdt.address()
+        &self.address
     }
 
     /// Return the owner of the data.
     pub fn owner(&self) -> PublicKey {
-        self.address().owner()
-    }
-
-    /// Return the number of items held in the register
-    pub fn size(&self) -> u64 {
-        self.crdt.size()
-    }
-
-    /// Return a value corresponding to the provided 'hash', if present.
-    pub fn get(&self, hash: EntryHash) -> Result<&Entry> {
-        self.crdt.get(hash).ok_or(Error::NoSuchEntry(hash))
-    }
-
-    /// Read the last entry, or entries when there are branches, if the register is not empty.
-    pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> {
-        self.crdt.read()
-    }
-
-    /// Returns the children of an entry, along with their corresponding entry hashes
-    pub fn children(&self, hash: &EntryHash) -> BTreeSet<(EntryHash, Entry)> {
-        self.crdt.children(hash)
+        self.address.owner()
     }
 
     /// Return the permission.
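To make the reshaped `SignedRegister` flow concrete, here is a hedged sketch (not part of the patch): build the base register, sign its bytes, wrap both in a `SignedRegister`, then feed it writer-signed ops through `add_op`, which now enforces `MAX_REG_ENTRY_SIZE` and `MAX_REG_NUM_ENTRIES`. The public visibility of `RegisterCrdt::new` and `write` outside the crate is an assumption.

    use std::collections::BTreeSet;

    use bls::SecretKey;
    use sn_registers::{Error, Permissions, Register, RegisterCrdt, RegisterOp, SignedRegister};

    fn build_signed_register() -> Result<SignedRegister, Error> {
        let owner_sk = SecretKey::random();
        let meta = xor_name::rand::random();

        // Base register plus an owner signature over its serialized bytes.
        let register = Register::new(owner_sk.public_key(), meta, Permissions::default());
        let address = *register.address();
        let signature = owner_sk.sign(register.bytes()?);
        let mut signed = SignedRegister::new(register, signature, BTreeSet::new());

        // Ops are produced via the CRDT and signed by the writer.
        let mut crdt = RegisterCrdt::new(address);
        let (_hash, addr, crdt_op) = crdt.write(b"hello".to_vec(), &BTreeSet::new())?;
        signed.add_op(RegisterOp::new(addr, crdt_op, &owner_sk))?;

        assert_eq!(signed.ops().len(), 1);
        Ok(signed)
    }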
@@ -208,37 +197,6 @@ impl Register { &self.permissions } - /// Write an entry to the Register, returning the generated - /// CRDT operation so the caller can sign and broadcast it to other replicas, - /// along with the hash of the entry just written. - pub fn write( - &mut self, - entry: Entry, - children: &BTreeSet, - signer: &SecretKey, - ) -> Result<(EntryHash, RegisterOp)> { - self.check_entry_and_reg_sizes(&entry)?; - // check permissions before writing on the underlying CRDT - self.check_user_permissions(signer.public_key())?; - let (hash, address, crdt_op) = self.crdt.write(entry, children)?; - let op = RegisterOp::new(address, crdt_op, signer); - Ok((hash, op)) - } - - /// Apply a signed data CRDT operation. - pub fn apply_op(&mut self, op: RegisterOp) -> Result<()> { - self.check_entry_and_reg_sizes(&op.crdt_op.value)?; - self.check_register_op(&op)?; - self.crdt.apply_op(op) - } - - /// Merge another Register into this one. - pub fn merge(&mut self, other: &Self) -> Result<()> { - self.verify_is_mergeable(other)?; - self.crdt.merge(other.crdt.clone()); - Ok(()) - } - /// Check if a register op is valid for our current register pub fn check_register_op(&self, op: &RegisterOp) -> Result<()> { if self.permissions.can_anyone_write() { @@ -261,107 +219,6 @@ impl Register { } } - /// Access the underlying MerkleReg (e.g. for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.crdt.merkle_reg() - } - - /// Log the structure of the MerkleReg within this Register's CRDT as a tree view. - /// This is actually being the `update history` of the register. - pub fn log_update_history(&self) -> String { - let mut output = "MerkleReg Structure:\n".to_string(); - let merkle_reg = self.crdt.merkle_reg(); - output = format!( - "{output}Total entries: {}\n", - merkle_reg.num_nodes() + merkle_reg.num_orphans() - ); - - // Find root nodes (entries with no parents) - let roots: Vec<_> = merkle_reg.read().hashes().into_iter().collect(); - - // Print the tree starting from each root - for (i, root) in roots.iter().enumerate() { - let mut visited = HashSet::new(); - Self::print_tree( - root, - merkle_reg, - &mut output, - "", - i == roots.len() - 1, - &mut visited, - ); - } - - output - } - - // Helper function to recursively print the MerkleReg tree - fn print_tree( - hash: &Hash, - merkle_reg: &MerkleReg, - output: &mut String, - prefix: &str, - is_last: bool, - visited: &mut HashSet, - ) { - let pretty_hash = format!("{}", XorName::from_content(hash)); - if !visited.insert(*hash) { - *output = format!( - "{}{prefix}{}* {pretty_hash} (cycle detected)\n", - output, - if is_last { "└── " } else { "├── " }, - ); - return; - } - - let entry = if let Some(node) = merkle_reg.node(*hash) { - format!("value: {}", XorName::from_content(&node.value)) - } else { - "value: None".to_string() - }; - *output = format!( - "{}{prefix}{}{pretty_hash}: {entry}\n", - output, - if is_last { "└── " } else { "├── " }, - ); - - let children: Vec<_> = merkle_reg.children(*hash).hashes().into_iter().collect(); - let new_prefix = format!("{prefix}{} ", if is_last { " " } else { "│" }); - - for (i, child) in children.iter().enumerate() { - Self::print_tree( - child, - merkle_reg, - output, - &new_prefix, - i == children.len() - 1, - visited, - ); - } - - visited.remove(hash); - } - - // Private helper to check the given Entry's size is within define limit, - // as well as check the Register hasn't already reached the maximum number of entries. 
- fn check_entry_and_reg_sizes(&self, entry: &Entry) -> Result<()> { - let size = entry.len(); - if size > MAX_REG_ENTRY_SIZE { - return Err(Error::EntryTooBig { - size, - max: MAX_REG_ENTRY_SIZE, - }); - } - - let reg_size = self.crdt.size(); - if reg_size >= MAX_REG_NUM_ENTRIES.into() { - return Err(Error::TooManyEntries(reg_size as usize)); - } - - Ok(()) - } - // Private helper to check if this Register is mergeable with another fn verify_is_mergeable(&self, other: &Self) -> Result<()> { if self.address() != other.address() || self.permissions != other.permissions { @@ -369,30 +226,17 @@ impl Register { } Ok(()) } - - /// Used in tests. - #[cfg(feature = "test-utils")] - pub fn test_new_from_address(address: RegisterAddress) -> Self { - Register { - crdt: RegisterCrdt::new(address), - permissions: Permissions::AnyoneCanWrite, - } - } } #[cfg(test)] mod tests { - use crate::RegisterOp; + use crate::{RegisterCrdt, RegisterOp}; - use super::{ - EntryHash, Error, Permissions, Register, RegisterAddress, Result, MAX_REG_NUM_ENTRIES, - }; + use super::*; use bls::SecretKey; - use eyre::Context; - use proptest::prelude::*; - use rand::{rngs::OsRng, seq::SliceRandom, thread_rng, Rng}; - use std::{collections::BTreeSet, sync::Arc}; + use rand::{thread_rng, Rng}; + use std::collections::BTreeSet; use xor_name::XorName; #[test] @@ -408,111 +252,61 @@ mod tests { assert_eq!(*register.address(), address); } - #[test] - fn register_generate_entry_hash() -> eyre::Result<()> { - let authority_sk = SecretKey::random(); - let authority = authority_sk.public_key(); - - let meta: XorName = xor_name::rand::random(); - - let mut replica1 = Register::new(authority, meta, Permissions::default()); - let mut replica2 = Register::new(authority, meta, Permissions::default()); - - // Different item from same replica's root shall having different entry_hash - let item1 = random_register_entry(); - let item2 = random_register_entry(); - let (entry_hash1_1, _) = replica1.write(item1.clone(), &BTreeSet::new(), &authority_sk)?; - let (entry_hash1_2, _) = replica1.write(item2, &BTreeSet::new(), &authority_sk)?; - assert!(entry_hash1_1 != entry_hash1_2); - - // Same item from different replica's root shall remain same - let (entry_hash2_1, _) = replica2.write(item1, &BTreeSet::new(), &authority_sk)?; - assert_eq!(entry_hash1_1, entry_hash2_1); - - let mut parents = BTreeSet::new(); - // Different item from different replica with same parents shall be different - let _ = parents.insert(entry_hash1_1); - let item3 = random_register_entry(); - let item4 = random_register_entry(); - let (entry_hash1_1_3, _) = replica1.write(item3, &parents, &authority_sk)?; - let (entry_hash2_1_4, _) = replica2.write(item4, &parents, &authority_sk)?; - assert!(entry_hash1_1_3 != entry_hash2_1_4); - - Ok(()) - } - #[test] fn register_permissions() -> eyre::Result<()> { let owner_sk = SecretKey::random(); let owner = owner_sk.public_key(); - let other_user_sk = SecretKey::random(); - let other_user = other_user_sk.public_key(); + let user_sk_1 = SecretKey::random(); + let other_user = user_sk_1.public_key(); + let user_sk_2 = SecretKey::random(); let meta: XorName = xor_name::rand::random(); - let item = random_register_entry(); + let address = RegisterAddress { meta, owner }; // Create replicas where anyone can write to them, including the owner ofc - let mut replica1 = Register::new(owner, meta, Permissions::new_anyone_can_write()); - let mut replica2 = replica1.clone(); - let mut signed_replica3 = replica1.clone().into_signed(&owner_sk)?; - 
// ...owner and the other user can both write to them - let (_, op1) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - let (_, op2) = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - replica2.apply_op(op1)?; - replica2.apply_op(op2)?; - signed_replica3.verified_merge(&replica2.into_signed(&owner_sk)?)?; + let mut signed_reg_1 = create_reg_replica_with( + meta, + Some(owner_sk.clone()), + Some(Permissions::new_anyone_can_write()), + ); + // ...owner and any other users can both write to them + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_1.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_1)?; + assert!(signed_reg_1.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_2)?; + assert!(signed_reg_1.add_op(op).is_ok()); // Create replicas allowing both the owner and other user to write to them - let mut replica1 = Register::new(owner, meta, Permissions::new_with([other_user])); - let mut replica2 = replica1.clone(); - let mut signed_replica3 = replica1.clone().into_signed(&owner_sk)?; - // ...owner and the other user can both write to them - let (_, op1) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - let (_, op2) = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - replica2.apply_op(op1)?; - replica2.apply_op(op2)?; - signed_replica3.verified_merge(&replica2.into_signed(&owner_sk)?)?; + let mut signed_reg_2 = create_reg_replica_with( + meta, + Some(owner_sk.clone()), + Some(Permissions::new_with([other_user])), + ); + // ...owner and the other user can both write to them, others shall fail + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_2.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_1)?; + assert!(signed_reg_2.add_op(op).is_ok()); + let op = generate_random_op(address, &user_sk_2)?; + assert!(signed_reg_2.add_op(op).is_err()); // Create replicas with the owner as the only allowed to write - let mut replica1 = Register::new(owner, meta, Permissions::default()); - let mut replica2 = replica1.clone(); + let mut signed_reg_3 = create_reg_replica_with(meta, Some(owner_sk.clone()), None); // ...owner can write to them - let (_, op) = replica1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - replica2.apply_op(op.clone())?; + let op = generate_random_op(address, &owner_sk)?; + assert!(signed_reg_3.add_op(op).is_ok()); // ...whilst other user cannot write to them - let res = replica1.write(item.clone(), &BTreeSet::new(), &other_user_sk); + let op = generate_random_op(address, &user_sk_1)?; + let res = signed_reg_3.add_op(op); assert!( matches!(&res, Err(err) if err == &Error::AccessDenied(other_user)), "Unexpected result: {res:?}" ); - let (_, address, crdt_op) = replica1.crdt.write(item.clone(), &BTreeSet::new())?; - let op_signed_by_other_user = RegisterOp::new(address, crdt_op, &other_user_sk); - let res = replica2.apply_op(op_signed_by_other_user); - assert!( - matches!(&res, Err(err) if err == &Error::AccessDenied(other_user)), - "Unexpected result: {res:?}" - ); - - // Create Registers with different permissions to write - let mut reg1 = Register::new(owner, meta, Permissions::default()); - let mut reg2 = Register::new(owner, meta, Permissions::new_with([other_user])); - // ...owner can write to both of them, the other user only to one of them - reg1.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - reg2.write(item.clone(), &BTreeSet::new(), &owner_sk)?; - reg2.write(item.clone(), &BTreeSet::new(), &other_user_sk)?; - 
// ...but they cannot be merged due to different permissions sets - let res1 = reg1.merge(®2); - let res2 = reg2.merge(®1); - assert!( - matches!(&res1, Err(err) if err == &Error::DifferentBaseRegister), - "Unexpected result: {res1:?}" - ); - assert_eq!(res1, res2); - let mut signed_reg1 = reg1.into_signed(&owner_sk)?; - let mut signed_reg2 = reg2.into_signed(&owner_sk)?; - let res1 = signed_reg1.verified_merge(&signed_reg2); - let res2 = signed_reg2.verified_merge(&signed_reg1); + // Registers with different permission can not be merged + let res1 = signed_reg_1.merge(&signed_reg_2); + let res2 = signed_reg_2.merge(&signed_reg_1); assert!( matches!(&res1, Err(err) if err == &Error::DifferentBaseRegister), "Unexpected result: {res1:?}" @@ -522,85 +316,6 @@ mod tests { Ok(()) } - #[test] - fn register_concurrent_write_ops() -> eyre::Result<()> { - let authority_sk1 = SecretKey::random(); - let authority1 = authority_sk1.public_key(); - let authority_sk2 = SecretKey::random(); - let authority2 = authority_sk2.public_key(); - - let meta: XorName = xor_name::rand::random(); - - // We'll have 'authority1' as the owner in both replicas and - // grant permissions for Write to 'authority2' in both replicas too - let perms = Permissions::new_with([authority1, authority2]); - - // Instantiate the same Register on two replicas - let mut replica1 = Register::new(authority_sk1.public_key(), meta, perms); - let mut replica2 = replica1.clone(); - - // And let's write an item to replica1 with autority1 - let item1 = random_register_entry(); - let (_, op1) = replica1.write(item1, &BTreeSet::new(), &authority_sk1)?; - - // Let's assert current state on both replicas - assert_eq!(replica1.size(), 1); - assert_eq!(replica2.size(), 0); - - // Concurrently write another item with authority2 on replica2 - let item2 = random_register_entry(); - let (_, op2) = replica2.write(item2, &BTreeSet::new(), &authority_sk2)?; - - // Item should be writed on replica2 - assert_eq!(replica2.size(), 1); - - // Write operations are now broadcasted and applied to both replicas - replica1.apply_op(op2)?; - replica2.apply_op(op1)?; - - // Let's assert data convergence on both replicas - verify_data_convergence(&[replica1, replica2], 2)?; - - Ok(()) - } - - #[test] - fn register_get_by_hash() -> eyre::Result<()> { - let (sk, register) = &mut create_reg_replicas(1)[0]; - - let entry1 = random_register_entry(); - let entry2 = random_register_entry(); - let entry3 = random_register_entry(); - - let (entry1_hash, _) = register.write(entry1.clone(), &BTreeSet::new(), sk)?; - - // this creates a fork since entry1 is not set as child of entry2 - let (entry2_hash, _) = register.write(entry2.clone(), &BTreeSet::new(), sk)?; - - // we'll write entry2 but having the entry1 and entry2 as children, - // i.e. 
solving the fork created by them - let children = [entry1_hash, entry2_hash].into_iter().collect(); - - let (entry3_hash, _) = register.write(entry3.clone(), &children, sk)?; - - assert_eq!(register.size(), 3); - - let first_entry = register.get(entry1_hash)?; - assert_eq!(first_entry, &entry1); - - let second_entry = register.get(entry2_hash)?; - assert_eq!(second_entry, &entry2); - - let third_entry = register.get(entry3_hash)?; - assert_eq!(third_entry, &entry3); - - let non_existing_hash = EntryHash::default(); - let entry_not_found = register.get(non_existing_hash); - assert_eq!(entry_not_found, Err(Error::NoSuchEntry(non_existing_hash))); - - Ok(()) - } - #[test] fn register_query_public_perms() -> eyre::Result<()> { let meta = xor_name::rand::random(); @@ -627,21 +342,27 @@ mod tests { // check register 1 is public assert_eq!(replica1.owner(), authority_pk1); - assert_eq!(replica1.check_user_permissions(owner1), Ok(())); - assert_eq!(replica1.check_user_permissions(owner2), Ok(())); - assert_eq!(replica1.check_user_permissions(random_user), Ok(())); - assert_eq!(replica1.check_user_permissions(random_user2), Ok(())); + assert_eq!(replica1.register.check_user_permissions(owner1), Ok(())); + assert_eq!(replica1.register.check_user_permissions(owner2), Ok(())); + assert_eq!( + replica1.register.check_user_permissions(random_user), + Ok(()) + ); + assert_eq!( + replica1.register.check_user_permissions(random_user2), + Ok(()) + ); // check register 2 has only owner1 and owner2 write allowed assert_eq!(replica2.owner(), authority_pk2); - assert_eq!(replica2.check_user_permissions(owner1), Ok(())); - assert_eq!(replica2.check_user_permissions(owner2), Ok(())); + assert_eq!(replica2.register.check_user_permissions(owner1), Ok(())); + assert_eq!(replica2.register.check_user_permissions(owner2), Ok(())); assert_eq!( - replica2.check_user_permissions(random_user), + replica2.register.check_user_permissions(random_user), Err(Error::AccessDenied(random_user)) ); assert_eq!( - replica2.check_user_permissions(random_user2), + replica2.register.check_user_permissions(random_user2), Err(Error::AccessDenied(random_user2)) ); @@ -654,25 +375,20 @@ mod tests { // one replica will allow write ops to anyone let authority_sk1 = SecretKey::random(); + let owner = authority_sk1.public_key(); let perms1 = Permissions::new_anyone_can_write(); + let address = RegisterAddress { meta, owner }; - let mut replica = create_reg_replica_with(meta, Some(authority_sk1), Some(perms1)); + let mut replica = create_reg_replica_with(meta, Some(authority_sk1.clone()), Some(perms1)); for _ in 0..MAX_REG_NUM_ENTRIES { - let (_hash, _op) = replica - .write( - random_register_entry(), - &BTreeSet::new(), - &SecretKey::random(), - ) - .context("Failed to write register entry")?; + let op = generate_random_op(address, &authority_sk1)?; + assert!(replica.add_op(op).is_ok()); } - let excess_entry = replica.write( - random_register_entry(), - &BTreeSet::new(), - &SecretKey::random(), - ); + let op = generate_random_op(address, &authority_sk1)?; + + let excess_entry = replica.add_op(op); match excess_entry { Err(Error::TooManyEntries(size)) => { @@ -693,14 +409,18 @@ mod tests { meta: XorName, perms: Option, count: usize, - ) -> Vec<(SecretKey, Register)> { - let replicas: Vec<(SecretKey, Register)> = (0..count) + ) -> Vec<(SecretKey, SignedRegister)> { + let replicas: Vec<(SecretKey, SignedRegister)> = (0..count) .map(|_| { let authority_sk = authority_sk.clone().unwrap_or_else(SecretKey::random); let authority = 
authority_sk.public_key(); let perms = perms.clone().unwrap_or_default(); let register = Register::new(authority, meta, perms); - (authority_sk, register) + + let signature = authority_sk.sign(register.bytes().unwrap()); + let signed_reg = SignedRegister::new(register, signature, Default::default()); + + (authority_sk, signed_reg) }) .collect(); @@ -708,424 +428,24 @@ mod tests { replicas } - fn create_reg_replicas(count: usize) -> Vec<(SecretKey, Register)> { - let meta = xor_name::rand::random(); - - gen_reg_replicas(None, meta, None, count) - } - fn create_reg_replica_with( meta: XorName, authority_sk: Option, perms: Option, - ) -> Register { + ) -> SignedRegister { let replicas = gen_reg_replicas(authority_sk, meta, perms, 1); replicas[0].1.clone() } - // verify data convergence on a set of replicas and with the expected length - fn verify_data_convergence(replicas: &[Register], expected_size: u64) -> Result<()> { - // verify all replicas have the same and expected size - for r in replicas { - assert_eq!(r.size(), expected_size); - } - - // now verify that the items are the same in all replicas - let r0 = &replicas[0]; - for r in replicas { - assert_eq!(r.crdt, r0.crdt); - } - - Ok(()) - } - - // Generate a vec of Register replicas of some length, with corresponding vec of keypairs for signing, and the overall owner of the register - fn generate_replicas( - max_quantity: usize, - ) -> impl Strategy, Arc)>> { - let xorname = xor_name::rand::random(); - - let owner_sk = Arc::new(SecretKey::random()); - let owner = owner_sk.public_key(); - let perms = Permissions::new_anyone_can_write(); - - (1..max_quantity + 1).prop_map(move |quantity| { - let mut replicas = Vec::with_capacity(quantity); - for _ in 0..quantity { - let replica = Register::new(owner, xorname, perms.clone()); - - replicas.push(replica); - } - - Ok((replicas, Arc::clone(&owner_sk))) - }) - } - - // Generate a Register entry - fn generate_reg_entry() -> impl Strategy> { - "\\PC*".prop_map(|s| s.into_bytes()) - } - - // Generate a vec of Register entries - fn generate_dataset(max_quantity: usize) -> impl Strategy>> { - prop::collection::vec(generate_reg_entry(), 1..max_quantity + 1) - } - - // Generates a vec of Register entries each with a value suggesting - // the delivery chance of the op that gets created with the entry - fn generate_dataset_and_probability( - max_quantity: usize, - ) -> impl Strategy, u8)>> { - prop::collection::vec((generate_reg_entry(), any::()), 1..max_quantity + 1) - } - - proptest! 
{ - #[test] - fn proptest_reg_doesnt_crash_with_random_data( - _data in generate_reg_entry() - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - // Write an item on replicas - let (_, op) = replica1.write(random_register_entry(), &BTreeSet::new(), &owner_sk)?; - replica2.apply_op(op)?; - - verify_data_convergence(&[replica1, replica2], 1)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data( - dataset in generate_dataset(1000) - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut children = BTreeSet::new(); - for _data in dataset { - // Write an item on replica1 - let (hash, op) = replica1.write(random_register_entry(), &children, &owner_sk)?; - // now apply that op to replica 2 - replica2.apply_op(op)?; - children = vec![hash].into_iter().collect(); - } - - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data_random_entry_children( - dataset in generate_dataset(1000) - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut list_of_hashes = Vec::new(); - let mut rng = thread_rng(); - for _data in dataset { - // choose a random set of children - let num_of_children: usize = rng.gen(); - let children = list_of_hashes.choose_multiple(&mut OsRng, num_of_children).cloned().collect(); - - // Write an item on replica1 using the randomly generated set of children - let (hash, op) = replica1.write(random_register_entry(), &children, &owner_sk)?; - - // now apply that op to replica 2 - replica2.apply_op(op)?; - list_of_hashes.push(hash); - } - - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_reg_converge_with_many_random_data_across_arbitrary_number_of_replicas( - dataset in generate_dataset(500), - res in generate_replicas(50) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // insert our data at replicas - let mut children = BTreeSet::new(); - for _data in dataset { - // first generate an op from one replica... 
- let (hash, op)= replicas[0].write(random_register_entry(), &children, &owner_sk)?; - - // then apply this to all replicas - for replica in &mut replicas { - replica.apply_op(op.clone())?; - } - children = vec![hash].into_iter().collect(); - } - - verify_data_convergence(&replicas, dataset_length)?; - - } - - #[test] - fn proptest_converge_with_shuffled_op_set_across_arbitrary_number_of_replicas( - dataset in generate_dataset(100), - res in generate_replicas(500) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set from one replica - let mut ops = vec![]; - - let mut children = BTreeSet::new(); - for _data in dataset { - let (hash, op) = replicas[0].write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for op in ops { - replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_from_many_replicas_across_arbitrary_number_of_replicas( - dataset in generate_dataset(1000), - res in generate_replicas(7) - ) { - let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for _data in dataset { - if let Some(replica) = replicas.choose_mut(&mut OsRng) - { - let (hash, op) = replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - } - - let opslen = ops.len() as u64; - prop_assert_eq!(dataset_length, opslen); - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for op in ops { - replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_dropped_data_can_be_reapplied_and_we_converge( - dataset in generate_dataset_and_probability(1000), - ) { - // Instantiate the same Register on two replicas - let meta = xor_name::rand::random(); - let owner_sk = SecretKey::random(); - let perms = Default::default(); - - // Instantiate the same Register on two replicas - let mut replicas = gen_reg_replicas( - Some(owner_sk.clone()), - meta, - Some(perms), - 2); - let (_, mut replica1) = replicas.remove(0); - let (_, mut replica2) = replicas.remove(0); - - let dataset_length = dataset.len() as u64; - - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for (_data, delivery_chance) in dataset { - let (hash, op)= replica1.write(random_register_entry(), &children, &owner_sk)?; - - ops.push((op, delivery_chance)); - children = vec![hash].into_iter().collect(); - } - - for (op, delivery_chance) in ops.clone() { - if delivery_chance < u8::MAX / 3 { - replica2.apply_op(op)?; - } - } - - // here we statistically should have dropped some messages - if dataset_length > 50 { - assert_ne!(replica2.size(), replica1.size()); - } - - // reapply all ops - for (op, _) in ops { - replica2.apply_op(op)?; - } - - // now we converge - verify_data_convergence(&[replica1, replica2], dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_from_many_while_dropping_some_at_random( - dataset in generate_dataset_and_probability(1000), - res in generate_replicas(7), - ) { - 
let (mut replicas, owner_sk) = res?; - let dataset_length = dataset.len() as u64; - - // generate an ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for (_data, delivery_chance) in dataset { - // a random index within the replicas range - let index: usize = OsRng.gen_range(0..replicas.len()); - let replica = &mut replicas[index]; - - let (hash, op)=replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push((op, delivery_chance)); - children = vec![hash].into_iter().collect(); - } - - let opslen = ops.len() as u64; - prop_assert_eq!(dataset_length, opslen); - - // now we randomly shuffle ops and apply at each replica - for replica in &mut replicas { - let mut ops = ops.clone(); - ops.shuffle(&mut OsRng); - - for (op, delivery_chance) in ops.clone() { - if delivery_chance > u8::MAX / 3 { - replica.apply_op(op)?; - } - } - - // reapply all ops, simulating lazy messaging filling in the gaps - for (op, _) in ops { - replica.apply_op(op)?; - } - } - - verify_data_convergence(&replicas, dataset_length)?; - } - - #[test] - fn proptest_converge_with_shuffled_ops_including_bad_ops_which_error_and_are_not_applied( - dataset in generate_dataset(10), - bogus_dataset in generate_dataset(10), // should be same number as dataset - gen_replicas_result in generate_replicas(10), - - ) { - let (mut replicas, owner_sk) = gen_replicas_result?; - let dataset_length = dataset.len(); - let bogus_dataset_length = bogus_dataset.len(); - let number_replicas = replicas.len(); - - // generate the real ops set using random replica for each data - let mut ops = vec![]; - let mut children = BTreeSet::new(); - for _data in dataset { - if let Some(replica) = replicas.choose_mut(&mut OsRng) - { - let (hash, op)=replica.write(random_register_entry(), &children, &owner_sk)?; - ops.push(op); - children = vec![hash].into_iter().collect(); - } - } - - // set up a replica that has nothing to do with the rest, random xor... different owner... 
-            let xorname = xor_name::rand::random();
-            let random_owner_sk = SecretKey::random();
-            let mut bogus_replica = Register::new(random_owner_sk.public_key(), xorname, Permissions::default());
-
-            // add bogus ops from bogus replica + bogus data
-            let mut children = BTreeSet::new();
-            for _data in bogus_dataset {
-                let (hash, bogus_op) = bogus_replica.write(random_register_entry(), &children, &random_owner_sk)?;
-                bogus_replica.apply_op(bogus_op.clone())?;
-                ops.push(bogus_op);
-                children = vec![hash].into_iter().collect();
-            }
-
-            let opslen = ops.len();
-            prop_assert_eq!(dataset_length + bogus_dataset_length, opslen);
-
-            let mut err_count = vec![];
-            // now we randomly shuffle ops and apply at each replica
-            for replica in &mut replicas {
-                let mut ops = ops.clone();
-                ops.shuffle(&mut OsRng);
-
-                for op in ops {
-                    match replica.apply_op(op) {
-                        Ok(_) => {},
-                        // record all errors to check this matches bogus data
-                        Err(error) => {err_count.push(error)},
-                    }
-                }
-            }
-
-            // check we get an error per bogus datum per replica
-            assert_eq!(err_count.len(), bogus_dataset_length * number_replicas);
-
-            verify_data_convergence(&replicas, dataset_length as u64)?;
-        }
-    }
-
     fn random_register_entry() -> Vec<u8> {
         let random_bytes = thread_rng().gen::<[u8; 32]>();
         random_bytes.to_vec()
     }
+
+    fn generate_random_op(address: RegisterAddress, writer_sk: &SecretKey) -> Result<RegisterOp> {
+        let mut crdt_reg = RegisterCrdt::new(address);
+        let item = random_register_entry();
+        let (_hash, addr, crdt_op) = crdt_reg.write(item, &BTreeSet::new())?;
+        Ok(RegisterOp::new(addr, crdt_op, writer_sk))
+    }
 }
diff --git a/sn_registers/src/register_op.rs b/sn_registers/src/register_op.rs
index 936529cdf1..455d26b43d 100644
--- a/sn_registers/src/register_op.rs
+++ b/sn_registers/src/register_op.rs
@@ -39,7 +39,7 @@ impl std::hash::Hash for RegisterOp {
 
 impl RegisterOp {
     /// Create a new RegisterOp
-    pub(crate) fn new(
+    pub fn new(
         address: RegisterAddress,
         crdt_op: MerkleDagEntry,
         signer: &SecretKey,

From d2284d37bb94364750b303171410821974621f45 Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Fri, 11 Oct 2024 11:01:41 +0900
Subject: [PATCH 149/255] feat(networking): remove NodeIssue::Connection

Temporary issues might cause increased shunning here.
In reality, any connection issues should surface through replication and
data verification tests (or they are not testing sufficiently). So this
removes a potential source of failure of nodes/networks due to temporary
issues.

---
 sn_networking/src/cmd.rs         | 2 --
 sn_networking/src/event/swarm.rs | 9 ++-------
 2 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index ef7eaaa017..b0eda19190 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -44,8 +44,6 @@ const REPLICATION_TIMEOUT: Duration = Duration::from_secs(45);
 
 #[derive(Debug, Eq, PartialEq)]
 pub enum NodeIssue {
-    /// Connection issues observed
-    ConnectionIssue,
     /// Data Replication failed
     ReplicationFailure,
     /// Close nodes have reported this peer as bad
diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs
index ba35f2bf18..028c791712 100644
--- a/sn_networking/src/event/swarm.rs
+++ b/sn_networking/src/event/swarm.rs
@@ -7,8 +7,8 @@
 // permissions and limitations relating to use of the SAFE Network Software.
use crate::{ - cmd::LocalSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, - relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, + event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, + target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; #[cfg(feature = "local")] use libp2p::mdns; @@ -494,11 +494,6 @@ impl SwarmDriver { .remove_peer(&failed_peer_id) { self.update_on_peer_removal(*dead_peer.node.key.preimage()); - - self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { - peer_id: failed_peer_id, - issue: crate::NodeIssue::ConnectionIssue, - })?; } } } From 51ca6287a8ca8cf312b8b5306ece56615c23b6e1 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 11 Oct 2024 17:19:09 +0900 Subject: [PATCH 150/255] feat: api rework for fs less wasm compatibility --- Cargo.lock | 1 + autonomi/Cargo.toml | 8 +- autonomi/src/client/address.rs | 8 +- autonomi/src/client/archive.rs | 66 +++++ autonomi/src/client/data.rs | 392 ++++---------------------- autonomi/src/client/error.rs | 87 ++++++ autonomi/src/client/files.rs | 190 +++++-------- autonomi/src/client/mod.rs | 8 +- autonomi/src/client/registers.rs | 11 +- autonomi/src/client/utils.rs | 272 ++++++++++++++++++ autonomi/src/client/vault.rs | 16 +- autonomi/tests/file.rs | 76 +++-- autonomi/tests/put.rs | 4 +- autonomi/tests/wallet.rs | 18 +- autonomi_cli/Cargo.toml | 2 +- autonomi_cli/src/access/network.rs | 8 +- autonomi_cli/src/actions/download.rs | 14 +- autonomi_cli/src/commands/file.rs | 8 +- evmlib/src/utils.rs | 2 +- sn_evm/src/amount.rs | 6 + sn_evm/src/evm.rs | 12 +- sn_evm/src/lib.rs | 2 + sn_networking/src/event/mod.rs | 10 +- sn_node/src/bin/safenode/main.rs | 5 +- sn_node/tests/data_with_churn.rs | 13 +- sn_node/tests/verify_data_location.rs | 2 +- test_utils/src/evm.rs | 5 +- 27 files changed, 701 insertions(+), 545 deletions(-) create mode 100644 autonomi/src/client/archive.rs create mode 100644 autonomi/src/client/error.rs create mode 100644 autonomi/src/client/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 5abb5c540a..67a4c6df2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1081,6 +1081,7 @@ dependencies = [ "rmp-serde", "self_encryption", "serde", + "sha2 0.10.8", "sn_bls_ckd", "sn_curv", "sn_evm", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 4f03191d31..aa5e463ac6 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -14,10 +14,10 @@ default = ["data"] full = ["data", "registers", "vault"] data = [] vault = ["data"] -files = ["data"] -fs = ["tokio/fs", "files"] +fs = ["tokio/fs", "data"] local = ["sn_networking/local", "test_utils/local"] -registers = [] +registers = ["data"] +loud = [] [dependencies] bip39 = "2.0.0" @@ -28,7 +28,6 @@ curv = { version = "0.10.1", package = "sn_curv", default-features = false, feat ] } eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } const-hex = "1.12.0" -evmlib = { path = "../evmlib", version = "0.1" } hex = "~0.4.3" libp2p = "0.54.1" rand = "0.8.5" @@ -49,6 +48,7 @@ futures = "0.3.30" [dev-dependencies] eyre = "0.6.5" +sha2 = "0.10.6" sn_logging = { path = "../sn_logging", version = "0.2.33" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } diff --git a/autonomi/src/client/address.rs b/autonomi/src/client/address.rs index ef7fab938e..f314952f9c 100644 --- a/autonomi/src/client/address.rs +++ b/autonomi/src/client/address.rs @@ -16,7 +16,7 @@ pub enum DataError { 
        InvalidHexString,
 }
 
-pub fn str_to_xorname(addr: &str) -> Result<XorName, DataError> {
+pub fn str_to_addr(addr: &str) -> Result<XorName, DataError> {
     let bytes = hex::decode(addr).map_err(|err| {
         error!("Failed to decode hex string: {err:?}");
         DataError::InvalidHexString
@@ -28,7 +28,7 @@ pub fn str_to_xorname(addr: &str) -> Result<XorName, DataError> {
     Ok(xor)
 }
 
-pub fn xorname_to_str(addr: XorName) -> String {
+pub fn addr_to_str(addr: XorName) -> String {
     hex::encode(addr)
 }
 
@@ -41,8 +41,8 @@ mod test {
     fn test_xorname_to_str() {
         let rng = &mut rand::thread_rng();
         let xorname = XorName::random(rng);
-        let str = xorname_to_str(xorname);
-        let xorname2 = str_to_xorname(&str).expect("Failed to convert back to xorname");
+        let str = addr_to_str(xorname);
+        let xorname2 = str_to_addr(&str).expect("Failed to convert back to xorname");
         assert_eq!(xorname, xorname2);
     }
 }
diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs
new file mode 100644
index 0000000000..1a9998e792
--- /dev/null
+++ b/autonomi/src/client/archive.rs
@@ -0,0 +1,66 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::{collections::HashMap, path::PathBuf};
+
+use super::{
+    data::DataAddr,
+    error::{GetError, PutError},
+    Client,
+};
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use sn_evm::EvmWallet;
+use xor_name::XorName;
+
+/// The address of an archive on the network. Points to an [`Archive`].
+pub type ArchiveAddr = XorName;
+
+/// An archive of files containing file paths, their metadata and the files' data addresses.
+/// Using archives is useful for uploading entire directories to the network, only needing to keep track of a single address.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Archive {
+    pub map: HashMap<PathBuf, DataAddr>,
+}
+
+impl Archive {
+    /// Deserialize from bytes.
+    pub fn from_bytes(data: Bytes) -> Result<Archive, rmp_serde::decode::Error> {
+        let root: Archive = rmp_serde::from_slice(&data[..])?;
+
+        Ok(root)
+    }
+
+    /// Serialize to bytes.
+    pub fn into_bytes(&self) -> Result<Bytes, rmp_serde::encode::Error> {
+        let root_serialized = rmp_serde::to_vec(&self)?;
+        let root_serialized = Bytes::from(root_serialized);
+
+        Ok(root_serialized)
+    }
+}
+
+impl Client {
+    /// Fetch an archive from the network
+    pub async fn archive_get(&self, addr: ArchiveAddr) -> Result<Archive, GetError> {
+        let data = self.data_get(addr).await?;
+        Ok(Archive::from_bytes(data)?)
+    }
+
+    /// Upload an archive to the network
+    pub async fn archive_put(
+        &self,
+        archive: Archive,
+        wallet: &EvmWallet,
+    ) -> Result<ArchiveAddr, PutError> {
+        let bytes = archive
+            .into_bytes()
+            .map_err(|e| PutError::Serialization(format!("Failed to serialize archive: {e:?}")))?;
+        self.data_put(bytes, wallet).await
+    }
+}
diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs
index 6e63b80515..40bb9e86c2 100644
--- a/autonomi/src/client/data.rs
+++ b/autonomi/src/client/data.rs
@@ -6,86 +6,35 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
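Before the data.rs changes below, a minimal sketch of the new `Archive` type in use (illustrative only; the `autonomi::client::archive` module path is an assumption based on the file layout above):

    use std::{collections::HashMap, path::PathBuf};

    use autonomi::client::archive::Archive;

    fn archive_roundtrip() {
        // Map relative file paths to the data addresses of their datamaps.
        let mut map = HashMap::new();
        map.insert(PathBuf::from("docs/readme.md"), xor_name::rand::random());

        let archive = Archive { map };

        // Archives are msgpack-serialized before being stored as regular data.
        let bytes = archive.into_bytes().expect("archive should serialize");
        let restored = Archive::from_bytes(bytes).expect("archive should deserialize");
        assert_eq!(archive.map, restored.map);
    }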
-use crate::self_encryption::DataMapLevel; use bytes::Bytes; -use evmlib::wallet; -use libp2p::kad::{Quorum, Record}; +use libp2p::kad::Quorum; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk}; use std::collections::HashSet; -use tokio::task::JoinError; use xor_name::XorName; +use super::error::{GetError, PayError, PutError}; use crate::{self_encryption::encrypt, Client}; -use evmlib::common::{QuoteHash, QuotePayment, TxHash}; -use evmlib::wallet::Wallet; -use libp2p::futures; -use rand::{thread_rng, Rng}; -use sn_evm::{Amount, AttoTokens, ProofOfPayment}; -use sn_networking::PutRecordCfg; -use sn_networking::{GetRecordCfg, Network, NetworkError, PayeeQuote, VerificationKind}; +use sn_evm::EvmWallet; +use sn_evm::{Amount, AttoTokens}; +use sn_networking::{GetRecordCfg, NetworkError}; use sn_protocol::{ - messages::ChunkProof, - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, - RecordKind, RetryStrategy, - }, + storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind}, NetworkAddress, }; -use std::collections::{BTreeMap, HashMap}; -use std::num::NonZero; -/// Errors that can occur during the put operation. -#[derive(Debug, thiserror::Error)] -pub enum PutError { - #[error("Failed to self-encrypt data.")] - SelfEncryption(#[from] crate::self_encryption::Error), - #[error("Error serializing data.")] - Serialization, - #[error("Error getting Vault XorName data.")] - VaultXorName, - #[error("A network error occurred.")] - Network(#[from] NetworkError), - #[error("Error occurred during payment.")] - PayError(#[from] PayError), - #[error("A wallet error occurred.")] - Wallet(#[from] sn_evm::EvmError), -} - -/// Errors that can occur during the pay operation. -#[derive(Debug, thiserror::Error)] -pub enum PayError { - #[error("Could not get store quote for: {0:?} after several retries")] - CouldNotGetStoreQuote(XorName), - #[error("Could not get store costs: {0:?}")] - CouldNotGetStoreCosts(NetworkError), - #[error("Could not simultaneously fetch store costs: {0:?}")] - JoinError(JoinError), - #[error("Wallet error: {0:?}")] - EvmWalletError(#[from] wallet::Error), - #[error("Failed to self-encrypt data.")] - SelfEncryption(#[from] crate::self_encryption::Error), -} +/// Raw Data Address +pub type DataAddr = XorName; +/// Raw Chunk Address +pub type ChunkAddr = XorName; -/// Errors that can occur during the get operation. -#[derive(Debug, thiserror::Error)] -pub enum GetError { - #[error("Could not deserialize data map.")] - InvalidDataMap(rmp_serde::decode::Error), - #[error("Failed to decrypt data.")] - Decryption(crate::self_encryption::Error), - #[error("General networking error: {0:?}")] - Network(#[from] NetworkError), - #[error("General protocol error: {0:?}")] - Protocol(#[from] sn_protocol::Error), -} +/// The address of a file on the network. Points to a [`DataMap`]. +pub type FileAddr = XorName; impl Client { - /// Fetch a piece of self-encrypted data from the network, by its data map - /// XOR address. 
- pub async fn get(&self, data_map_addr: XorName) -> Result { - info!("Fetching file from data_map: {data_map_addr:?}"); - let data_map_chunk = self.fetch_chunk(data_map_addr).await?; + /// Fetch a blob of data from the network + pub async fn data_get(&self, addr: DataAddr) -> Result { + info!("Fetching data from Data Address: {addr:?}"); + let data_map_chunk = self.chunk_get(addr).await?; let data = self .fetch_from_data_map_chunk(data_map_chunk.value()) .await?; @@ -93,89 +42,9 @@ impl Client { Ok(data) } - /// Get a raw chunk from the network. - pub async fn fetch_chunk(&self, addr: XorName) -> Result { - info!("Getting chunk: {addr:?}"); - - let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key(); - - let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: None, - target_record: None, - expected_holders: HashSet::new(), - is_register: false, - }; - - let record = self - .network - .get_record_from_network(key, &get_cfg) - .await - .inspect_err(|err| error!("Error fetching chunk: {err:?}"))?; - let header = RecordHeader::from_record(&record)?; - - if let RecordKind::Chunk = header.kind { - let chunk: Chunk = try_deserialize_record(&record)?; - Ok(chunk) - } else { - Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into()) - } - } - - /// Fetch and decrypt all chunks in the data map. - async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result { - let mut encrypted_chunks = vec![]; - - for info in data_map.infos() { - let chunk = self - .fetch_chunk(info.dst_hash) - .await - .inspect_err(|err| error!("Error fetching chunk {:?}: {err:?}", info.dst_hash))?; - let chunk = EncryptedChunk { - index: info.index, - content: chunk.value, - }; - encrypted_chunks.push(chunk); - } - - let data = decrypt_full_set(data_map, &encrypted_chunks).map_err(|e| { - error!("Error decrypting encrypted_chunks: {e:?}"); - GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e)) - })?; - - Ok(data) - } - - /// Unpack a wrapped data map and fetch all bytes using self-encryption. - async fn fetch_from_data_map_chunk(&self, data_map_bytes: &Bytes) -> Result { - let mut data_map_level: DataMapLevel = rmp_serde::from_slice(data_map_bytes) - .map_err(GetError::InvalidDataMap) - .inspect_err(|err| error!("Error deserializing data map: {err:?}"))?; - - loop { - let data_map = match &data_map_level { - DataMapLevel::First(map) => map, - DataMapLevel::Additional(map) => map, - }; - - let data = self.fetch_from_data_map(data_map).await?; - - match &data_map_level { - DataMapLevel::First(_) => break Ok(data), - DataMapLevel::Additional(_) => { - data_map_level = rmp_serde::from_slice(&data).map_err(|err| { - error!("Error deserializing data map: {err:?}"); - GetError::InvalidDataMap(err) - })?; - continue; - } - }; - } - } - - /// Upload a piece of data to the network. This data will be self-encrypted, - /// and the data map XOR address will be returned. - pub async fn put(&self, data: Bytes, wallet: &Wallet) -> Result { + /// Upload a piece of data to the network. This data will be self-encrypted. + /// Returns the Data Address at which the data was stored. 
+    pub async fn data_put(&self, data: Bytes, wallet: &EvmWallet) -> Result<DataAddr, PutError> {
         let now = sn_networking::target_arch::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
         info!(
@@ -202,7 +71,7 @@ impl Client {
         // Upload data map
         if let Some(proof) = payment_proofs.get(&map_xor_name) {
             debug!("Uploading data map chunk: {map_xor_name:?}");
-            self.upload_chunk(data_map_chunk.clone(), proof.clone())
+            self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone())
                 .await
                 .inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?;
         }
@@ -212,7 +81,7 @@ impl Client {
         for chunk in chunks {
             if let Some(proof) = payment_proofs.get(chunk.name()) {
                 let address = *chunk.address();
-                self.upload_chunk(chunk, proof.clone())
+                self.chunk_upload_with_payment(chunk, proof.clone())
                     .await
                     .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?;
             }
@@ -221,9 +90,37 @@ impl Client {
         Ok(map_xor_name)
     }
 
-    /// Get the cost of storing a piece of data.
-    #[cfg_attr(not(feature = "fs"), allow(dead_code, reason = "used only with `fs`"))]
-    pub async fn cost(&self, data: Bytes) -> Result<AttoTokens, PayError> {
+    /// Get a raw chunk from the network.
+    pub async fn chunk_get(&self, addr: ChunkAddr) -> Result<Chunk, GetError> {
+        info!("Getting chunk: {addr:?}");
+
+        let key = NetworkAddress::from_chunk_address(ChunkAddress::new(addr)).to_record_key();
+
+        let get_cfg = GetRecordCfg {
+            get_quorum: Quorum::One,
+            retry_strategy: None,
+            target_record: None,
+            expected_holders: HashSet::new(),
+            is_register: false,
+        };
+
+        let record = self
+            .network
+            .get_record_from_network(key, &get_cfg)
+            .await
+            .inspect_err(|err| error!("Error fetching chunk: {err:?}"))?;
+        let header = RecordHeader::from_record(&record)?;
+
+        if let RecordKind::Chunk = header.kind {
+            let chunk: Chunk = try_deserialize_record(&record)?;
+            Ok(chunk)
+        } else {
+            Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into())
+        }
+    }
+
+    /// Get the estimated cost of storing a piece of data.
+    pub async fn data_cost(&self, data: Bytes) -> Result<AttoTokens, PayError> {
         let now = std::time::Instant::now();
         let (data_map_chunk, chunks) = encrypt(data)?;
 
@@ -253,189 +150,4 @@ impl Client {
         );
         Ok(total_cost)
     }
-
-    /// Pay for the chunks and get the proof of payment.
-    pub(crate) async fn pay(
-        &self,
-        content_addrs: impl Iterator<Item = XorName>,
-        wallet: &Wallet,
-    ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
-        let cost_map = self.get_store_quotes(content_addrs).await?;
-        let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
-
-        // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
-        // TODO: retry when it fails?
-        // Execute chunk payments
-        let payments = wallet
-            .pay_for_quotes(quote_payments)
-            .await
-            .map_err(|err| PayError::from(err.0))?;
-
-        let proofs = construct_proofs(&cost_map, &payments);
-
-        trace!(
-            "Chunk payments of {} chunks completed. {} chunks were free / already paid for",
-            proofs.len(),
-            skipped_chunks.len()
-        );
-
-        Ok((proofs, skipped_chunks))
-    }
-
-    pub(crate) async fn get_store_quotes(
-        &self,
-        content_addrs: impl Iterator<Item = XorName>,
-    ) -> Result<HashMap<XorName, PayeeQuote>, PayError> {
-        let futures: Vec<_> = content_addrs
-            .into_iter()
-            .map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr))
-            .collect();
-
-        let quotes = futures::future::try_join_all(futures).await?;
-
-        Ok(quotes.into_iter().collect::<HashMap<_, _>>())
-    }
-
-    /// Directly writes Chunks to the network in the form of immutable self encrypted chunks.
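Taken together, the renamed methods above suggest a simple client flow. A hedged sketch follows (method names are from this diff; `Client` construction, the `autonomi::Client` path, and the use of `eyre` for error handling are assumptions):

    use bytes::Bytes;

    async fn store_and_fetch(
        client: &autonomi::Client,
        wallet: &sn_evm::EvmWallet,
    ) -> eyre::Result<()> {
        let data = Bytes::from("hello, network");

        // Estimate before paying: quotes are fetched per chunk of the encrypted data.
        let cost = client.data_cost(data.clone()).await?;
        println!("estimated store cost: {cost:?}");

        // Self-encrypt, pay, upload chunks and datamap; returns the datamap address.
        let addr = client.data_put(data.clone(), wallet).await?;

        // Anyone can fetch by address; all stored data is public.
        let fetched = client.data_get(addr).await?;
        assert_eq!(fetched, data);
        Ok(())
    }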
- async fn upload_chunk( - &self, - chunk: Chunk, - proof_of_payment: ProofOfPayment, - ) -> Result<(), PutError> { - self.store_chunk(chunk, proof_of_payment).await?; - Ok(()) - } - - /// Actually store a chunk to a peer. - async fn store_chunk(&self, chunk: Chunk, payment: ProofOfPayment) -> Result<(), PutError> { - let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID"); - - debug!("Storing chunk: {chunk:?} to {:?}", storing_node); - - let key = chunk.network_address().to_record_key(); - - let record_kind = RecordKind::ChunkWithPayment; - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind) - .map_err(|_| PutError::Serialization)? - .to_vec(), - publisher: None, - expires: None, - }; - - let verification = { - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")), - retry_strategy: Some(RetryStrategy::Quick), - target_record: None, - expected_holders: Default::default(), - is_register: false, - }; - - let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk) - .map_err(|_| PutError::Serialization)? - .to_vec(); - let random_nonce = thread_rng().gen::(); - let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); - - Some(( - VerificationKind::ChunkProof { - expected_proof, - nonce: random_nonce, - }, - verification_cfg, - )) - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: Some(vec![storing_node]), - verification, - }; - Ok(self.network.put_record(record, &put_cfg).await?) - } -} - -/// Fetch a store quote for a content address with a retry strategy. -async fn fetch_store_quote_with_retries( - network: &Network, - content_addr: XorName, -) -> Result<(XorName, PayeeQuote), PayError> { - let mut retries = 0; - - loop { - match fetch_store_quote(network, content_addr).await { - Ok(quote) => { - break Ok((content_addr, quote)); - } - Err(err) if retries < 2 => { - retries += 1; - error!("Error while fetching store quote: {err:?}, retry #{retries}"); - } - Err(err) => { - error!( - "Error while fetching store quote: {err:?}, stopping after {retries} retries" - ); - break Err(PayError::CouldNotGetStoreQuote(content_addr)); - } - } - } -} - -/// Fetch a store quote for a content address. -async fn fetch_store_quote( - network: &Network, - content_addr: XorName, -) -> Result { - network - .get_store_costs_from_network( - NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)), - vec![], - ) - .await -} - -/// Form to be executed payments and already executed payments from a cost map. -fn extract_quote_payments( - cost_map: &HashMap, -) -> (Vec, Vec) { - let mut to_be_paid = vec![]; - let mut already_paid = vec![]; - - for (chunk_address, quote) in cost_map.iter() { - if quote.2.cost.is_zero() { - already_paid.push(*chunk_address); - } else { - to_be_paid.push(( - quote.2.hash(), - quote.2.rewards_address, - quote.2.cost.as_atto(), - )); - } - } - - (to_be_paid, already_paid) -} - -/// Construct payment proofs from cost map and payments map. 
-fn construct_proofs( - cost_map: &HashMap, - payments: &BTreeMap, -) -> HashMap { - cost_map - .iter() - .filter_map(|(xor_name, (_, _, quote))| { - payments.get("e.hash()).map(|tx_hash| { - ( - *xor_name, - ProofOfPayment { - quote: quote.clone(), - tx_hash: *tx_hash, - }, - ) - }) - }) - .collect() } diff --git a/autonomi/src/client/error.rs b/autonomi/src/client/error.rs new file mode 100644 index 0000000000..d03aef5da6 --- /dev/null +++ b/autonomi/src/client/error.rs @@ -0,0 +1,87 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + +use sn_evm::EvmWalletError; +use sn_networking::NetworkError; +use tokio::task::JoinError; +use xor_name::XorName; + +/// Errors that can occur during the file upload operation. +#[cfg(feature = "fs")] +#[derive(Debug, thiserror::Error)] +pub enum UploadError { + #[error("Failed to recursively traverse directory")] + WalkDir(#[from] walkdir::Error), + #[error("Input/output failure")] + IoError(#[from] std::io::Error), + #[error("Failed to upload file")] + PutError(#[from] PutError), + #[error("Failed to fetch file")] + GetError(#[from] GetError), + #[error("Failed to serialize")] + Serialization(#[from] rmp_serde::encode::Error), + #[error("Failed to deserialize")] + Deserialization(#[from] rmp_serde::decode::Error), +} + +#[cfg(feature = "fs")] +/// Errors that can occur during the download operation. +#[derive(Debug, thiserror::Error)] +pub enum DownloadError { + #[error("Failed to download file")] + GetError(#[from] GetError), + #[error("IO failure")] + IoError(#[from] std::io::Error), +} + +/// Errors that can occur during the put operation. +#[derive(Debug, thiserror::Error)] +pub enum PutError { + #[error("Failed to self-encrypt data.")] + SelfEncryption(#[from] crate::self_encryption::Error), + #[error("Error getting Vault XorName data.")] + VaultXorName, + #[error("A network error occurred.")] + Network(#[from] NetworkError), + #[error("Error occurred during payment.")] + PayError(#[from] PayError), + #[error("Failed to serialize {0}")] + Serialization(String), + #[error("A wallet error occurred.")] + Wallet(#[from] sn_evm::EvmError), +} + +/// Errors that can occur during the pay operation. +#[derive(Debug, thiserror::Error)] +pub enum PayError { + #[error("Could not get store quote for: {0:?} after several retries")] + CouldNotGetStoreQuote(XorName), + #[error("Could not get store costs: {0:?}")] + CouldNotGetStoreCosts(NetworkError), + #[error("Could not simultaneously fetch store costs: {0:?}")] + JoinError(JoinError), + #[error("Wallet error: {0:?}")] + EvmWalletError(#[from] EvmWalletError), + #[error("Failed to self-encrypt data.")] + SelfEncryption(#[from] crate::self_encryption::Error), +} + +/// Errors that can occur during the get operation. 
+#[derive(Debug, thiserror::Error)]
+pub enum GetError {
+    #[error("Could not deserialize data map.")]
+    InvalidDataMap(rmp_serde::decode::Error),
+    #[error("Failed to decrypt data.")]
+    Decryption(crate::self_encryption::Error),
+    #[error("Failed to deserialize")]
+    Deserialization(#[from] rmp_serde::decode::Error),
+    #[error("General networking error: {0:?}")]
+    Network(#[from] NetworkError),
+    #[error("General protocol error: {0:?}")]
+    Protocol(#[from] sn_protocol::Error),
+}
diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/files.rs
index 10786b61f3..7135fd81c2 100644
--- a/autonomi/src/client/files.rs
+++ b/autonomi/src/client/files.rs
@@ -6,81 +6,92 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-use crate::client::data::{GetError, PutError};
 use crate::client::Client;
 use bytes::Bytes;
-use serde::{Deserialize, Serialize};
+use sn_evm::EvmWallet;
 use std::collections::HashMap;
 use std::path::PathBuf;
-use xor_name::XorName;
 
-/// Directory-like structure that containing file paths and their metadata.
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Root {
-    pub map: HashMap<PathBuf, FilePointer>,
-}
+use super::archive::{Archive, ArchiveAddr};
+use super::data::DataAddr;
+use super::error::{DownloadError, UploadError};
 
-impl Root {
-    /// Deserialize from bytes.
-    pub fn from_bytes(data: Bytes) -> Result<Root, rmp_serde::decode::Error> {
-        let root: Root = rmp_serde::from_slice(&data[..])?;
+impl Client {
+    /// Download file from network to local file system
+    pub async fn file_download(
+        &self,
+        data_addr: DataAddr,
+        to_dest: PathBuf,
+    ) -> Result<(), DownloadError> {
+        let data = self.data_get(data_addr).await?;
+        if let Some(parent) = to_dest.parent() {
+            tokio::fs::create_dir_all(parent).await?;
+        }
+        tokio::fs::write(to_dest, data).await?;
+        Ok(())
+    }
 
-        Ok(root)
+    /// Download directory from network to local file system
+    pub async fn dir_download(
+        &self,
+        archive_addr: ArchiveAddr,
+        to_dest: PathBuf,
+    ) -> Result<(), DownloadError> {
+        let archive = self.archive_get(archive_addr).await?;
+        for (path, addr) in archive.map {
+            self.file_download(addr, to_dest.join(path)).await?;
+        }
+        Ok(())
     }
 
-    /// Serialize to bytes.
-    pub fn into_bytes(&self) -> Result<Bytes, rmp_serde::encode::Error> {
-        let root_serialized = rmp_serde::to_vec(&self)?;
-        let root_serialized = Bytes::from(root_serialized);
+    /// Upload a directory to the network. The directory is recursively walked.
+    /// Reads all files, splits into chunks, uploads chunks, uploads datamaps, uploads archive, returns ArchiveAddr (pointing to the archive)
+    pub async fn dir_upload(
+        &mut self,
+        dir_path: PathBuf,
+        wallet: &EvmWallet,
+    ) -> Result<ArchiveAddr, UploadError> {
+        let mut map = HashMap::new();
 
-        Ok(root_serialized)
-    }
-}
+        for entry in walkdir::WalkDir::new(dir_path) {
+            let entry = entry?;
 
-/// Structure that describes a file on the network. The actual data is stored in
-/// chunks, to be constructed with the address pointing to the data map.
-///
-/// This is similar to ['inodes'](https://en.wikipedia.org/wiki/Inode) in Unix-like filesystems.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct FilePointer {
-    pub(crate) data_map: XorName,
-    pub(crate) created_at: u64,
-    pub(crate) modified_at: u64,
-}
+            if !entry.file_type().is_file() {
+                continue;
+            }
 
-#[derive(Debug, thiserror::Error)]
-pub enum UploadError {
-    #[error("Failed to recursively traverse directory")]
-    WalkDir(#[from] walkdir::Error),
-    #[error("Input/output failure")]
-    IoError(#[from] std::io::Error),
-    #[error("Failed to upload file")]
-    PutError(#[from] PutError),
-    #[error("Failed to fetch file")]
-    GetError(#[from] GetError),
-    #[error("Failed to serialize")]
-    Serialization(#[from] rmp_serde::encode::Error),
-    #[error("Failed to deserialize")]
-    Deserialization(#[from] rmp_serde::decode::Error),
-}
+            let path = entry.path().to_path_buf();
+            tracing::info!("Uploading file: {path:?}");
+            #[cfg(feature = "loud")]
+            println!("Uploading file: {path:?}");
+            let file = self.file_upload(path.clone(), wallet).await?;
 
-impl Client {
-    /// Fetch a directory from the network.
-    pub async fn fetch_root(&self, address: XorName) -> Result<Root, UploadError> {
-        let data = self.get(address).await?;
+            map.insert(path, file);
+        }
 
-        Ok(Root::from_bytes(data)?)
+        let archive = Archive { map };
+        let archive_serialized = archive.into_bytes()?;
+
+        let arch_addr = self.data_put(archive_serialized, wallet).await?;
+
+        Ok(arch_addr)
     }
 
-    /// Fetch the file pointed to by the given pointer.
-    pub async fn fetch_file(&self, file: &FilePointer) -> Result<Bytes, UploadError> {
-        let data = self.get(file.data_map).await?;
-        Ok(data)
+    /// Upload a file to the network.
+    /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap)
+    async fn file_upload(
+        &mut self,
+        path: PathBuf,
+        wallet: &EvmWallet,
+    ) -> Result<DataAddr, UploadError> {
+        let data = tokio::fs::read(path).await?;
+        let data = Bytes::from(data);
+        let addr = self.data_put(data, wallet).await?;
+        Ok(addr)
     }
 
     /// Get the cost to upload a file/dir to the network.
     /// quick and dirty implementation, please refactor once files are cleanly implemented
-    #[cfg(feature = "fs")]
     pub async fn file_cost(&self, path: &PathBuf) -> Result<AttoTokens, UploadError> {
         let mut map = HashMap::new();
         let mut total_cost = sn_evm::Amount::ZERO;
@@ -97,7 +108,7 @@ impl Client {
             let data = tokio::fs::read(&path).await?;
             let file_bytes = Bytes::from(data);
 
-            let file_cost = self.cost(file_bytes.clone()).await.expect("TODO");
+            let file_cost = self.data_cost(file_bytes.clone()).await.expect("TODO");
 
             total_cost += file_cost.as_atto();
 
@@ -107,70 +118,19 @@ impl Client {
             let (data_map_chunk, _) = crate::self_encryption::encrypt(file_bytes).expect("TODO");
             tracing::debug!("Encryption took: {:.2?}", now.elapsed());
             let map_xor_name = *data_map_chunk.address().xorname();
-            let data_map_xorname = FilePointer {
-                data_map: map_xor_name,
-                created_at: 0,
-                modified_at: 0,
-            };
-            map.insert(path, data_map_xorname);
+            map.insert(path, map_xor_name);
         }
 
-        let root = Root { map };
+        let root = Archive { map };
         let root_serialized = rmp_serde::to_vec(&root).expect("TODO");
 
-        let cost = self.cost(Bytes::from(root_serialized)).await.expect("TODO");
-        Ok(cost)
-    }
-
-    /// Upload a directory to the network. The directory is recursively walked.
-    #[cfg(feature = "fs")]
-    pub async fn upload_from_dir(
-        &mut self,
-        path: PathBuf,
-        wallet: &sn_evm::EvmWallet,
-    ) -> Result<(Root, XorName), UploadError> {
-        let mut map = HashMap::new();
+        let archive_cost = self
+            .data_cost(Bytes::from(root_serialized))
+            .await
+            .expect("TODO");
 
-        for entry in walkdir::WalkDir::new(path) {
-            let entry = entry?;
-
-            if !entry.file_type().is_file() {
-                continue;
-            }
-
-            let path = entry.path().to_path_buf();
-            tracing::info!("Uploading file: {path:?}");
-            println!("Uploading file: {path:?}");
-            let file = upload_from_file(self, path.clone(), wallet).await?;
-
-            map.insert(path, file);
-        }
-
-        let root = Root { map };
-        let root_serialized = root.into_bytes()?;
-
-        let xor_name = self.put(root_serialized, wallet).await?;
-
-        Ok((root, xor_name))
+        total_cost += archive_cost.as_atto();
+        Ok(total_cost.into())
     }
 }
-
-#[cfg(feature = "fs")]
-async fn upload_from_file(
-    client: &mut Client,
-    path: PathBuf,
-    wallet: &sn_evm::EvmWallet,
-) -> Result<FilePointer, UploadError> {
-    let data = tokio::fs::read(path).await?;
-    let data = Bytes::from(data);
-
-    let addr = client.put(data, wallet).await?;
-
-    // TODO: Set created_at and modified_at
-    Ok(FilePointer {
-        data_map: addr,
-        created_at: 0,
-        modified_at: 0,
-    })
-}
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index cb61bd75de..3692463cc9 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -7,16 +7,22 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 pub mod address;
+pub mod error;
 
+#[cfg(feature = "data")]
+pub mod archive;
 #[cfg(feature = "data")]
 pub mod data;
-#[cfg(feature = "files")]
+#[cfg(feature = "fs")]
 pub mod files;
 #[cfg(feature = "registers")]
 pub mod registers;
 #[cfg(feature = "vault")]
 pub mod vault;
 
+// private module with utility functions
+mod utils;
+
 use std::{collections::HashSet, time::Duration};
 
 use libp2p::{identity::Keypair, Multiaddr};
diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 6e65389c2a..c584f74d45 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -10,15 +10,16 @@
 pub use bls::SecretKey as RegisterSecretKey;
 use sn_evm::Amount;
 use sn_evm::AttoTokens;
+use sn_evm::EvmWalletError;
 use sn_networking::VerificationKind;
 use sn_protocol::storage::RetryStrategy;
 pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress};
 
-use crate::client::data::PayError;
+use crate::client::error::PayError;
 use crate::client::Client;
 use bytes::Bytes;
-use evmlib::wallet::Wallet;
 use libp2p::kad::{Quorum, Record};
+use sn_evm::EvmWallet;
 use sn_networking::{GetRecordCfg, GetRecordError, NetworkError, PutRecordCfg};
 use sn_protocol::storage::try_deserialize_record;
 use sn_protocol::storage::try_serialize_record;
@@ -40,7 +41,7 @@ pub enum RegisterError {
     #[error("Payment failure occurred during register creation.")]
     Pay(#[from] PayError),
     #[error("Failed to retrieve wallet payment")]
-    Wallet(#[from] evmlib::wallet::Error),
+    Wallet(#[from] EvmWalletError),
     #[error("Failed to write to low-level register")]
     Write(#[source] sn_registers::Error),
     #[error("Failed to sign register")]
@@ -273,7 +274,7 @@ impl Client {
         value: Bytes,
         name: &str,
         owner: RegisterSecretKey,
-        wallet: &Wallet,
+        wallet: &EvmWallet,
     ) -> Result<Register, RegisterError> {
         let pk = owner.public_key();
         let permissions = Permissions::new_with([pk]);
@@ -291,7 +292,7 @@ impl Client {
         name: &str,
         owner: RegisterSecretKey,
         permissions: RegisterPermissions,
-        wallet: &Wallet,
+        wallet: &EvmWallet,
     ) -> Result<Register, RegisterError> {
         info!("Creating register with name: {name}");
         let name = XorName::from_content_parts(&[name.as_bytes()]);
diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs
new file mode 100644
index 0000000000..eae9f62c4b
--- /dev/null
+++ b/autonomi/src/client/utils.rs
@@ -0,0 +1,272 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use std::{
+    collections::{BTreeMap, HashMap},
+    num::NonZero,
+};
+
+use bytes::Bytes;
+use libp2p::kad::{Quorum, Record};
+use rand::{thread_rng, Rng};
+use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk};
+use sn_evm::{EvmWallet, ProofOfPayment, QuoteHash, QuotePayment, TxHash};
+use sn_networking::{
+    GetRecordCfg, Network, NetworkError, PayeeQuote, PutRecordCfg, VerificationKind,
+};
+use sn_protocol::{
+    messages::ChunkProof,
+    storage::{try_serialize_record, Chunk, ChunkAddress, RecordKind, RetryStrategy},
+    NetworkAddress,
+};
+use xor_name::XorName;
+
+use crate::self_encryption::DataMapLevel;
+
+use super::{
+    error::{GetError, PayError, PutError},
+    Client,
+};
+
+impl Client {
+    /// Fetch and decrypt all chunks in the data map.
+    pub(crate) async fn fetch_from_data_map(&self, data_map: &DataMap) -> Result<Bytes, GetError> {
+        let mut encrypted_chunks = vec![];
+
+        for info in data_map.infos() {
+            let chunk = self
+                .chunk_get(info.dst_hash)
+                .await
+                .inspect_err(|err| error!("Error fetching chunk {:?}: {err:?}", info.dst_hash))?;
+            let chunk = EncryptedChunk {
+                index: info.index,
+                content: chunk.value,
+            };
+            encrypted_chunks.push(chunk);
+        }
+
+        let data = decrypt_full_set(data_map, &encrypted_chunks).map_err(|e| {
+            error!("Error decrypting encrypted_chunks: {e:?}");
+            GetError::Decryption(crate::self_encryption::Error::SelfEncryption(e))
+        })?;
+
+        Ok(data)
+    }
+
+    /// Unpack a wrapped data map and fetch all bytes using self-encryption.
+    pub(crate) async fn fetch_from_data_map_chunk(
+        &self,
+        data_map_bytes: &Bytes,
+    ) -> Result<Bytes, GetError> {
+        let mut data_map_level: DataMapLevel = rmp_serde::from_slice(data_map_bytes)
+            .map_err(GetError::InvalidDataMap)
+            .inspect_err(|err| error!("Error deserializing data map: {err:?}"))?;
+
+        loop {
+            let data_map = match &data_map_level {
+                DataMapLevel::First(map) => map,
+                DataMapLevel::Additional(map) => map,
+            };
+
+            let data = self.fetch_from_data_map(data_map).await?;
+
+            match &data_map_level {
+                DataMapLevel::First(_) => break Ok(data),
+                DataMapLevel::Additional(_) => {
+                    data_map_level = rmp_serde::from_slice(&data).map_err(|err| {
+                        error!("Error deserializing data map: {err:?}");
+                        GetError::InvalidDataMap(err)
+                    })?;
+                    continue;
+                }
+            };
+        }
+    }
+
+    pub(crate) async fn chunk_upload_with_payment(
+        &self,
+        chunk: Chunk,
+        payment: ProofOfPayment,
+    ) -> Result<(), PutError> {
+        let storing_node = payment.to_peer_id_payee().expect("Missing node Peer ID");
+
+        debug!("Storing chunk: {chunk:?} to {:?}", storing_node);
+
+        let key = chunk.network_address().to_record_key();
+
+        let record_kind = RecordKind::ChunkWithPayment;
+        let record = Record {
+            key: key.clone(),
+            value: try_serialize_record(&(payment, chunk.clone()), record_kind)
+                .map_err(|e| {
+                    PutError::Serialization(format!(
+                        "Failed to serialize chunk with payment: {e:?}"
+                    ))
+                })?
+                .to_vec(),
+            publisher: None,
+            expires: None,
+        };
+
+        let verification = {
+            let verification_cfg = GetRecordCfg {
+                get_quorum: Quorum::N(NonZero::new(2).expect("2 is non-zero")),
+                retry_strategy: Some(RetryStrategy::Quick),
+                target_record: None,
+                expected_holders: Default::default(),
+                is_register: false,
+            };
+
+            let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)
+                .map_err(|e| PutError::Serialization(format!("Failed to serialize chunk: {e:?}")))?
+                .to_vec();
+            let random_nonce = thread_rng().gen::<u64>();
+            let expected_proof = ChunkProof::new(&stored_on_node, random_nonce);
+
+            Some((
+                VerificationKind::ChunkProof {
+                    expected_proof,
+                    nonce: random_nonce,
+                },
+                verification_cfg,
+            ))
+        };
+
+        let put_cfg = PutRecordCfg {
+            put_quorum: Quorum::One,
+            retry_strategy: Some(RetryStrategy::Balanced),
+            use_put_record_to: Some(vec![storing_node]),
+            verification,
+        };
+        Ok(self.network.put_record(record, &put_cfg).await?)
+    }
+
+    /// Pay for the chunks and get the proof of payment.
+    pub(crate) async fn pay(
+        &self,
+        content_addrs: impl Iterator<Item = XorName>,
+        wallet: &EvmWallet,
+    ) -> Result<(HashMap<XorName, ProofOfPayment>, Vec<XorName>), PayError> {
+        let cost_map = self.get_store_quotes(content_addrs).await?;
+        let (quote_payments, skipped_chunks) = extract_quote_payments(&cost_map);
+
+        // TODO: the error might contain some succeeded quote payments as well. These should be returned on err, so that they can be skipped when retrying.
+        // TODO: retry when it fails?
+        // Execute chunk payments
+        let payments = wallet
+            .pay_for_quotes(quote_payments)
+            .await
+            .map_err(|err| PayError::from(err.0))?;
+
+        let proofs = construct_proofs(&cost_map, &payments);
+
+        trace!(
+            "Chunk payments of {} chunks completed. {} chunks were free / already paid for",
+            proofs.len(),
+            skipped_chunks.len()
+        );
+
+        Ok((proofs, skipped_chunks))
+    }
+
+    pub(crate) async fn get_store_quotes(
+        &self,
+        content_addrs: impl Iterator<Item = XorName>,
+    ) -> Result<HashMap<XorName, PayeeQuote>, PayError> {
+        let futures: Vec<_> = content_addrs
+            .into_iter()
+            .map(|content_addr| fetch_store_quote_with_retries(&self.network, content_addr))
+            .collect();
+
+        let quotes = futures::future::try_join_all(futures).await?;
+
+        Ok(quotes.into_iter().collect::<HashMap<XorName, PayeeQuote>>())
+    }
+}
+
+/// Fetch a store quote for a content address with a retry strategy.
+async fn fetch_store_quote_with_retries(
+    network: &Network,
+    content_addr: XorName,
+) -> Result<(XorName, PayeeQuote), PayError> {
+    let mut retries = 0;
+
+    loop {
+        match fetch_store_quote(network, content_addr).await {
+            Ok(quote) => {
+                break Ok((content_addr, quote));
+            }
+            Err(err) if retries < 2 => {
+                retries += 1;
+                error!("Error while fetching store quote: {err:?}, retry #{retries}");
+            }
+            Err(err) => {
+                error!(
+                    "Error while fetching store quote: {err:?}, stopping after {retries} retries"
+                );
+                break Err(PayError::CouldNotGetStoreQuote(content_addr));
+            }
+        }
+    }
+}
+
+/// Fetch a store quote for a content address.
+async fn fetch_store_quote(
+    network: &Network,
+    content_addr: XorName,
+) -> Result<PayeeQuote, NetworkError> {
+    network
+        .get_store_costs_from_network(
+            NetworkAddress::from_chunk_address(ChunkAddress::new(content_addr)),
+            vec![],
+        )
+        .await
+}
+
+/// Form to be executed payments and already executed payments from a cost map.
+fn extract_quote_payments(
+    cost_map: &HashMap<XorName, PayeeQuote>,
+) -> (Vec<QuotePayment>, Vec<XorName>) {
+    let mut to_be_paid = vec![];
+    let mut already_paid = vec![];
+
+    for (chunk_address, quote) in cost_map.iter() {
+        if quote.2.cost.is_zero() {
+            already_paid.push(*chunk_address);
+        } else {
+            to_be_paid.push((
+                quote.2.hash(),
+                quote.2.rewards_address,
+                quote.2.cost.as_atto(),
+            ));
+        }
+    }
+
+    (to_be_paid, already_paid)
+}
+
+/// Construct payment proofs from cost map and payments map.
+fn construct_proofs(
+    cost_map: &HashMap<XorName, PayeeQuote>,
+    payments: &BTreeMap<QuoteHash, TxHash>,
+) -> HashMap<XorName, ProofOfPayment> {
+    cost_map
+        .iter()
+        .filter_map(|(xor_name, (_, _, quote))| {
+            payments.get(&quote.hash()).map(|tx_hash| {
+                (
+                    *xor_name,
+                    ProofOfPayment {
+                        quote: quote.clone(),
+                        tx_hash: *tx_hash,
+                    },
+                )
+            })
+        })
+        .collect()
+}
diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs
index 735c43c07d..ab69704549 100644
--- a/autonomi/src/client/vault.rs
+++ b/autonomi/src/client/vault.rs
@@ -8,12 +8,12 @@
 
 use std::collections::HashSet;
 
-use crate::client::data::PutError;
+use crate::client::error::PutError;
 use crate::client::Client;
 use bls::SecretKey;
 use bytes::Bytes;
-use evmlib::wallet::Wallet;
 use libp2p::kad::{Quorum, Record};
+use sn_evm::EvmWallet;
 use sn_networking::{GetRecordCfg, NetworkError, PutRecordCfg, VerificationKind};
 use sn_protocol::storage::{
     try_serialize_record, RecordKind, RetryStrategy, Scratchpad, ScratchpadAddress,
@@ -86,7 +86,7 @@ impl Client {
     pub async fn write_bytes_to_vault(
         &mut self,
         data: Bytes,
-        wallet: &mut Wallet,
+        wallet: &mut EvmWallet,
         secret_key: &SecretKey,
     ) -> Result<XorName, PutError> {
         let client_pk = secret_key.public_key();
@@ -133,7 +133,11 @@ impl Client {
             Record {
                 key: scratch_key,
                 value: try_serialize_record(&(proof, scratch), RecordKind::ScratchpadWithPayment)
-                    .map_err(|_| PutError::Serialization)?
+                    .map_err(|_| {
+                        PutError::Serialization(
+                            "Failed to serialize scratchpad with payment".to_string(),
+                        )
+                    })?
                     .to_vec(),
                 publisher: None,
                 expires: None,
@@ -142,7 +146,9 @@
             Record {
                 key: scratch_key,
                 value: try_serialize_record(&scratch, RecordKind::Scratchpad)
-                    .map_err(|_| PutError::Serialization)?
+                    .map_err(|_| {
+                        PutError::Serialization("Failed to serialize scratchpad".to_string())
+                    })?
                     .to_vec(),
                 publisher: None,
                 expires: None,
diff --git a/autonomi/tests/file.rs b/autonomi/tests/file.rs
index cae3eedbcc..f7bc84814d 100644
--- a/autonomi/tests/file.rs
+++ b/autonomi/tests/file.rs
@@ -6,38 +6,77 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-#![cfg(all(feature = "files", feature = "fs"))]
+#![cfg(feature = "fs")]
 
 use autonomi::Client;
 use eyre::Result;
+use sha2::{Digest, Sha256};
 use sn_logging::LogBuilder;
+use std::fs::File;
+use std::io::{BufReader, Read};
 use std::time::Duration;
 use test_utils::{evm::get_funded_wallet, peers_from_env};
 use tokio::time::sleep;
+use walkdir::WalkDir;
 
+// With a local evm network, and local network, run:
+// EVM_NETWORK=local cargo test --features="fs,local" --package autonomi --test file
+#[cfg(feature = "fs")]
 #[tokio::test]
-async fn file() -> Result<()> {
-    let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("file", false);
+async fn dir_upload_download() -> Result<()> {
+    let _log_appender_guard =
+        LogBuilder::init_single_threaded_tokio_test("dir_upload_download", false);
 
     let mut client = Client::connect(&peers_from_env()?).await?;
     let wallet = get_funded_wallet();
 
-    let (root, addr) = client
-        .upload_from_dir("tests/file/test_dir".into(), &wallet)
+    let addr = client
+        .dir_upload("tests/file/test_dir".into(), &wallet)
         .await?;
 
     sleep(Duration::from_secs(10)).await;
 
-    let root_fetched = client.fetch_root(addr).await?;
+    client
+        .dir_download(addr, "tests/file/test_dir_fetched".into())
+        .await?;
 
+    // compare the two directories
     assert_eq!(
-        root.map, root_fetched.map,
-        "root fetched should match root put"
+        compute_dir_sha256("tests/file/test_dir")?,
+        compute_dir_sha256("tests/file/test_dir_fetched")?,
     );
-
     Ok(())
 }
 
+fn compute_sha256(path: &str) -> Result<String> {
+    let mut hasher = Sha256::new();
+    let mut file = BufReader::new(File::open(path)?);
+    let mut buffer = [0; 1024];
+    while let Ok(read_bytes) = file.read(&mut buffer) {
+        if read_bytes == 0 {
+            break;
+        }
+        hasher.update(&buffer[..read_bytes]);
+    }
+    Ok(format!("{:x}", hasher.finalize()))
+}
+
+fn compute_dir_sha256(dir: &str) -> Result<String> {
+    let mut hasher = Sha256::new();
+    for entry in WalkDir::new(dir).into_iter().filter_map(|e| e.ok()) {
+        if entry.file_type().is_file() {
+            let sha = compute_sha256(
+                entry
+                    .path()
+                    .to_str()
+                    .expect("Failed to convert path to string"),
+            )?;
+            hasher.update(sha.as_bytes());
+        }
+    }
+    Ok(format!("{:x}", hasher.finalize()))
+}
+
 #[cfg(feature = "vault")]
 #[tokio::test]
 async fn file_into_vault() -> Result<()> {
@@ -47,30 +86,25 @@ async fn file_into_vault() -> Result<()> {
     let mut wallet = get_funded_wallet();
     let client_sk = bls::SecretKey::random();
 
-    let (root, addr) = client
-        .upload_from_dir("tests/file/test_dir".into(), &wallet)
+    let addr = client
+        .dir_upload("tests/file/test_dir".into(), &wallet)
        .await?;
 
     sleep(Duration::from_secs(2)).await;
 
-    let root_fetched = client.fetch_root(addr).await?;
+    let archive = client.archive_get(addr).await?;
 
     client
-        .write_bytes_to_vault(root.into_bytes()?, &mut wallet, &client_sk)
+        .write_bytes_to_vault(archive.into_bytes()?, &mut wallet,
&client_sk) .await?; - assert_eq!( - root.map, root_fetched.map, - "root fetched should match root put" - ); - // now assert over the stored account packet let new_client = Client::connect(&[]).await?; if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? { - let ap_root_fetched = autonomi::client::files::Root::from_bytes(ap)?; + let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; assert_eq!( - root.map, ap_root_fetched.map, - "root fetched should match root put" + archive.map, ap_archive_fetched.map, + "archive fetched should match archive put" ); } else { eyre::bail!("No account packet found"); diff --git a/autonomi/tests/put.rs b/autonomi/tests/put.rs index 5c0163a507..dbced37d00 100644 --- a/autonomi/tests/put.rs +++ b/autonomi/tests/put.rs @@ -23,11 +23,11 @@ async fn put() -> Result<()> { let wallet = get_funded_wallet(); let data = gen_random_data(1024 * 1024 * 10); - let addr = client.put(data.clone(), &wallet).await?; + let addr = client.data_put(data.clone(), &wallet).await?; sleep(Duration::from_secs(10)).await; - let data_fetched = client.get(addr).await?; + let data_fetched = client.data_get(addr).await?; assert_eq!(data, data_fetched, "data fetched should match data put"); Ok(()) diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs index 502afb6be0..b410f0dd80 100644 --- a/autonomi/tests/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -7,22 +7,21 @@ // permissions and limitations relating to use of the SAFE Network Software. use const_hex::traits::FromHex; -use evmlib::common::{Address, Amount}; -use evmlib::utils::evm_network_from_env; -use evmlib::wallet::Wallet; +use sn_evm::evm::network_from_env; +use sn_evm::EvmWallet; +use sn_evm::{Amount, RewardsAddress}; use sn_logging::LogBuilder; use test_utils::evm::get_funded_wallet; #[tokio::test] async fn from_private_key() { let private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa"; - let network = - evm_network_from_env().expect("Could not get EVM network from environment variables"); - let wallet = Wallet::new_from_private_key(network, private_key).unwrap(); + let network = network_from_env().expect("Could not get EVM network from environment variables"); + let wallet = EvmWallet::new_from_private_key(network, private_key).unwrap(); assert_eq!( wallet.address(), - Address::from_hex("0x69D5BF2Bc42bca8782b8D2b4FdfF2b1Fa7644Fe7").unwrap() + RewardsAddress::from_hex("0x69D5BF2Bc42bca8782b8D2b4FdfF2b1Fa7644Fe7").unwrap() ) } @@ -30,11 +29,10 @@ async fn from_private_key() { async fn send_tokens() { let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("wallet", false); - let network = - evm_network_from_env().expect("Could not get EVM network from environment variables"); + let network = network_from_env().expect("Could not get EVM network from environment variables"); let wallet = get_funded_wallet(); - let receiving_wallet = Wallet::new_with_random_wallet(network); + let receiving_wallet = EvmWallet::new_with_random_wallet(network); let initial_balance = receiving_wallet.balance_of_tokens().await.unwrap(); diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml index b06d90d67b..414cd47149 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi_cli/Cargo.toml @@ -12,9 +12,9 @@ network-contacts = ["sn_peers_acquisition/network-contacts"] [dependencies] autonomi = { path = "../autonomi", version = "0.1.0", features = [ "data", - "files", "fs", "registers", + "loud", ] } clap = { version = "4.2.1", features = ["derive"] } color-eyre 
= "~0.6" diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs index 0502988ddf..2268856581 100644 --- a/autonomi_cli/src/access/network.rs +++ b/autonomi_cli/src/access/network.rs @@ -12,9 +12,11 @@ use color_eyre::eyre::Context; use color_eyre::Result; use color_eyre::Section; use sn_peers_acquisition::PeersArgs; - use sn_peers_acquisition::SAFE_PEERS_ENV; +#[cfg(not(feature = "local"))] +use autonomi::evm::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; + pub async fn get_peers(peers: PeersArgs) -> Result> { peers.get_peers().await .wrap_err("Please provide valid Network peers to connect to") @@ -33,7 +35,9 @@ pub fn get_evm_network_from_env() -> Result { } #[cfg(not(feature = "local"))] { - let network = autonomi::evm::network_from_env(); + let network = autonomi::evm::network_from_env() + .wrap_err("Failed to get EVM network from environment variables") + .with_suggestion(|| format!("If connecting to a custom EVM network, make sure you've set the following environment variables: {RPC_URL}, {PAYMENT_TOKEN_ADDRESS} and {DATA_PAYMENTS_ADDRESS}"))?; if matches!(network, EvmNetwork::Custom(_)) { println!("Using custom EVM network found from environment variables"); info!("Using custom EVM network found from environment variables {network:?}"); diff --git a/autonomi_cli/src/actions/download.rs b/autonomi_cli/src/actions/download.rs index 5892a21472..ba004930e3 100644 --- a/autonomi_cli/src/actions/download.rs +++ b/autonomi_cli/src/actions/download.rs @@ -7,22 +7,22 @@ // permissions and limitations relating to use of the SAFE Network Software. use super::get_progress_bar; -use autonomi::{client::address::str_to_xorname, Client}; +use autonomi::{client::address::str_to_addr, Client}; use color_eyre::eyre::{eyre, Context, Result}; use std::path::PathBuf; pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> { - let address = str_to_xorname(addr).wrap_err("Failed to parse data address")?; - let root = client - .fetch_root(address) + let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; + let archive = client + .archive_get(address) .await .wrap_err("Failed to fetch data from address")?; - let progress_bar = get_progress_bar(root.map.len() as u64)?; + let progress_bar = get_progress_bar(archive.map.len() as u64)?; let mut all_errs = vec![]; - for (path, file) in root.map { + for (path, addr) in archive.map { progress_bar.println(format!("Fetching file: {path:?}...")); - let bytes = match client.fetch_file(&file).await { + let bytes = match client.data_get(addr).await { Ok(bytes) => bytes, Err(e) => { let err = format!("Failed to fetch file {path:?}: {e}"); diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs index ce91409bd3..0af446c6b8 100644 --- a/autonomi_cli/src/commands/file.rs +++ b/autonomi_cli/src/commands/file.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. 
-use autonomi::client::address::xorname_to_str;
+use autonomi::client::address::addr_to_str;
 use autonomi::Multiaddr;
 use color_eyre::eyre::Context;
 use color_eyre::eyre::Result;
@@ -31,11 +31,11 @@ pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let mut client = crate::actions::connect_to_network(peers).await?;
 
     println!("Uploading data to network...");
-    let (_, xor_name) = client
-        .upload_from_dir(PathBuf::from(file), &wallet)
+    let xor_name = client
+        .dir_upload(PathBuf::from(file), &wallet)
         .await
         .wrap_err("Failed to upload file")?;
-    let addr = xorname_to_str(xor_name);
+    let addr = addr_to_str(xor_name);
 
     println!("Successfully uploaded: {file}");
     println!("At address: {addr}");
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index f037f03967..0ebf444645 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -32,7 +32,7 @@ pub fn dummy_hash() -> Hash {
 }
 
 /// Get the `Network` from environment variables
-pub fn evm_network_from_env() -> Result<Network, Error> {
+pub fn network_from_env() -> Result<Network, Error> {
     let evm_vars = [
         env::var(RPC_URL)
             .ok()
diff --git a/sn_evm/src/amount.rs b/sn_evm/src/amount.rs
index 80978c721f..be25546042 100644
--- a/sn_evm/src/amount.rs
+++ b/sn_evm/src/amount.rs
@@ -77,6 +77,12 @@ impl From for AttoTokens {
     }
 }
 
+impl From<Amount> for AttoTokens {
+    fn from(value: Amount) -> Self {
+        Self(value)
+    }
+}
+
 impl FromStr for AttoTokens {
     type Err = EvmError;
 
diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs
index ee82824e53..40fde7dc18 100644
--- a/sn_evm/src/evm.rs
+++ b/sn_evm/src/evm.rs
@@ -6,20 +6,10 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
-use crate::EvmNetwork;
-
 pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL};
 
 /// Load the evm network from env
-pub fn network_from_env() -> EvmNetwork {
-    match evmlib::utils::evm_network_from_env() {
-        Ok(network) => network,
-        Err(e) => {
-            warn!("Failed to get EVM network from environment variables, using default: {e}");
-            EvmNetwork::default()
-        }
-    }
-}
+pub use evmlib::utils::network_from_env;
 
 /// Load the evm network from local CSV
 pub use evmlib::utils::local_evm_network_from_csv;
diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs
index 43c7bd2b43..c48f36b43e 100644
--- a/sn_evm/src/lib.rs
+++ b/sn_evm/src/lib.rs
@@ -10,8 +10,10 @@
 extern crate tracing;
 
 pub use evmlib::common::Address as RewardsAddress;
+pub use evmlib::common::QuotePayment;
 pub use evmlib::common::{QuoteHash, TxHash};
 pub use evmlib::utils;
+pub use evmlib::wallet::Error as EvmWalletError;
 pub use evmlib::wallet::Wallet as EvmWallet;
 pub use evmlib::Network as EvmNetwork;
 
diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs
index 6585f4d61a..8cd3f704c7 100644
--- a/sn_networking/src/event/mod.rs
+++ b/sn_networking/src/event/mod.rs
@@ -24,10 +24,16 @@ use libp2p::{
 use sn_evm::PaymentQuote;
 use sn_protocol::{
     messages::{Query, Request, Response},
-    NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE,
+    NetworkAddress, PrettyPrintRecordKey,
 };
+
+#[cfg(feature = "open-metrics")]
+use sn_protocol::CLOSE_GROUP_SIZE;
+#[cfg(feature = "open-metrics")]
+use std::collections::HashSet;
+
 use std::{
-    collections::{BTreeSet, HashSet},
+    collections::BTreeSet,
     fmt::{Debug, Formatter},
 };
 use tokio::sync::oneshot;
diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs
index 5e541198de..7b5756cd0b 100644
--- a/sn_node/src/bin/safenode/main.rs
+++ b/sn_node/src/bin/safenode/main.rs @@ -261,7 +261,10 @@ fn main() -> Result<()> { .as_ref() .cloned() .map(|v| v.into()) - .unwrap_or_else(sn_evm::evm::network_from_env); + .unwrap_or_else(|| { + sn_evm::evm::network_from_env() + .expect("Failed to get EVM network from environment variables") + }); if matches!(evm_network, EvmNetwork::Custom(_)) { println!("Using custom EVM network"); info!("Using custom EVM network {evm_network:?}"); diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 4d9b71974b..51a2a32803 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -315,10 +315,13 @@ fn store_chunks_task( loop { let random_data = gen_random_data(*DATA_SIZE); - let data_map = client.put(random_data, &wallet).await.inspect_err(|err| { - println!("Error to put chunk: {err:?}"); - error!("Error to put chunk: {err:?}") - })?; + let data_map = client + .data_put(random_data, &wallet) + .await + .inspect_err(|err| { + println!("Error to put chunk: {err:?}"); + error!("Error to put chunk: {err:?}") + })?; println!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); info!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); @@ -497,7 +500,7 @@ async fn query_content(client: &Client, net_addr: &NetworkAddress) -> Result<()> Ok(()) } NetworkAddress::ChunkAddress(addr) => { - client.get(*addr.xorname()).await?; + client.data_get(*addr.xorname()).await?; Ok(()) } _other => Ok(()), // we don't create/store any other type of content in this test yet diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index d387bd76b6..641756fa2c 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -334,7 +334,7 @@ async fn store_chunks( let random_bytes = Bytes::from(random_bytes); - client.put(random_bytes, wallet).await?; + client.data_put(random_bytes, wallet).await?; uploaded_chunks_count += 1; diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 037e2559d4..2c85c0d90a 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -6,12 +6,11 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use evmlib::{utils::evm_network_from_env, Network}; +use evmlib::{utils::network_from_env, Network}; use std::env; pub fn get_funded_wallet() -> evmlib::wallet::Wallet { - let network = - evm_network_from_env().expect("Failed to get EVM network from environment variables"); + let network = network_from_env().expect("Failed to get EVM network from environment variables"); if matches!(network, Network::ArbitrumOne) { panic!("You're trying to use ArbitrumOne network. 
Use a custom network for testing."); } From f8e6e07cf390d9bbba1031356bd972829393ce09 Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 11 Oct 2024 17:34:49 +0900 Subject: [PATCH 151/255] chore: remove some dead code --- autonomi/src/client/data.rs | 7 ++----- autonomi/src/lib.rs | 2 -- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 40bb9e86c2..c929f82dc3 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -22,14 +22,11 @@ use sn_protocol::{ NetworkAddress, }; -/// Raw Data Address +/// Raw Data Address (points to a [`DataMap`]) pub type DataAddr = XorName; -/// Raw Chunk Address +/// Raw Chunk Address (points to a [`Chunk`]) pub type ChunkAddr = XorName; -/// The address of a file on the network. Points to a [`DataMap`]. -pub type FileAddr = XorName; - impl Client { /// Fetch a blob of data from the network pub async fn data_get(&self, addr: DataAddr) -> Result { diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index d6be10953c..cfc8d81f72 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -44,7 +44,5 @@ pub use sn_evm::EvmWallet as Wallet; pub use bytes::Bytes; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use libp2p::Multiaddr; -#[doc(no_inline)] // Place this under 'Re-exports' in the docs. -pub use xor_name::XorName; pub use client::Client; From 0382b780ce45110ce29a060d6713d632fddc487a Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 11 Oct 2024 17:40:52 +0900 Subject: [PATCH 152/255] chore: wrong link in docs --- autonomi/src/client/data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index c929f82dc3..af714030b9 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -22,7 +22,7 @@ use sn_protocol::{ NetworkAddress, }; -/// Raw Data Address (points to a [`DataMap`]) +/// Raw Data Address (points to a DataMap) pub type DataAddr = XorName; /// Raw Chunk Address (points to a [`Chunk`]) pub type ChunkAddr = XorName; From bd9dda8749c037025ed609a4e5f32a97e60a981f Mon Sep 17 00:00:00 2001 From: grumbach Date: Fri, 11 Oct 2024 18:18:09 +0900 Subject: [PATCH 153/255] chore: improve according to PR comments --- autonomi/src/client/archive.rs | 2 +- autonomi/src/client/data.rs | 51 ++++++++++++++- autonomi/src/client/error.rs | 87 ------------------------- autonomi/src/client/{files.rs => fs.rs} | 31 ++++++++- autonomi/src/client/mod.rs | 3 +- autonomi/src/client/registers.rs | 2 +- autonomi/src/client/utils.rs | 2 +- autonomi/src/client/vault.rs | 2 +- autonomi/tests/{file.rs => fs.rs} | 1 - 9 files changed, 83 insertions(+), 98 deletions(-) delete mode 100644 autonomi/src/client/error.rs rename autonomi/src/client/{files.rs => fs.rs} (83%) rename autonomi/tests/{file.rs => fs.rs} (99%) diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs index 1a9998e792..d3cf9714ec 100644 --- a/autonomi/src/client/archive.rs +++ b/autonomi/src/client/archive.rs @@ -10,7 +10,7 @@ use std::{collections::HashMap, path::PathBuf}; use super::{ data::DataAddr, - error::{GetError, PutError}, + data::{GetError, PutError}, Client, }; use bytes::Bytes; diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index af714030b9..3a609b5685 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -8,14 +8,14 @@ use bytes::Bytes; use libp2p::kad::Quorum; +use tokio::task::JoinError; use std::collections::HashSet; use 
xor_name::XorName;
 
-use super::error::{GetError, PayError, PutError};
 use crate::{self_encryption::encrypt, Client};
 use sn_evm::{Amount, AttoTokens};
 use sn_evm::{EvmWallet, EvmWalletError};
 use sn_networking::{GetRecordCfg, NetworkError};
 use sn_protocol::{
     storage::{try_deserialize_record, Chunk, ChunkAddress, RecordHeader, RecordKind},
     NetworkAddress,
 };
 
 pub type DataAddr = XorName;
 /// Raw Chunk Address (points to a [`Chunk`])
 pub type ChunkAddr = XorName;
 
+/// Errors that can occur during the put operation.
+#[derive(Debug, thiserror::Error)]
+pub enum PutError {
+    #[error("Failed to self-encrypt data.")]
+    SelfEncryption(#[from] crate::self_encryption::Error),
+    #[error("Error getting Vault XorName data.")]
+    VaultXorName,
+    #[error("A network error occurred.")]
+    Network(#[from] NetworkError),
+    #[error("Error occurred during payment.")]
+    PayError(#[from] PayError),
+    #[error("Failed to serialize {0}")]
+    Serialization(String),
+    #[error("A wallet error occurred.")]
+    Wallet(#[from] sn_evm::EvmError),
+}
+
+/// Errors that can occur during the pay operation.
+#[derive(Debug, thiserror::Error)]
+pub enum PayError {
+    #[error("Could not get store quote for: {0:?} after several retries")]
+    CouldNotGetStoreQuote(XorName),
+    #[error("Could not get store costs: {0:?}")]
+    CouldNotGetStoreCosts(NetworkError),
+    #[error("Could not simultaneously fetch store costs: {0:?}")]
+    JoinError(JoinError),
+    #[error("Wallet error: {0:?}")]
+    EvmWalletError(#[from] EvmWalletError),
+    #[error("Failed to self-encrypt data.")]
+    SelfEncryption(#[from] crate::self_encryption::Error),
+}
+
+/// Errors that can occur during the get operation.
+#[derive(Debug, thiserror::Error)]
+pub enum GetError {
+    #[error("Could not deserialize data map.")]
+    InvalidDataMap(rmp_serde::decode::Error),
+    #[error("Failed to decrypt data.")]
+    Decryption(crate::self_encryption::Error),
+    #[error("Failed to deserialize")]
+    Deserialization(#[from] rmp_serde::decode::Error),
+    #[error("General networking error: {0:?}")]
+    Network(#[from] NetworkError),
+    #[error("General protocol error: {0:?}")]
+    Protocol(#[from] sn_protocol::Error),
+}
+
 impl Client {
     /// Fetch a blob of data from the network
     pub async fn data_get(&self, addr: DataAddr) -> Result<Bytes, GetError> {
diff --git a/autonomi/src/client/error.rs b/autonomi/src/client/error.rs
deleted file mode 100644
index d03aef5da6..0000000000
--- a/autonomi/src/client/error.rs
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use sn_evm::EvmWalletError;
-use sn_networking::NetworkError;
-use tokio::task::JoinError;
-use xor_name::XorName;
-
-/// Errors that can occur during the file upload operation.
-#[cfg(feature = "fs")] -#[derive(Debug, thiserror::Error)] -pub enum UploadError { - #[error("Failed to recursively traverse directory")] - WalkDir(#[from] walkdir::Error), - #[error("Input/output failure")] - IoError(#[from] std::io::Error), - #[error("Failed to upload file")] - PutError(#[from] PutError), - #[error("Failed to fetch file")] - GetError(#[from] GetError), - #[error("Failed to serialize")] - Serialization(#[from] rmp_serde::encode::Error), - #[error("Failed to deserialize")] - Deserialization(#[from] rmp_serde::decode::Error), -} - -#[cfg(feature = "fs")] -/// Errors that can occur during the download operation. -#[derive(Debug, thiserror::Error)] -pub enum DownloadError { - #[error("Failed to download file")] - GetError(#[from] GetError), - #[error("IO failure")] - IoError(#[from] std::io::Error), -} - -/// Errors that can occur during the put operation. -#[derive(Debug, thiserror::Error)] -pub enum PutError { - #[error("Failed to self-encrypt data.")] - SelfEncryption(#[from] crate::self_encryption::Error), - #[error("Error getting Vault XorName data.")] - VaultXorName, - #[error("A network error occurred.")] - Network(#[from] NetworkError), - #[error("Error occurred during payment.")] - PayError(#[from] PayError), - #[error("Failed to serialize {0}")] - Serialization(String), - #[error("A wallet error occurred.")] - Wallet(#[from] sn_evm::EvmError), -} - -/// Errors that can occur during the pay operation. -#[derive(Debug, thiserror::Error)] -pub enum PayError { - #[error("Could not get store quote for: {0:?} after several retries")] - CouldNotGetStoreQuote(XorName), - #[error("Could not get store costs: {0:?}")] - CouldNotGetStoreCosts(NetworkError), - #[error("Could not simultaneously fetch store costs: {0:?}")] - JoinError(JoinError), - #[error("Wallet error: {0:?}")] - EvmWalletError(#[from] EvmWalletError), - #[error("Failed to self-encrypt data.")] - SelfEncryption(#[from] crate::self_encryption::Error), -} - -/// Errors that can occur during the get operation. -#[derive(Debug, thiserror::Error)] -pub enum GetError { - #[error("Could not deserialize data map.")] - InvalidDataMap(rmp_serde::decode::Error), - #[error("Failed to decrypt data.")] - Decryption(crate::self_encryption::Error), - #[error("Failed to deserialize")] - Deserialization(#[from] rmp_serde::decode::Error), - #[error("General networking error: {0:?}")] - Network(#[from] NetworkError), - #[error("General protocol error: {0:?}")] - Protocol(#[from] sn_protocol::Error), -} diff --git a/autonomi/src/client/files.rs b/autonomi/src/client/fs.rs similarity index 83% rename from autonomi/src/client/files.rs rename to autonomi/src/client/fs.rs index 7135fd81c2..ca83442f78 100644 --- a/autonomi/src/client/files.rs +++ b/autonomi/src/client/fs.rs @@ -13,8 +13,35 @@ use std::collections::HashMap; use std::path::PathBuf; use super::archive::{Archive, ArchiveAddr}; -use super::data::DataAddr; -use super::error::{DownloadError, UploadError}; +use super::data::{DataAddr, GetError, PutError}; + +/// Errors that can occur during the file upload operation. 
+#[cfg(feature = "fs")] +#[derive(Debug, thiserror::Error)] +pub enum UploadError { + #[error("Failed to recursively traverse directory")] + WalkDir(#[from] walkdir::Error), + #[error("Input/output failure")] + IoError(#[from] std::io::Error), + #[error("Failed to upload file")] + PutError(#[from] PutError), + #[error("Failed to fetch file")] + GetError(#[from] GetError), + #[error("Failed to serialize")] + Serialization(#[from] rmp_serde::encode::Error), + #[error("Failed to deserialize")] + Deserialization(#[from] rmp_serde::decode::Error), +} + +#[cfg(feature = "fs")] +/// Errors that can occur during the download operation. +#[derive(Debug, thiserror::Error)] +pub enum DownloadError { + #[error("Failed to download file")] + GetError(#[from] GetError), + #[error("IO failure")] + IoError(#[from] std::io::Error), +} impl Client { /// Download file from network to local file system diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 3692463cc9..57e165f2a6 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -7,14 +7,13 @@ // permissions and limitations relating to use of the SAFE Network Software. pub mod address; -pub mod error; #[cfg(feature = "data")] pub mod archive; #[cfg(feature = "data")] pub mod data; #[cfg(feature = "fs")] -pub mod files; +pub mod fs; #[cfg(feature = "registers")] pub mod registers; #[cfg(feature = "vault")] diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs index c584f74d45..41b6e00736 100644 --- a/autonomi/src/client/registers.rs +++ b/autonomi/src/client/registers.rs @@ -15,7 +15,7 @@ use sn_networking::VerificationKind; use sn_protocol::storage::RetryStrategy; pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress}; -use crate::client::error::PayError; +use crate::client::data::PayError; use crate::client::Client; use bytes::Bytes; use libp2p::kad::{Quorum, Record}; diff --git a/autonomi/src/client/utils.rs b/autonomi/src/client/utils.rs index eae9f62c4b..0714f60d9d 100644 --- a/autonomi/src/client/utils.rs +++ b/autonomi/src/client/utils.rs @@ -29,7 +29,7 @@ use xor_name::XorName; use crate::self_encryption::DataMapLevel; use super::{ - error::{GetError, PayError, PutError}, + data::{GetError, PayError, PutError}, Client, }; diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index ab69704549..2eb960dfc1 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -8,7 +8,7 @@ use std::collections::HashSet; -use crate::client::error::PutError; +use crate::client::data::PutError; use crate::client::Client; use bls::SecretKey; use bytes::Bytes; diff --git a/autonomi/tests/file.rs b/autonomi/tests/fs.rs similarity index 99% rename from autonomi/tests/file.rs rename to autonomi/tests/fs.rs index f7bc84814d..850b7eec00 100644 --- a/autonomi/tests/file.rs +++ b/autonomi/tests/fs.rs @@ -21,7 +21,6 @@ use walkdir::WalkDir; // With a local evm network, and local network, run: // EVM_NETWORK=local cargo test --features="fs,local" --package autonomi --test file -#[cfg(feature = "fs")] #[tokio::test] async fn dir_upload_download() -> Result<()> { let _log_appender_guard = From 4e8b07c4efb27e7878608fb4e6830133aa5431c8 Mon Sep 17 00:00:00 2001 From: XiaoBei <1505929057@qq.com> Date: Fri, 4 Oct 2024 16:45:06 +0800 Subject: [PATCH 154/255] docs: update README.md --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index ee5f1573bd..60f5cd84b3 100644 --- a/README.md +++ 
b/README.md @@ -181,7 +181,7 @@ cargo run --example registers --features=local -- --user alice --reg-nickname my ``` Alice can now write a message to the register and see anything written by anyone else. For example -she might enter the text "hello, who's there?" which is written to the register and then shown as +she might enter the text "Hello, who's there?" which is written to the register and then shown as the "Latest value", in her terminal: ``` @@ -195,15 +195,15 @@ Latest value (more than one if concurrent writes were made): -------------- Enter a blank line to receive updates, or some text to be written. -hello, who's there? -Writing msg (offline) to Register: 'hello, who's there?' +Hello, who's there? +Writing msg (offline) to Register: 'Hello, who's there?' Syncing with SAFE in 2s... synced! Current total number of items in Register: 1 Latest value (more than one if concurrent writes were made): -------------- -[alice]: hello, who's there? +[Alice]: Hello, who's there? -------------- Enter a blank line to receive updates, or some text to be written. @@ -212,7 +212,7 @@ Enter a blank line to receive updates, or some text to be written. For anyone else to write to the same register they need to know its xor address, so to communicate with her friend Bob, Alice needs to find a way to send it to Bob. In her terminal, this is the -value starting "50f4..." in the output above. This value it will be different each time you run the +value starting "50f4..." in the output above. This value will be different each time you run the example to create a register. Having received the xor address, in another terminal Bob can access the same register to see the @@ -232,7 +232,7 @@ Here's Bob writing from his terminal: ``` Latest value (more than one if concurrent writes were made): -------------- -[alice]: hello, who's there? +[Alice]: Hello, who's there? -------------- Enter a blank line to receive updates, or some text to be written. @@ -367,7 +367,7 @@ Listening to royalty payment events: ``` $ cargo run --bin safenode_rpc_client -- 127.0.0.1:34416 transfers -Listening to transfers notifications... (press Ctrl+C to exit) +Listening to transfer notifications... (press Ctrl+C to exit) New transfer notification received for PublicKey(0c54..5952), containing 1 cash note/s. CashNote received with UniquePubkey(PublicKey(19ee..1580)), value: 0.000000001 @@ -380,7 +380,7 @@ The `transfers` command can provide a path for royalty payment cash notes: ``` $ cargo run --release --bin=safenode_rpc_client -- 127.0.0.1:34416 transfers ./royalties-cash-notes -Listening to transfers notifications... (press Ctrl+C to exit) +Listening to transfer notifications... (press Ctrl+C to exit) Writing cash notes to: ./royalties-cash-notes ``` From 67549f4349e3bdac821f3ffe5067c188dec0ffe0 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 11 Oct 2024 02:11:16 +0530 Subject: [PATCH 155/255] fix(test): use separate wallets to prevent payment failures --- evmlib/src/contract/network_token.rs | 52 ++++++++++++++++----------- sn_node/tests/common/client.rs | 53 ++++++++++++++++++++-------- sn_node/tests/data_with_churn.rs | 15 +++++--- test_utils/src/evm.rs | 21 ++++++++--- 4 files changed, 97 insertions(+), 44 deletions(-) diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index 361c87e340..be9d2bca7f 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -54,6 +54,7 @@ where /// Get the raw token balance of an address. 
     pub async fn balance_of(&self, account: Address) -> Result<U256, Error> {
+        debug!("Getting balance of account: {account:?}");
         let balance = self
             .contract
             .balanceOf(account)
             .call()
             .await
             .inspect_err(|err| error!("Error getting balance of account: {err:?}"))?
             ._0;
+        debug!("Balance of account: {account} is {balance}");
         Ok(balance)
     }
 
     /// Approve spender to spend a raw amount of tokens.
     pub async fn approve(&self, spender: Address, value: U256) -> Result<TxHash, Error> {
-        let tx_hash = self
-            .contract
-            .approve(spender, value)
-            .send()
-            .await
-            .inspect_err(|err| {
-                error!("Error approving spender to spend raw amt of tokens: {err:?}")
-            })?
-            .watch()
-            .await
-            .inspect_err(|err| error!("Error watching approve tx: {err:?}"))?;
+        debug!("Approving spender to spend raw amt of tokens: {value}");
+        let call = self.contract.approve(spender, value);
+        let pending_tx_builder = call.send().await.inspect_err(|err| {
+            error!(
+                "Error approving spender {spender:?} to spend raw amt of tokens {value}: {err:?}"
+            )
+        })?;
+
+        let pending_tx_hash = *pending_tx_builder.tx_hash();
+        debug!("The approval from sender {spender:?} is pending with tx_hash: {pending_tx_hash:?}",);
+        let tx_hash = pending_tx_builder.watch().await.inspect_err(|err| {
+            error!("Error watching approve tx with hash {pending_tx_hash:?}: {err:?}")
+        })?;
+
+        debug!("Approve tx with hash {tx_hash:?} is successful");
 
         Ok(tx_hash)
     }
 
     /// Transfer a raw amount of tokens.
     pub async fn transfer(&self, receiver: Address, amount: U256) -> Result<TxHash, Error> {
-        let tx_hash = self
-            .contract
-            .transfer(receiver, amount)
-            .send()
-            .await
-            .inspect_err(|err| error!("Error transferring raw amt of tokens: {err:?}"))?
-            .watch()
-            .await
-            .inspect_err(|err| error!("Error watching transfer tx: {err:?}"))?;
+        debug!("Transferring raw amt of tokens: {amount} to {receiver:?}");
+        let call = self.contract.transfer(receiver, amount);
+        let pending_tx_builder = call.send().await.inspect_err(|err| {
+            error!("Error transferring raw amt of tokens to {receiver:?}: {err:?}")
+        })?;
+
+        let pending_tx_hash = *pending_tx_builder.tx_hash();
+        debug!(
+            "The transfer to receiver {receiver:?} is pending with tx_hash: {pending_tx_hash:?}"
+        );
+        let tx_hash = pending_tx_builder.watch().await.inspect_err(|err| {
+            error!("Error watching transfer tx with hash {pending_tx_hash:?}: {err:?}")
+        })?;
+
+        debug!("Transfer tx with hash {tx_hash:?} is successful");
 
         Ok(tx_hash)
     }
diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs
index 106a13567e..513fc46a95 100644
--- a/sn_node/tests/common/client.rs
+++ b/sn_node/tests/common/client.rs
@@ -7,10 +7,14 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 use autonomi::Client;
+use evmlib::wallet::Wallet;
 use eyre::Result;
+use sn_evm::Amount;
 use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest};
 use sn_service_management::{get_local_node_registry_path, NodeRegistry};
+use std::str::FromStr;
 use std::{net::SocketAddr, path::Path};
+use test_utils::evm::get_new_wallet;
 use test_utils::testnet::DeploymentInventory;
 use test_utils::{evm::get_funded_wallet, peers_from_env};
 use tokio::sync::Mutex;
@@ -35,7 +39,7 @@ const LOAD_FAUCET_WALLET_RETRIES: usize = 6;
 // mutex to restrict access to faucet wallet from concurrent tests
 static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(());
 
-pub async fn get_client_and_funded_wallet() -> (Client, evmlib::wallet::Wallet) {
+pub async fn get_client_and_funded_wallet() -> (Client, Wallet) {
     match DeploymentInventory::load() {
         Ok(_inventory) => {
             todo!("Not implemented yet for WanNetwork");
@@ -107,20 +111,17 @@ pub fn get_all_rpc_addresses(_skip_genesis_for_droplet: bool) -> Result Result {
-//     match DeploymentInventory::load() {
-//         Ok(inventory) => {
-//             Droplet::get_funded_wallet(client, to_wallet_dir, inventory.faucet_address, false).await
-//         }
-//         Err(_) => NonDroplet::get_funded_wallet(client, to_wallet_dir, false).await,
-//     }
-// }
+/// Transfer tokens from the provided wallet to a newly created wallet
+/// Returns the newly created wallet
+pub async fn transfer_to_new_wallet(from: &Wallet, amount: usize) -> Result<Wallet> {
+    match DeploymentInventory::load() {
+        Ok(_inventory) => {
+            todo!("Not implemented yet for WanNetwork");
+            // Droplet::get_funded_wallet(client, to_wallet_dir, inventory.faucet_address, false).await
+        }
+        Err(_) => LocalNetwork::transfer_to_new_wallet(from, amount).await,
+    }
+}
 
 pub struct LocalNetwork;
 impl LocalNetwork {
@@ -139,6 +140,28 @@ impl LocalNetwork {
         get_funded_wallet()
     }
 
+    /// Transfer tokens from the provided wallet to a newly created wallet
+    /// Returns the newly created wallet
+    async fn transfer_to_new_wallet(from: &Wallet, amount: usize) -> Result<Wallet> {
+        let wallet_balance = from.balance_of_tokens().await?;
+        let gas_balance = from.balance_of_gas_tokens().await?;
+
+        debug!("Wallet balance: {wallet_balance}, Gas balance: {gas_balance}");
+
+        let new_wallet = get_new_wallet()?;
+
+        from.transfer_tokens(new_wallet.address(), Amount::from(amount))
+            .await?;
+
+        from.transfer_gas_tokens(
+            new_wallet.address(),
+            Amount::from_str("10000000000000000000")?,
+        )
+        .await?;
+
+        Ok(new_wallet)
+    }
+
     // Restart a local node by sending in the SafenodeRpcCmd::Restart to the node's RPC endpoint.
pub async fn restart_node(rpc_endpoint: SocketAddr, retain_peer_id: bool) -> Result<()> { let mut rpc_client = get_safenode_rpc_client(rpc_endpoint).await?; diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 51a2a32803..dcc4e077ae 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -13,6 +13,7 @@ use crate::common::{ NodeRestart, }; use autonomi::{Client, Wallet}; +use common::client::transfer_to_new_wallet; use eyre::{bail, ErrReport, Result}; use rand::Rng; use self_encryption::MAX_CHUNK_SIZE; @@ -31,6 +32,8 @@ use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; use tracing::{debug, error, info, trace, warn}; use xor_name::XorName; +const TOKENS_TO_TRANSFER: usize = 10000000; + const EXTRA_CHURN_COUNT: u32 = 5; const CHURN_CYCLES: u32 = 2; const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; @@ -108,11 +111,11 @@ async fn data_availability_during_churn() -> Result<()> { if chunks_only { " (Chunks only)" } else { "" } ); - let (client, wallet) = get_client_and_funded_wallet().await; + let (client, main_wallet) = get_client_and_funded_wallet().await; info!( - "Client and wallet created. Wallet address: {:?}", - wallet.address() + "Client and wallet created. Main wallet address: {:?}", + main_wallet.address() ); // Shared bucket where we keep track of content created/stored on the network @@ -121,9 +124,10 @@ async fn data_availability_during_churn() -> Result<()> { // Spawn a task to create Registers and CashNotes at random locations, // at a higher frequency than the churning events let create_register_handle = if !chunks_only { + let register_wallet = transfer_to_new_wallet(&main_wallet, TOKENS_TO_TRANSFER).await?; let create_register_handle = create_registers_task( client.clone(), - wallet.clone(), + register_wallet, Arc::clone(&content), churn_period, ); @@ -135,10 +139,11 @@ async fn data_availability_during_churn() -> Result<()> { println!("Uploading some chunks before carry out node churning"); info!("Uploading some chunks before carry out node churning"); + let chunk_wallet = transfer_to_new_wallet(&main_wallet, TOKENS_TO_TRANSFER).await?; // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events let store_chunks_handle = store_chunks_task( client.clone(), - wallet.clone(), + chunk_wallet, Arc::clone(&content), churn_period, ); diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 2c85c0d90a..b7443df991 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -6,10 +6,14 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use evmlib::{utils::network_from_env, Network}; +use color_eyre::{ + eyre::{bail, Context}, + Result, +}; +use evmlib::{utils::network_from_env, wallet::Wallet, Network}; use std::env; -pub fn get_funded_wallet() -> evmlib::wallet::Wallet { +pub fn get_funded_wallet() -> Wallet { let network = network_from_env().expect("Failed to get EVM network from environment variables"); if matches!(network, Network::ArbitrumOne) { panic!("You're trying to use ArbitrumOne network. 
Use a custom network for testing."); @@ -20,6 +24,15 @@ pub fn get_funded_wallet() -> evmlib::wallet::Wallet { let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); - evmlib::wallet::Wallet::new_from_private_key(network, &private_key) - .expect("Invalid private key") + Wallet::new_from_private_key(network, &private_key).expect("Invalid private key") +} + +pub fn get_new_wallet() -> Result { + let network = + network_from_env().wrap_err("Failed to get EVM network from environment variables")?; + if matches!(network, Network::ArbitrumOne) { + bail!("You're trying to use ArbitrumOne network. Use a custom network for testing."); + } + + Ok(Wallet::new_with_random_wallet(network)) } From b532cc43e0a61e425e0733c6f1b8197adbfba42c Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 11 Oct 2024 02:12:56 +0530 Subject: [PATCH 156/255] fix(ci): re enable churn test --- .github/workflows/merge.yml | 233 ++++++++++++++++++------------------ 1 file changed, 116 insertions(+), 117 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 1e22700f58..fd11bce681 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -556,135 +556,134 @@ jobs: # log_file_prefix: safe_test_logs_token_distribution # platform: ${{ matrix.os }} - # churn: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: Network churning tests - # runs-on: ${{ matrix.os }} - # strategy: - # matrix: - # include: - # - os: ubuntu-latest - # node_data_path: /home/runner/.local/share/safe/node - # safe_path: /home/runner/.local/share/safe - # - os: windows-latest - # node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node - # safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe - # - os: macos-latest - # node_data_path: /Users/runner/Library/Application Support/safe/node - # safe_path: /Users/runner/Library/Application Support/safe - # steps: - # - uses: actions/checkout@v4 - - # - uses: dtolnay/rust-toolchain@stable + churn: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: Network churning tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: ubuntu-latest + node_data_path: /home/runner/.local/share/safe/node + safe_path: /home/runner/.local/share/safe + - os: windows-latest + node_data_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe\\node + safe_path: C:\\Users\\runneradmin\\AppData\\Roaming\\safe + - os: macos-latest + node_data_path: /Users/runner/Library/Application Support/safe/node + safe_path: /Users/runner/Library/Application Support/safe + steps: + - uses: actions/checkout@v4 - # - uses: Swatinem/rust-cache@v2 + - uses: dtolnay/rust-toolchain@stable - # - name: Build binaries - # run: cargo build --release --features local --bin safenode - # timeout-minutes: 30 + - uses: Swatinem/rust-cache@v2 - # - name: Build faucet binaries - # run: cargo build --release --features="local,gifting" --bin faucet - # timeout-minutes: 30 + - name: Build binaries + run: cargo build --release --features local --bin safenode + timeout-minutes: 30 - # - name: Build churn tests - # run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run - # env: - # # only set the target dir for windows to bypass the linker issue. - # # happens if we build the node manager via testnet action - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - # timeout-minutes: 30 + - name: Build churn tests + run: cargo test --release -p sn_node --features=local --test data_with_churn --no-run + env: + # only set the target dir for windows to bypass the linker issue. + # happens if we build the node manager via testnet action + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + timeout-minutes: 30 - # - name: Start a local network - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: start - # interval: 2000 - # node-path: target/release/safenode - # faucet-path: target/release/faucet - # platform: ${{ matrix.os }} - # build: true + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: start + enable-evm-testnet: true + node-path: target/release/safenode + platform: ${{ matrix.os }} + build: true - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + - name: Check if SAFE_PEERS and EVM_NETWORK are set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + elif [[ -z "$EVM_NETWORK" ]]; then + echo "The EVM_NETWORK variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + echo "EVM_NETWORK has been set to $EVM_NETWORK" + fi - # - name: Chunks data integrity during nodes churn - # run: cargo test --release -p sn_node --features="local" --test data_with_churn -- --nocapture - # env: - # TEST_DURATION_MINS: 5 - # TEST_TOTAL_CHURN_CYCLES: 15 - # SN_LOG: "all" - # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - # timeout-minutes: 30 + - name: Chunks data integrity during nodes churn + run: cargo test --release -p sn_node --features=local --test data_with_churn -- --nocapture + env: + TEST_DURATION_MINS: 5 + TEST_TOTAL_CHURN_CYCLES: 15 + SN_LOG: "all" + CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + timeout-minutes: 30 - # - name: Stop the local network and upload logs - # if: always() - # uses: maidsafe/sn-local-testnet-action@main - # with: - # action: stop - # log_file_prefix: safe_test_logs_churn - # platform: ${{ matrix.os }} + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@evm-dev + with: + action: stop + log_file_prefix: safe_test_logs_churn + platform: ${{ matrix.os }} - # - name: Verify restart of nodes using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of restarts - # # TODO: make this use an env var, or relate to testnet size - # run: | - # restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Restart $restart_count nodes" - # peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "PeerRemovedFromRoutingTable $peer_removed times" - # if [ $peer_removed -lt $restart_count ]; then - # echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" - # exit 1 - # fi - # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - # echo "Node dir count is $node_count" + - name: Verify restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restart $restart_count nodes" + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "PeerRemovedFromRoutingTable $peer_removed times" + if [ $peer_removed -lt $restart_count ]; then + echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" + exit 1 + fi + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" - # # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here - # # if [ $restart_count -lt $node_count ]; then - # # echo "Restart count of: $restart_count is less than the node count of: $node_count" - # # exit 1 - # # fi + # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here + # if [ $restart_count -lt $node_count ]; then + # echo "Restart count of: $restart_count is less than the node count of: $node_count" + # exit 1 + # fi - # - name: Verify data replication using rg - # shell: bash - # timeout-minutes: 1 - # # get the counts, then the specific line, and then the digit count only - # # then check we have an expected level of replication - # # TODO: make this use an env var, or relate to testnet size - # run: | - # fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.node_data_path }}" -c --stats | \ - # rg "(\d+) matches" | rg "\d+" -o) - # echo "Carried out $fetching_attempt_count fetching attempts" - # node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - # if [ $fetching_attempt_count -lt $node_count ]; then - # echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" - # exit 1 - # fi + - name: 
Verify data replication using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of replication + # TODO: make this use an env var, or relate to testnet size + run: | + fetching_attempt_count=$(rg "FetchingKeysForReplication" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Carried out $fetching_attempt_count fetching attempts" + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + if [ $fetching_attempt_count -lt $node_count ]; then + echo "Replication fetching attempts of: $fetching_attempt_count is less than the node count of: $node_count" + exit 1 + fi - # # Only error out after uploading the logs - # - name: Don't log raw data - # if: matrix.os != 'windows-latest' # causes error - # shell: bash - # timeout-minutes: 10 - # run: | - # if ! rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' - # then - # echo "We are logging an extremely large data" - # exit 1 - # fi + # Only error out after uploading the logs + - name: Don't log raw data + if: matrix.os != 'windows-latest' # causes error + shell: bash + timeout-minutes: 10 + run: | + if ! rg '^' "${{ matrix.safe_path }}"/*/*/logs | awk 'length($0) > 15000 { print; exit 1 }' + then + echo "We are logging an extremely large data" + exit 1 + fi verify_data_location_routing_table: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" From 8af00bb5d2a983810eeaf608b91378f07612b9dc Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 11 Oct 2024 17:25:59 +0530 Subject: [PATCH 157/255] fix(test): retry during PUT to fix failures due to node restarts --- sn_node/tests/data_with_churn.rs | 47 ++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index dcc4e077ae..c372fc0331 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -320,21 +320,40 @@ fn store_chunks_task( loop { let random_data = gen_random_data(*DATA_SIZE); - let data_map = client - .data_put(random_data, &wallet) - .await - .inspect_err(|err| { - println!("Error to put chunk: {err:?}"); - error!("Error to put chunk: {err:?}") - })?; - - println!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); - info!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); + // FIXME: The client does not have the retry repay to different payee feature yet. + // Retry here for now + let mut retries = 1; + loop { + match client + .data_put(random_data.clone(), &wallet) + .await + .inspect_err(|err| { + println!("Error to put chunk: {err:?}"); + error!("Error to put chunk: {err:?}") + }) { + Ok(data_map) => { + println!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); + info!("Stored Chunk/s at {data_map:?} after a delay of: {delay:?}"); + + content + .write() + .await + .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(data_map))); + break; + } + Err(err) => { + println!("Failed to store chunk: {err:?}. Retrying ..."); + error!("Failed to store chunk: {err:?}. 
Retrying ..."); + if retries >= 3 { + println!("Failed to store chunk after 3 retries: {err}"); + error!("Failed to store chunk after 3 retries: {err}"); + bail!("Failed to store chunk after 3 retries: {err}"); + } + retries += 1; + } + } + } - content - .write() - .await - .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(data_map))); sleep(delay).await; } }); From 67bdf52f872beaef3cca15493eb382d72f161e0d Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 4 Oct 2024 17:29:50 +0200 Subject: [PATCH 158/255] feat(autonomi): bindings for js --- Cargo.lock | 40 ++++++++++++++++++++++ autonomi/Cargo.toml | 9 ++++- autonomi/index.html | 33 +++++++++++++++++++ autonomi/src/client/mod.rs | 3 ++ autonomi/src/client/wasm.rs | 66 +++++++++++++++++++++++++++++++++++++ 5 files changed, 150 insertions(+), 1 deletion(-) create mode 100644 autonomi/index.html create mode 100644 autonomi/src/client/wasm.rs diff --git a/Cargo.lock b/Cargo.lock index 67a4c6df2e..fa1892468f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -862,6 +862,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "ascii" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" + [[package]] name = "asn1-rs" version = "0.6.2" @@ -1081,6 +1087,7 @@ dependencies = [ "rmp-serde", "self_encryption", "serde", + "serde-wasm-bindgen", "sha2 0.10.8", "sn_bls_ckd", "sn_curv", @@ -1092,11 +1099,14 @@ dependencies = [ "sn_registers", "test_utils", "thiserror", + "tiny_http", "tokio", "tracing", "tracing-subscriber", "tracing-web", "walkdir", + "wasm-bindgen", + "wasm-bindgen-futures", "wasm-bindgen-test", "xor_name", ] @@ -1676,6 +1686,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "chunked_transfer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" + [[package]] name = "ciborium" version = "0.2.2" @@ -7734,6 +7750,17 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-wasm-bindgen" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8302e169f0eddcc139c70f139d19d6467353af16f9fce27e8c30158036a1e16b" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + [[package]] name = "serde_bytes" version = "0.11.15" @@ -8823,6 +8850,19 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tiny_http" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0d6ef4e10d23c1efb862eecad25c5054429a71958b4eeef85eb5e7170b477ca" +dependencies = [ + "ascii", + "chunked_transfer", + "log", + "time", + "url", +] + [[package]] name = "tinytemplate" version = "1.2.1" diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index aa5e463ac6..149acf987b 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://maidsafe.net" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" +[lib] +crate-type = ["cdylib", "rlib"] + [features] default = ["data"] full = ["data", "registers", "vault"] @@ -45,16 +48,20 @@ tracing = { version = "~0.1.26" } walkdir = "2.5.0" xor_name = "5.0.0" futures = "0.3.30" +wasm-bindgen = "0.2.93" +wasm-bindgen-futures = "0.4.43" +serde-wasm-bindgen = "0.6.5" [dev-dependencies] eyre = "0.6.5" sha2 = "0.10.6" 
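
The retry loop in patch 157 above is a plain bounded retry: clone the payload, call `data_put`, and on failure try again until a counter reaches the cap. Note that the counter starts at 1 and counts attempts, so "after 3 retries" in the log message really means 3 attempts, i.e. 2 retries after the first try. The FIXME explains why the loop exists at all: the client cannot yet repay a different payee when a node churns away mid-upload, so the test compensates externally. A self-contained sketch of the same shape as a generic helper (`with_retries` is an illustrative name, not part of the client API):

// Run `op` up to `max_attempts` times, returning the first success or the
// last error. Each call to `op` builds a fresh future, mirroring how the
// test re-clones `random_data` on every attempt.
async fn with_retries<T, E, F, Fut>(max_attempts: u32, mut op: F) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut attempt = 1u32;
    loop {
        match op().await {
            Ok(value) => return Ok(value),
            Err(err) if attempt >= max_attempts => return Err(err),
            Err(_) => attempt += 1, // the real test logs before retrying
        }
    }
}
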
sn_logging = { path = "../sn_logging", version = "0.2.33" } -tracing-subscriber = { version = "0.3", features = ["env-filter"] } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. test_utils = { path = "../test_utils" } +tiny_http = "0.11" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/autonomi/index.html b/autonomi/index.html new file mode 100644 index 0000000000..86c7808307 --- /dev/null +++ b/autonomi/index.html @@ -0,0 +1,33 @@ + + + + + + + + + diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index 57e165f2a6..a0a691eaa8 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -19,6 +19,9 @@ pub mod registers; #[cfg(feature = "vault")] pub mod vault; +#[cfg(target_arch = "wasm32")] +pub mod wasm; + // private module with utility functions mod utils; diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs new file mode 100644 index 0000000000..f757c08a98 --- /dev/null +++ b/autonomi/src/client/wasm.rs @@ -0,0 +1,66 @@ +use libp2p::Multiaddr; +use wasm_bindgen::prelude::*; + +#[wasm_bindgen] +pub struct Client(super::Client); + +#[wasm_bindgen] +impl Client { + #[wasm_bindgen(constructor)] + pub async fn connect(peers: Vec) -> Result { + let peers = peers + .into_iter() + .map(|peer| peer.parse()) + .collect::, _>>() + // .map_err(|err| serde_wasm_bindgen::to_value(&err).unwrap()); + .map_err(|_err| JsValue::NULL)?; + + let client = super::Client::connect(&peers) + .await + .map_err(|err| serde_wasm_bindgen::to_value(&err).expect("serialization to succeed"))?; + + Ok(Client(client)) + } + + #[wasm_bindgen] + pub async fn put(&self, data: Vec, wallet: Wallet) -> Result, JsValue> { + let data = crate::Bytes::from(data); + self.0 + .put(data, &wallet.0) + .await + .map_err(|err| serde_wasm_bindgen::to_value(&err).expect("serialization to succeed")) + .map(|xorname| xorname.to_vec()) + } +} + +#[wasm_bindgen] +pub struct Wallet(evmlib::wallet::Wallet); + +#[wasm_bindgen(js_name = getFundedWallet)] +pub fn funded_wallet() -> Wallet { + let network = evmlib::utils::evm_network_from_env().expect("network init from env"); + + let private_key = option_env!("EVM_PRIVATE_KEY") + .unwrap_or_else(|| "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"); + + Wallet( + evmlib::wallet::Wallet::new_from_private_key(network, private_key) + .expect("Invalid private key"), + ) +} + +#[wasm_bindgen(js_name = logInit)] +pub fn log_init(directive: String) { + use tracing_subscriber::prelude::*; + + console_error_panic_hook::set_once(); + + let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(false) // Only partially supported across browsers + .without_time() // std::time is not available in browsers + .with_writer(tracing_web::MakeWebConsoleWriter::new()); // write events to the console + tracing_subscriber::registry() + .with(fmt_layer) + .with(tracing_subscriber::EnvFilter::new(directive)) + .init(); +} From e77e4174f1d223464cf34f2b39b3f6f67b1e02ab Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 7 Oct 2024 17:10:29 +0200 Subject: [PATCH 159/255] fix(autonomi): use proper dep --- autonomi/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 149acf987b..875b341ffe 100644 --- 
a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -69,6 +69,7 @@ console_error_panic_hook = "0.1.7" evmlib = { path = "../evmlib", version = "0.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-web = "0.1.3" [lints] From e024c1b954b8b43fd23c401243c8f5c3ba9775fb Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Mon, 7 Oct 2024 17:12:12 +0200 Subject: [PATCH 160/255] refactor(autonomi): error return; camel case --- autonomi/index.html | 4 ++-- autonomi/src/client/wasm.rs | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/autonomi/index.html b/autonomi/index.html index 86c7808307..42c57359e3 100644 --- a/autonomi/index.html +++ b/autonomi/index.html @@ -5,12 +5,12 @@ + + + diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index 3a609b5685..cc9af3a267 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -165,7 +165,7 @@ impl Client { /// Get the estimated cost of storing a piece of data. pub async fn data_cost(&self, data: Bytes) -> Result { - let now = std::time::Instant::now(); + let now = sn_networking::target_arch::Instant::now(); let (data_map_chunk, chunks) = encrypt(data)?; debug!("Encryption took: {:.2?}", now.elapsed()); diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs index a085ee2da9..b91414b3a6 100644 --- a/autonomi/src/client/wasm.rs +++ b/autonomi/src/client/wasm.rs @@ -4,6 +4,29 @@ use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Client(super::Client); +#[wasm_bindgen] +pub struct ChunkAddr(xor_name::XorName); + +#[wasm_bindgen] +pub struct DataAddr(xor_name::XorName); +#[wasm_bindgen] +impl DataAddr { + #[wasm_bindgen(js_name = toString)] + pub fn to_string(&self) -> String { + crate::client::address::xorname_to_str(self.0) + } +} + +#[wasm_bindgen] +pub struct AttoTokens(sn_evm::AttoTokens); +#[wasm_bindgen] +impl AttoTokens { + #[wasm_bindgen(js_name = toString)] + pub fn to_string(&self) -> String { + self.0.to_string() + } +} + #[wasm_bindgen] impl Client { #[wasm_bindgen(constructor)] @@ -18,20 +41,44 @@ impl Client { Ok(Client(client)) } + #[wasm_bindgen(js_name = putChunk)] + pub async fn put_chunk(&self, _data: Vec, _wallet: Wallet) -> Result { + unimplemented!() + } + + #[wasm_bindgen(js_name = getChunk)] + pub async fn get_chunk(&self, addr: ChunkAddr) -> Result, JsError> { + let chunk = self.0.fetch_chunk(addr.0).await?; + Ok(chunk.value().to_vec()) + } + + #[wasm_bindgen(js_name = putData)] + pub async fn put_data(&self, data: Vec, wallet: Wallet) -> Result { + let data = crate::Bytes::from(data); + let xorname = self.0.put(data, &wallet.0).await?; + Ok(DataAddr(xorname)) + } + + #[wasm_bindgen(js_name = getData)] + pub async fn get_data(&self, addr: DataAddr) -> Result, JsError> { + let data = self.0.get(addr.0).await?; + Ok(data.to_vec()) + } + #[wasm_bindgen] - pub async fn put(&self, data: Vec, wallet: Wallet) -> Result, JsValue> { + pub async fn cost(&self, data: Vec) -> Result { let data = crate::Bytes::from(data); - self.0 - .put(data, &wallet.0) - .await - .map_err(|err| serde_wasm_bindgen::to_value(&err).expect("serialization to succeed")) - .map(|xorname| xorname.to_vec()) + let cost = self.0.cost(data).await.map_err(|e| JsError::from(e))?; + + 
Ok(AttoTokens(cost)) } } #[wasm_bindgen] pub struct Wallet(evmlib::wallet::Wallet); +/// Get a funded wallet for testing. This either uses a default private key or the `EVM_PRIVATE_KEY` +/// environment variable that was used during the build process of this library. #[wasm_bindgen(js_name = getFundedWallet)] pub fn funded_wallet() -> Wallet { let network = evmlib::utils::evm_network_from_env().expect("network init from env"); diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 4ee7753fde..e7d79b6e0f 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -33,5 +33,8 @@ tempfile = "3.10.1" [dev-dependencies] tokio = { version = "1.32.0", features = ["macros", "rt"] } +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasmtimer = { version = "0.2.0", features = ["serde"] } + [lints] workspace = true diff --git a/sn_evm/src/data_payments.rs b/sn_evm/src/data_payments.rs index 7ac835bbd1..688d11b621 100644 --- a/sn_evm/src/data_payments.rs +++ b/sn_evm/src/data_payments.rs @@ -14,7 +14,10 @@ use evmlib::{ }; use libp2p::{identity::PublicKey, PeerId}; use serde::{Deserialize, Serialize}; -use std::time::SystemTime; +#[cfg(not(target_arch = "wasm32"))] +pub use std::time::SystemTime; +#[cfg(target_arch = "wasm32")] +pub use wasmtimer::std::SystemTime; use xor_name::XorName; /// The time in seconds that a quote is valid for @@ -24,7 +27,7 @@ pub const QUOTE_EXPIRATION_SECS: u64 = 3600; const LIVE_TIME_MARGIN: u64 = 10; /// The proof of payment for a data payment -#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] pub struct ProofOfPayment { /// The Quote we're paying for pub quote: PaymentQuote, @@ -76,9 +79,7 @@ impl Default for QuotingMetrics { /// A payment quote to store data given by a node to a client /// Note that the PaymentQuote is a contract between the node and itself to make sure the clients aren’t mispaying. /// It is NOT a contract between the client and the node. 
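
The time-related hunks in these wasm patches all work around the same target limitation: on `wasm32-unknown-unknown`, `std::time::Instant::now()` and `std::time::SystemTime::now()` are not implemented and panic at runtime, so browser builds need a clock backed by JS timers. That is why `data_cost` switches to `sn_networking::target_arch::Instant` and sn_evm pulls `SystemTime` from `wasmtimer`; the `Hash` derives on `ProofOfPayment` and `PaymentQuote` are dropped, presumably because the wasmtimer `SystemTime` does not implement `Hash`. A minimal sketch of the cfg-gated re-export pattern, assuming wasmtimer exposes both types under `wasmtimer::std` (the diff itself only shows `SystemTime`):

// Pick a clock that works on both native and wasm32 builds; call sites then
// use `Instant`/`SystemTime` without caring about the target.
#[cfg(not(target_arch = "wasm32"))]
pub use std::time::{Instant, SystemTime};
#[cfg(target_arch = "wasm32")]
pub use wasmtimer::std::{Instant, SystemTime};
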
-#[derive( - Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize, custom_debug::Debug, -)] +#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, custom_debug::Debug)] pub struct PaymentQuote { /// the content paid for pub content: XorName, @@ -199,7 +200,7 @@ impl PaymentQuote { /// Returns true) if the quote has not yet expired pub fn has_expired(&self) -> bool { - let now = std::time::SystemTime::now(); + let now = SystemTime::now(); let dur_s = match now.duration_since(self.timestamp) { Ok(dur) => dur.as_secs(), From 5fdb8b4e8d58fc6f0cf0598cf3845867c4d24eac Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 11 Oct 2024 08:33:11 +0200 Subject: [PATCH 163/255] chore(global): update wasmtimer and alloy --- Cargo.lock | 611 +++++++++++++++++++++++++---------------------------- Cargo.toml | 4 - 2 files changed, 288 insertions(+), 327 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 581331da98..14c241bde9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -119,7 +119,8 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "056f2c01b2aed86e15b43c47d109bfc8b82553dc34e66452875e51247ec31ab2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -140,9 +141,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.36" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c225801d42099570d0674701dddd4142f0ef715282aeb5985042e2ec962df7" +checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db" dependencies = [ "num_enum", "strum", @@ -151,7 +152,8 @@ dependencies = [ [[package]] name = "alloy-consensus" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -166,7 +168,8 @@ dependencies = [ [[package]] name = "alloy-contract" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917f7d12cf3971dc8c11c9972f732b35ccb9aaaf5f28f2f87e9e6523bee3a8ad" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -184,9 +187,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb750349efda145ca6aada68d0336067f7f364d7d44ef09e2cf000b040c5e99" +checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -197,9 +200,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f95d76a38cae906fd394a5afb0736aaceee5432efe76addfd71048e623e208af" +checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -237,7 +240,8 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.4.2" -source = 
"git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -254,7 +258,8 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" dependencies = [ "alloy-primitives", "alloy-serde", @@ -263,9 +268,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c66eec1acdd96b39b995b8f5ee5239bc0c871d62c527ae1ac9fd1d7fecd455" +checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -276,7 +281,8 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -289,7 +295,8 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,7 +316,8 @@ dependencies = [ [[package]] name = "alloy-network-primitives" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -321,7 +329,8 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -336,19 +345,18 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb848c43f6b06ae3de2e4a67496cbbabd78ae87db0f1248934f15d76192c6a" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", "derive_more", - "foldhash", - "hashbrown 0.15.0", + "hashbrown 0.14.5", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.5.0", "itoa", "k256", "keccak-asm", @@ -365,7 +373,8 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" dependencies = [ "alloy-chains", "alloy-consensus", @@ -378,7 +387,6 @@ dependencies = [ "alloy-rpc-client", "alloy-rpc-types-anvil", "alloy-rpc-types-eth", - "alloy-signer", "alloy-signer-local", "alloy-transport", "alloy-transport-http", @@ -389,17 +397,14 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", - "parking_lot", "pin-project", - "reqwest 0.12.8", - "schnellru", + "reqwest 0.12.7", "serde", "serde_json", "thiserror", "tokio", "tracing", "url", - "wasmtimer", ] [[package]] @@ -421,13 +426,14 @@ checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "alloy-rpc-client" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -435,7 +441,7 @@ dependencies = [ "alloy-transport-http", "futures", "pin-project", - "reqwest 0.12.8", + "reqwest 0.12.7", "serde", "serde_json", "tokio", @@ -443,13 +449,13 @@ dependencies = [ "tower 0.5.1", "tracing", "url", - "wasmtimer", ] [[package]] name = "alloy-rpc-types" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -461,7 +467,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" dependencies = [ "alloy-primitives", "alloy-serde", @@ -471,7 +478,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" dependencies = [ "alloy-consensus", "alloy-eips", @@ -489,7 +497,8 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" dependencies = [ "alloy-primitives", "serde", @@ -499,7 +508,8 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" dependencies = [ "alloy-primitives", "async-trait", @@ -512,7 +522,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" dependencies = [ "alloy-consensus", "alloy-network", @@ -526,42 +537,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "661c516eb1fa3294cc7f2fb8955b3b609d639c282ac81a4eedb14d3046db503a" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecbabb8fc3d75a0c2cea5215be22e7a267e3efde835b0f2a8922f5e3f5d47683" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.6.0", + "indexmap 2.5.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16517f2af03064485150d89746b8ffdcdbc9b6eeb3d536fb66efd7c2846fbc75" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" dependencies = [ "alloy-json-abi", "const-hex", @@ -570,15 +581,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.79", + "syn 2.0.77", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c07ebb0c1674ff8cbb08378d7c2e0e27919d2a2dae07ad3bca26174deda8d389" +checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" dependencies = [ "serde", "winnow", @@ -586,9 +597,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e448d879903624863f608c552d10efb0e0905ddbee98b0049412799911eb062" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -600,7 +611,8 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -614,17 +626,17 @@ dependencies = [ "tracing", "url", "wasm-bindgen-futures", - "wasmtimer", ] [[package]] name = "alloy-transport-http" version = "0.4.2" -source = "git+https://github.com/alloy-rs/alloy.git?branch=main#3f5f1e5de21552ed875ffdc16fb4d5db9d1ba0e8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.8", + "reqwest 0.12.7", "serde_json", "tower 0.5.1", "tracing", @@ -879,7 +891,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "synstructure", 
] @@ -891,7 +903,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -963,9 +975,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -974,24 +986,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1037,7 +1049,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1046,14 +1058,14 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" @@ -1252,9 +1264,9 @@ dependencies = [ [[package]] name = "bip39" -version = "2.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33415e24172c1b7d6066f6d999545375ab8e1d95421d6784bdfff9496f292387" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", "serde", @@ -1276,21 +1288,11 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - [[package]] name = "bitcoin_hashes" -version = "0.13.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals", - "hex-conservative", -] +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" [[package]] name = "bitflags" @@ -1469,7 +1471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.7", "serde", ] @@ -1621,9 +1623,9 @@ 
dependencies = [ [[package]] name = "cc" -version = "1.1.28" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1720,9 +1722,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", "clap_derive", @@ -1730,9 +1732,9 @@ dependencies = [ [[package]] name = "clap-verbosity-flag" -version = "2.2.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e099138e1807662ff75e2cebe4ae2287add879245574489f9b1588eb5e5564ed" +checksum = "63d19864d6b68464c59f7162c9914a0b569ddc2926b4a2d71afe62a9738eff53" dependencies = [ "clap", "log", @@ -1740,9 +1742,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstream", "anstyle", @@ -1750,19 +1752,19 @@ dependencies = [ "strsim", "terminal_size", "unicase", - "unicode-width 0.2.0", + "unicode-width", ] [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -1882,7 +1884,7 @@ dependencies = [ "encode_unicode", "lazy_static", "libc", - "unicode-width 0.1.14", + "unicode-width", "windows-sys 0.52.0", ] @@ -1898,9 +1900,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" dependencies = [ "cfg-if", "cpufeatures", @@ -2222,7 +2224,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2244,7 +2246,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "synstructure", ] @@ -2269,7 +2271,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2280,7 +2282,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2414,7 +2416,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "unicode-xid", ] @@ -2530,7 +2532,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2686,7 +2688,7 @@ dependencies = 
[ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -2918,9 +2920,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", "miniz_oxide 0.8.0", @@ -2941,12 +2943,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foldhash" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2992,9 +2988,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -3017,9 +3013,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -3027,15 +3023,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -3045,9 +3041,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -3061,13 +3057,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -3077,21 +3073,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.14", + "rustls 0.23.13", "rustls-pki-types", ] [[package]] 
name = "futures-sink" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -3116,9 +3112,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -3470,7 +3466,7 @@ checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -3720,8 +3716,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -3783,7 +3779,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.6.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util 0.7.12", @@ -3820,17 +3816,6 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", -] - -[[package]] -name = "hashbrown" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash", "serde", ] @@ -3909,12 +3894,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-conservative" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" - [[package]] name = "hex-literal" version = "0.4.1" @@ -4079,9 +4058,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -4172,7 +4151,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.14", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -4194,9 +4173,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -4207,6 +4186,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] @@ -4318,7 +4298,7 @@ dependencies = [ "globset", "log", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.7", "same-file", "walkdir", "winapi-util", @@ -4356,18 +4336,18 @@ version = "1.9.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "hashbrown 0.12.3", ] [[package]] name = "indexmap" -version = "2.6.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.14.5", "serde", ] @@ -4382,7 +4362,7 @@ dependencies = [ "number_prefix", "portable-atomic", "tokio", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -4392,7 +4372,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.5.0", "is-terminal", "itoa", "log", @@ -4420,7 +4400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -4449,9 +4429,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is-terminal" @@ -4514,9 +4494,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.71" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -4572,9 +4552,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libm" @@ -5045,7 +5025,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.17.8", - "rustls 0.23.14", + "rustls 0.23.13", "socket2", "thiserror", "tokio", @@ -5156,7 +5136,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5188,7 +5168,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.14", + "rustls 0.23.13", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -5294,7 +5274,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "scopeguard", ] @@ -5306,11 +5286,11 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lru" -version = "0.12.5" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.14.5", ] [[package]] @@ -5493,7 
+5473,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5516,9 +5496,9 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.18.2" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +checksum = "8b852bc02a2da5feed68cd14fa50d0774b92790a5bdbfa932a813926c8472070" dependencies = [ "arrayref", "byteorder", @@ -5529,7 +5509,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.8.0", + "unsigned-varint 0.7.2", "url", ] @@ -5716,7 +5696,7 @@ dependencies = [ "pretty_assertions", "prometheus-parse", "ratatui", - "reqwest 0.12.8", + "reqwest 0.12.7", "serde", "serde_json", "signal-hook", @@ -5788,7 +5768,7 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "num-integer", "num-traits", ] @@ -5835,7 +5815,7 @@ version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", "libm", ] @@ -5866,7 +5846,7 @@ checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -5904,9 +5884,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" @@ -6235,7 +6215,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -6256,27 +6236,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.6.0", + "indexmap 2.5.0", ] [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -6313,9 +6293,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plist" @@ -6324,7 +6304,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42cf17e9a1800f5f396bc67d193dc9411b59012a5876445ef450d449881e1016" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 
2.5.0", "quick-xml 0.32.0", "serde", "time", @@ -6398,9 +6378,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -6531,14 +6511,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "proc-macro2" -version = "1.0.87" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -6569,7 +6549,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -6598,7 +6578,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift 0.3.0", - "regex-syntax 0.8.5", + "regex-syntax 0.8.4", "rusty-fork", "tempfile", "unarray", @@ -6749,7 +6729,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.14", + "rustls 0.23.13", "socket2", "thiserror", "tokio", @@ -6766,7 +6746,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash", - "rustls 0.23.14", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -7029,7 +7009,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -7075,9 +7055,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -7095,14 +7075,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", - "regex-syntax 0.8.5", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -7116,13 +7096,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax 0.8.4", ] [[package]] @@ -7133,9 +7113,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" @@ -7180,9 +7160,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -7202,8 +7182,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.14", - "rustls-pemfile 2.2.0", + "rustls 0.23.13", + "rustls-pemfile 2.1.3", "rustls-pki-types", "serde", "serde_json", @@ -7474,9 +7454,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring 0.17.8", @@ -7497,18 +7477,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.2.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -7575,17 +7556,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schnellru" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" -dependencies = [ - "ahash", - "cfg-if", - "hashbrown 0.13.2", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -7777,7 +7747,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -7794,9 +7764,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -7828,7 +7798,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -8005,7 +7975,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.4.0", + "autocfg 1.3.0", ] [[package]] @@ -8035,7 +8005,7 @@ dependencies = [ "predicates 3.1.2", "prost 0.9.0", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.7", "semver 1.0.23", "serde", "serde_json", @@ -8069,7 +8039,7 @@ dependencies = [ "flate2", "lazy_static", "regex", - "reqwest 0.12.8", + "reqwest 0.12.7", "semver 1.0.23", "serde_json", "tar", @@ -8266,7 +8236,7 @@ dependencies = [ "prost 0.9.0", "rand 0.8.5", "rayon", - "reqwest 0.12.8", + "reqwest 0.12.7", "rmp-serde", "self_encryption", "serde", @@ -8330,7 +8300,7 @@ dependencies = [ 
"lazy_static", "libp2p 0.54.1", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.7", "sn_protocol", "thiserror", "tokio", @@ -8570,7 +8540,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8581,9 +8551,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "9fdf97c441f18a4f92425b896a4ec7a27e03631a0b1047ec4e34e9916a9a167e" dependencies = [ "debugid", "memmap2", @@ -8593,9 +8563,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "bc8ece6b129e97e53d1fbb3f61d33a6a9e5369b11d01228c068094d6d134eaea" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -8615,9 +8585,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -8626,14 +8596,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20e7b52ad118b2153644eea95c6fc740b6c1555b2344fdab763fc9de4075f665" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8659,7 +8629,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8706,9 +8676,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.42" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909" dependencies = [ "filetime", "libc", @@ -8717,9 +8687,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", @@ -8730,12 +8700,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ "rustix", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -8761,22 +8731,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" 
+checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8936,7 +8906,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -8966,7 +8936,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.14", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] @@ -9045,11 +9015,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", @@ -9206,7 +9176,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -9326,7 +9296,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -9355,7 +9325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3e785f863a3af4c800a2a669d0b64c879b538738e352607e2624d03f868dc01" dependencies = [ "crossterm 0.27.0", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -9385,9 +9355,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" @@ -9418,9 +9388,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-bom" @@ -9457,7 +9427,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width 0.1.14", + "unicode-width", ] [[package]] @@ -9466,12 +9436,6 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" -[[package]] -name = "unicode-width" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" - [[package]] name = "unicode-xid" version = "0.2.6" @@ -9690,9 +9654,9 @@ 
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.94" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", "once_cell", @@ -9701,24 +9665,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.94" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.44" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -9728,9 +9692,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.94" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9738,28 +9702,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.94" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.94" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-bindgen-test" -version = "0.3.44" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a93d2a9ae98f1af8953f6415397299d808cce0a24f6d7c613d27bd83edf98da8" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" dependencies = [ "console_error_panic_hook", "js-sys", @@ -9772,19 +9736,20 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.44" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb8b294691f640bad8f2bb35a11bb28272701b1d687bd5fd661a27684e894d4d" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] name = "wasmtimer" -version = "0.2.0" -source = "git+https://github.com/b-zee/wasmtimer-rs.git?branch=master#1295c9ee97d6da1e7ed44e6b3ca7a60d6e732d4f" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7ed9d8b15c7fb594d72bfb4b5a276f3d2029333cd93a932f376f5937f6f80ee" dependencies = [ "futures", "js-sys", 
@@ -9797,9 +9762,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.71" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -9892,7 +9857,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -10119,9 +10084,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] @@ -10293,7 +10258,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] @@ -10313,7 +10278,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.77", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c658e46781..c34946d706 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -59,7 +59,3 @@ pre-release-commit-message = "chore(release): release commit, tags, deps and cha publish = false push = false tag = false - -[patch.crates-io] -alloy = { git = 'https://github.com/alloy-rs/alloy.git', branch = "main" } -wasmtimer = { git = 'https://github.com/b-zee/wasmtimer-rs.git', branch = "master" } From c3a3f3b81667173a7ec6446b0d8a47715c05aef5 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 11 Oct 2024 15:51:31 +0200 Subject: [PATCH 164/255] refactor(autonomi): rename calls for rebase --- autonomi/Cargo.toml | 1 + autonomi/index.html | 11 ++++++---- autonomi/src/client/wasm.rs | 42 +++++++++++++++---------------------- 3 files changed, 25 insertions(+), 29 deletions(-) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index efbc73cf82..43cf7b83a7 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -70,6 +70,7 @@ evmlib = { path = "../evmlib", version = "0.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" +test_utils = { path = "../test_utils" } tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-web = "0.1.3" diff --git a/autonomi/index.html b/autonomi/index.html index 7052c21c37..bd806016ca 100644 --- a/autonomi/index.html +++ b/autonomi/index.html @@ -24,16 +24,19 @@ console.log("wallet retrieved"); const data = new Uint8Array([1, 2, 3]); + console.log("our data: ", data); console.log("calculating cost..."); - let result = await client.cost(data, wallet); + let result = await client.dataCost(data, wallet); console.log("calculated cost: ", result.toString()); console.log("putting..."); - const dataAddr = await client.putData(data, wallet); - console.log("put done!"); + const dataAddr = await client.dataPut(data, wallet); + console.log("put done: ", 
dataAddr.toString());
 
-    console.log("Uploaded data to: ", dataAddr.toString());
+    console.log("getting...");
+    const data_get = await client.dataGet(dataAddr);
+    console.log("get done: ", data_get, " (original data: ", data, ")");
 }
 
 document.getElementById ("btn-run").addEventListener("click", run, false);
diff --git a/autonomi/src/client/wasm.rs b/autonomi/src/client/wasm.rs
index b91414b3a6..b6149776fe 100644
--- a/autonomi/src/client/wasm.rs
+++ b/autonomi/src/client/wasm.rs
@@ -13,7 +13,7 @@ pub struct DataAddr(xor_name::XorName);
 impl DataAddr {
     #[wasm_bindgen(js_name = toString)]
     pub fn to_string(&self) -> String {
-        crate::client::address::xorname_to_str(self.0)
+        crate::client::address::addr_to_str(self.0)
     }
 }
 
@@ -41,34 +41,34 @@ impl Client {
         Ok(Client(client))
     }
 
-    #[wasm_bindgen(js_name = putChunk)]
-    pub async fn put_chunk(&self, _data: Vec<u8>, _wallet: Wallet) -> Result<ChunkAddr, JsError> {
-        unimplemented!()
+    #[wasm_bindgen(js_name = chunkPut)]
+    pub async fn chunk_put(&self, _data: Vec<u8>, _wallet: Wallet) -> Result<ChunkAddr, JsError> {
+        async { unimplemented!() }.await
     }
 
-    #[wasm_bindgen(js_name = getChunk)]
-    pub async fn get_chunk(&self, addr: ChunkAddr) -> Result<Vec<u8>, JsError> {
-        let chunk = self.0.fetch_chunk(addr.0).await?;
+    #[wasm_bindgen(js_name = chunkGet)]
+    pub async fn chunk_get(&self, addr: ChunkAddr) -> Result<Vec<u8>, JsError> {
+        let chunk = self.0.chunk_get(addr.0).await?;
         Ok(chunk.value().to_vec())
     }
 
-    #[wasm_bindgen(js_name = putData)]
-    pub async fn put_data(&self, data: Vec<u8>, wallet: Wallet) -> Result<DataAddr, JsError> {
+    #[wasm_bindgen(js_name = dataPut)]
+    pub async fn data_put(&self, data: Vec<u8>, wallet: Wallet) -> Result<DataAddr, JsError> {
         let data = crate::Bytes::from(data);
-        let xorname = self.0.put(data, &wallet.0).await?;
+        let xorname = self.0.data_put(data, &wallet.0).await?;
         Ok(DataAddr(xorname))
     }
 
-    #[wasm_bindgen(js_name = getData)]
-    pub async fn get_data(&self, addr: DataAddr) -> Result<Vec<u8>, JsError> {
-        let data = self.0.get(addr.0).await?;
+    #[wasm_bindgen(js_name = dataGet)]
+    pub async fn data_get(&self, addr: DataAddr) -> Result<Vec<u8>, JsError> {
+        let data = self.0.data_get(addr.0).await?;
         Ok(data.to_vec())
     }
 
-    #[wasm_bindgen]
-    pub async fn cost(&self, data: Vec<u8>) -> Result<AttoTokens, JsError> {
+    #[wasm_bindgen(js_name = dataCost)]
+    pub async fn data_cost(&self, data: Vec<u8>) -> Result<AttoTokens, JsError> {
         let data = crate::Bytes::from(data);
-        let cost = self.0.cost(data).await.map_err(|e| JsError::from(e))?;
+        let cost = self.0.data_cost(data).await.map_err(JsError::from)?;
         Ok(AttoTokens(cost))
     }
 
@@ -81,15 +81,7 @@ pub struct Wallet(evmlib::wallet::Wallet);
 /// environment variable that was used during the build process of this library.
 #[wasm_bindgen(js_name = getFundedWallet)]
 pub fn funded_wallet() -> Wallet {
-    let network = evmlib::utils::evm_network_from_env().expect("network init from env");
-
-    let private_key = option_env!("EVM_PRIVATE_KEY")
-        .unwrap_or_else(|| "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80");
-
-    Wallet(
-        evmlib::wallet::Wallet::new_from_private_key(network, private_key)
-            .expect("Invalid private key"),
-    )
+    Wallet(test_utils::evm::get_funded_wallet())
 }
 
 /// Enable tracing logging in the console.
From bb2688d83a9bf00a5cc3f0eb4b37fed6d02a11c1 Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 11 Oct 2024 16:00:04 +0200 Subject: [PATCH 165/255] ci: enable wasm32 compilation step --- .github/workflows/cross-platform.yml | 31 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/.github/workflows/cross-platform.yml b/.github/workflows/cross-platform.yml index 3672b6d7b7..6beeac321d 100644 --- a/.github/workflows/cross-platform.yml +++ b/.github/workflows/cross-platform.yml @@ -14,26 +14,25 @@ env: jobs: - # wasm: - # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - # name: Wasm builds - # runs-on: ubuntu-latest + wasm: + if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + name: wasm32-unknown-unknown builds + runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v4 + steps: + - uses: actions/checkout@v4 - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 - # - name: Install wasm-pack - # run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + - name: Install wasm-pack + run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh - # - name: Build client for wasm - # # wasm pack doesnt support workspaces - # # --dev to avoid a loong optimisation step - # run: cd sn_client && wasm-pack build --dev - # timeout-minutes: 30 + - name: Build WASM package + # --dev to avoid optimisation + run: wasm-pack build --dev --target=web autonomi + timeout-minutes: 30 websocket: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" From 3968607c67f999da14520f8ddee88825c478a55d Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Fri, 11 Oct 2024 16:04:08 +0200 Subject: [PATCH 166/255] fix(autonomi): enable wasm32 lib --- autonomi/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index aa5e463ac6..697b4f2bde 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://maidsafe.net" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" +[lib] +crate-type = ["cdylib", "rlib"] + [features] default = ["data"] full = ["data", "registers", "vault"] From c5285c46e97a2cc34a273b44c2d900b5ff251f76 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Fri, 11 Oct 2024 02:29:16 +0530 Subject: [PATCH 167/255] chore(autonomi): use a single utility function to get evm vars --- .github/workflows/merge.yml | 14 ++--- autonomi/Cargo.toml | 2 +- autonomi/src/lib.rs | 2 +- autonomi/tests/wallet.rs | 8 ++- autonomi_cli/Cargo.toml | 2 +- autonomi_cli/src/access/keys.rs | 4 +- autonomi_cli/src/access/network.rs | 26 --------- evm_testnet/src/main.rs | 6 +- evmlib/Cargo.toml | 1 + evmlib/src/common.rs | 8 +++ evmlib/src/contract/data_payments/error.rs | 8 +++ evmlib/src/contract/data_payments/mod.rs | 8 +++ evmlib/src/contract/mod.rs | 8 +++ evmlib/src/contract/network_token.rs | 8 +++ evmlib/src/cryptography.rs | 8 +++ evmlib/src/event.rs | 8 +++ evmlib/src/lib.rs | 8 +++ evmlib/src/testnet.rs | 8 +++ evmlib/src/transaction.rs | 8 +++ evmlib/src/utils.rs | 58 +++++++++++++------ evmlib/src/wallet.rs | 17 +++++- sn_evm/Cargo.toml | 1 + sn_evm/src/evm.rs | 15 ----- sn_evm/src/lib.rs | 5 +- sn_networking/src/event/mod.rs | 6 +- sn_node/Cargo.toml | 2 +- sn_node/src/bin/safenode/main.rs | 16 +---- .../src/bin/cli/subcommands/evm_network.rs 
| 12 +++- test_utils/src/evm.rs | 11 ++-- 29 files changed, 181 insertions(+), 107 deletions(-) delete mode 100644 sn_evm/src/evm.rs diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index fd11bce681..2a90a5e7ae 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -160,11 +160,11 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --bin safenode --bin autonomi_cli + run: cargo build --release --features local --bin safenode --bin autonomi_cli timeout-minutes: 30 - name: Start a local network - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: start enable-evm-testnet: true @@ -338,7 +338,7 @@ jobs: - name: Stop the local network and upload logs if: always() - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: stop log_file_prefix: safe_test_logs_e2e @@ -592,7 +592,7 @@ jobs: timeout-minutes: 30 - name: Start a local network - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: start enable-evm-testnet: true @@ -625,7 +625,7 @@ jobs: - name: Stop the local network and upload logs if: always() - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: stop log_file_prefix: safe_test_logs_churn @@ -721,7 +721,7 @@ jobs: timeout-minutes: 30 - name: Start a local network - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: start enable-evm-testnet: true @@ -765,7 +765,7 @@ jobs: - name: Stop the local network and upload logs if: always() - uses: maidsafe/sn-local-testnet-action@evm-dev + uses: maidsafe/sn-local-testnet-action@main with: action: stop log_file_prefix: safe_test_logs_data_location diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index aa5e463ac6..2bc48ca322 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -15,7 +15,7 @@ full = ["data", "registers", "vault"] data = [] vault = ["data"] fs = ["tokio/fs", "data"] -local = ["sn_networking/local", "test_utils/local"] +local = ["sn_networking/local", "test_utils/local", "sn_evm/local"] registers = ["data"] loud = [] diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index cfc8d81f72..98921768ce 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -36,7 +36,7 @@ pub mod client; #[cfg(feature = "data")] mod self_encryption; -pub use sn_evm::evm; +pub use sn_evm::get_evm_network_from_env; pub use sn_evm::EvmNetwork; pub use sn_evm::EvmWallet as Wallet; diff --git a/autonomi/tests/wallet.rs b/autonomi/tests/wallet.rs index b410f0dd80..5d5be9301e 100644 --- a/autonomi/tests/wallet.rs +++ b/autonomi/tests/wallet.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use const_hex::traits::FromHex;
-use sn_evm::evm::network_from_env;
+use sn_evm::get_evm_network_from_env;
 use sn_evm::EvmWallet;
 use sn_evm::{Amount, RewardsAddress};
 use sn_logging::LogBuilder;
@@ -16,7 +16,8 @@ use test_utils::evm::get_funded_wallet;
 #[tokio::test]
 async fn from_private_key() {
     let private_key = "0xdb1049e76a813c94be0df47ec3e20533ca676b1b9fef2ddbce9daa117e4da4aa";
-    let network = network_from_env().expect("Could not get EVM network from environment variables");
+    let network =
+        get_evm_network_from_env().expect("Could not get EVM network from environment variables");
     let wallet = EvmWallet::new_from_private_key(network, private_key).unwrap();
 
     assert_eq!(
@@ -29,7 +30,8 @@ async fn from_private_key() {
 async fn send_tokens() {
     let _log_appender_guard = LogBuilder::init_single_threaded_tokio_test("wallet", false);
 
-    let network = network_from_env().expect("Could not get EVM network from environment variables");
+    let network =
+        get_evm_network_from_env().expect("Could not get EVM network from environment variables");
     let wallet = get_funded_wallet();
     let receiving_wallet = EvmWallet::new_with_random_wallet(network);
diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml
index 414cd47149..7b1d0e948b 100644
--- a/autonomi_cli/Cargo.toml
+++ b/autonomi_cli/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 
 [features]
 default = ["metrics"]
-local = ["sn_peers_acquisition/local"]
+local = ["sn_peers_acquisition/local", "autonomi/local"]
 metrics = ["sn_logging/process-metrics"]
 network-contacts = ["sn_peers_acquisition/network-contacts"]
diff --git a/autonomi_cli/src/access/keys.rs b/autonomi_cli/src/access/keys.rs
index ef06e8c4e1..18310f4831 100644
--- a/autonomi_cli/src/access/keys.rs
+++ b/autonomi_cli/src/access/keys.rs
@@ -7,7 +7,7 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 
 use autonomi::client::registers::RegisterSecretKey;
-use autonomi::Wallet;
+use autonomi::{get_evm_network_from_env, Wallet};
 use color_eyre::eyre::{Context, Result};
 use color_eyre::Section;
 use std::env;
@@ -24,7 +24,7 @@ const REGISTER_SIGNING_KEY_FILE: &str = "register_signing_key";
 pub fn load_evm_wallet() -> Result<Wallet> {
     let secret_key =
         get_secret_key().wrap_err("The secret key is required to perform this action")?;
-    let network = crate::network::get_evm_network_from_env()?;
+    let network = get_evm_network_from_env()?;
     let wallet = Wallet::new_from_private_key(network, &secret_key)
         .wrap_err("Failed to load EVM wallet from key")?;
     Ok(wallet)
diff --git a/autonomi_cli/src/access/network.rs b/autonomi_cli/src/access/network.rs
index 2268856581..f7e455dade 100644
--- a/autonomi_cli/src/access/network.rs
+++ b/autonomi_cli/src/access/network.rs
@@ -6,7 +6,6 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
-use autonomi::EvmNetwork;
 use autonomi::Multiaddr;
 use color_eyre::eyre::Context;
 use color_eyre::Result;
@@ -14,34 +13,9 @@ use color_eyre::Section;
 use sn_peers_acquisition::PeersArgs;
 use sn_peers_acquisition::SAFE_PEERS_ENV;
 
-#[cfg(not(feature = "local"))]
-use autonomi::evm::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL};
 
 pub async fn get_peers(peers: PeersArgs) -> Result<Vec<Multiaddr>> {
     peers.get_peers().await
         .wrap_err("Please provide valid Network peers to connect to")
         .with_suggestion(|| format!("make sure you've provided network peers using the --peers option or the {SAFE_PEERS_ENV} env var"))
         .with_suggestion(|| "a peer address looks like this: /ip4/42.42.42.42/udp/4242/quic-v1/p2p/B64nodePeerIDvdjb3FAJF4ks3moreBase64CharsHere")
 }
-
-pub fn get_evm_network_from_env() -> Result<EvmNetwork> {
-    #[cfg(feature = "local")]
-    {
-        println!("Getting EVM network from local CSV as the local feature is enabled");
-        let network = autonomi::evm::local_evm_network_from_csv()
-            .wrap_err("Failed to get EVM network from local CSV")
-            .with_suggestion(|| "make sure you've set up the local EVM network by running `cargo run --bin evm_testnet`")?;
-        Ok(network)
-    }
-    #[cfg(not(feature = "local"))]
-    {
-        let network = autonomi::evm::network_from_env()
-            .wrap_err("Failed to get EVM network from environment variables")
-            .with_suggestion(|| format!("If connecting to a custom EVM network, make sure you've set the following environment variables: {RPC_URL}, {PAYMENT_TOKEN_ADDRESS} and {DATA_PAYMENTS_ADDRESS}"))?;
-        if matches!(network, EvmNetwork::Custom(_)) {
-            println!("Using custom EVM network found from environment variables");
-            info!("Using custom EVM network found from environment variables {network:?}");
-        }
-        Ok(network)
-    }
-}
diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs
index 1e9ddca501..52df961aab 100644
--- a/evm_testnet/src/main.rs
+++ b/evm_testnet/src/main.rs
@@ -131,11 +131,11 @@ impl TestnetData {
         println!("Run the CLI or Node with the following env vars set to manually connect to this network:");
         println!(
             "{}=\"{}\" {}=\"{}\" {}=\"{}\"",
-            sn_evm::evm::RPC_URL,
+            sn_evm::RPC_URL,
             self.rpc_url,
-            sn_evm::evm::PAYMENT_TOKEN_ADDRESS,
+            sn_evm::PAYMENT_TOKEN_ADDRESS,
             self.payment_token_address,
-            sn_evm::evm::DATA_PAYMENTS_ADDRESS,
+            sn_evm::DATA_PAYMENTS_ADDRESS,
             self.data_payments_address
         );
         println!("--------------");
diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml
index fc101d17eb..fae3cba0cb 100644
--- a/evmlib/Cargo.toml
+++ b/evmlib/Cargo.toml
@@ -10,6 +10,7 @@ version = "0.1.0"
 
 [features]
 wasm-bindgen = ["alloy/wasm-bindgen"]
+local = []
 
 [dependencies]
 alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] }
diff --git a/evmlib/src/common.rs b/evmlib/src/common.rs
index 4897a1e2cf..af210f9285 100644
--- a/evmlib/src/common.rs
+++ b/evmlib/src/common.rs
@@ -1,3 +1,11 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+ use alloy::primitives::FixedBytes; pub type Address = alloy::primitives::Address; diff --git a/evmlib/src/contract/data_payments/error.rs b/evmlib/src/contract/data_payments/error.rs index 84bd2c6c9a..95ec1c1c27 100644 --- a/evmlib/src/contract/data_payments/error.rs +++ b/evmlib/src/contract/data_payments/error.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::contract::network_token; use alloy::transports::{RpcError, TransportErrorKind}; diff --git a/evmlib/src/contract/data_payments/mod.rs b/evmlib/src/contract/data_payments/mod.rs index 22d45f7ed5..352f294581 100644 --- a/evmlib/src/contract/data_payments/mod.rs +++ b/evmlib/src/contract/data_payments/mod.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + pub mod error; use crate::common; diff --git a/evmlib/src/contract/mod.rs b/evmlib/src/contract/mod.rs index 1a4e070efd..d428880800 100644 --- a/evmlib/src/contract/mod.rs +++ b/evmlib/src/contract/mod.rs @@ -1,2 +1,10 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + pub mod data_payments; pub mod network_token; diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index be9d2bca7f..4c8112e869 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+ use crate::common::{Address, TxHash, U256}; use crate::contract::network_token::NetworkTokenContract::NetworkTokenContractInstance; use alloy::providers::{Network, Provider}; diff --git a/evmlib/src/cryptography.rs b/evmlib/src/cryptography.rs index fea0297a83..ddc0149b43 100644 --- a/evmlib/src/cryptography.rs +++ b/evmlib/src/cryptography.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::common::Hash; use alloy::primitives::keccak256; diff --git a/evmlib/src/event.rs b/evmlib/src/event.rs index eff4bdf30e..5cdda3d91e 100644 --- a/evmlib/src/event.rs +++ b/evmlib/src/event.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::common::{Address, Hash, U256}; use alloy::primitives::{b256, FixedBytes}; use alloy::rpc::types::Log; diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index cdbbab6b5c..6e29a7f4d5 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. + use crate::common::{Address, QuoteHash, TxHash, U256}; use crate::transaction::verify_data_payment; use alloy::primitives::address; diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index c4db116c2e..93922b539d 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -1,3 +1,11 @@ +// Copyright 2024 MaidSafe.net limited. +// +// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. Please review the Licences for the specific language governing +// permissions and limitations relating to use of the SAFE Network Software. 
+
 use crate::common::Address;
 use crate::contract::data_payments::DataPaymentsHandler;
 use crate::contract::network_token::NetworkToken;
diff --git a/evmlib/src/transaction.rs b/evmlib/src/transaction.rs
index c5c97896e0..dc8609a4d5 100644
--- a/evmlib/src/transaction.rs
+++ b/evmlib/src/transaction.rs
@@ -1,3 +1,11 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
 use crate::common::{Address, QuoteHash, TxHash, U256};
 use crate::event::{ChunkPaymentEvent, DATA_PAYMENT_EVENT_SIGNATURE};
 use crate::Network;
diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs
index 0ebf444645..31d13d413a 100644
--- a/evmlib/src/utils.rs
+++ b/evmlib/src/utils.rs
@@ -1,3 +1,13 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+#![allow(dead_code)]
+
 use crate::common::{Address, Hash};
 use crate::{CustomNetwork, Network};
 use dirs_next::data_dir;
@@ -31,8 +41,19 @@ pub fn dummy_hash() -> Hash {
     Hash::new(rand::rngs::OsRng.gen())
 }
 
+pub fn get_evm_testnet_csv_path() -> Result<PathBuf, Error> {
+    let file = data_dir()
+        .ok_or(Error::FailedToGetEvmNetwork(
+            "failed to get data dir when fetching evm testnet CSV file".to_string(),
+        ))?
+        .join("safe")
+        .join(EVM_TESTNET_CSV_FILENAME);
+    Ok(file)
+}
+
 /// Get the `Network` from environment variables
-pub fn network_from_env() -> Result<Network, Error> {
+/// Returns an error if we cannot obtain the network by any means.
+pub fn get_evm_network_from_env() -> Result<Network, Error> {
     let evm_vars = [
         env::var(RPC_URL)
             .ok()
@@ -52,40 +73,43 @@ pub fn network_from_env() -> Result<Network, Error> {
         })
         .collect::<Result<Vec<String>, Error>>();
 
-    let use_local_evm = std::env::var("EVM_NETWORK")
+    let mut use_local_evm = std::env::var("EVM_NETWORK")
         .map(|v| v == "local")
         .unwrap_or(false);
 
+    if use_local_evm {
+        info!("Using local EVM network as EVM_NETWORK is set to 'local'");
+    }
+
+    if cfg!(feature = "local") {
+        use_local_evm = true;
+        info!("Using local EVM network as 'local' feature flag is enabled");
+    }
+
     let use_arbitrum_one = std::env::var("EVM_NETWORK")
         .map(|v| v == "arbitrum-one")
         .unwrap_or(false);
 
-    if use_arbitrum_one {
-        Ok(Network::ArbitrumOne)
-    } else if use_local_evm {
+    if use_local_evm {
         local_evm_network_from_csv()
+    } else if use_arbitrum_one {
+        info!("Using Arbitrum One EVM network as EVM_NETWORK is set to 'arbitrum-one'");
+        Ok(Network::ArbitrumOne)
     } else if let Ok(evm_vars) = evm_vars {
+        info!("Using custom EVM network from environment variables");
         Ok(Network::Custom(CustomNetwork::new(
             &evm_vars[0],
             &evm_vars[1],
             &evm_vars[2],
         )))
     } else {
-        Ok(Network::ArbitrumOne)
+        error!("Failed to obtain EVM Network through any means");
+        Err(Error::FailedToGetEvmNetwork(
+            "Failed to obtain EVM Network through any means".to_string(),
+        ))
     }
 }
 
-pub fn get_evm_testnet_csv_path() -> Result<PathBuf, Error> {
-    let file = data_dir()
-        .ok_or(Error::FailedToGetEvmNetwork(
-            "failed to get data dir when fetching evm testnet CSV file".to_string(),
-        ))?
-        .join("safe")
-        .join(EVM_TESTNET_CSV_FILENAME);
-    Ok(file)
-}
-
 /// Get the `Network::Custom` from the local EVM testnet CSV file
-pub fn local_evm_network_from_csv() -> Result<Network, Error> {
+fn local_evm_network_from_csv() -> Result<Network, Error> {
     // load the csv
     let csv_path = get_evm_testnet_csv_path()?;
 
diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs
index 2d52bb4690..e758e58eee 100644
--- a/evmlib/src/wallet.rs
+++ b/evmlib/src/wallet.rs
@@ -1,4 +1,10 @@
-use std::collections::BTreeMap;
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
use crate::common::{Address, QuoteHash, QuotePayment, TxHash, U256};
 use crate::contract::data_payments::{DataPaymentsHandler, MAX_TRANSFERS_PER_TRANSACTION};
@@ -14,6 +20,7 @@ use alloy::rpc::types::TransactionRequest;
 use alloy::signers::local::{LocalSigner, PrivateKeySigner};
 use alloy::transports::http::{reqwest, Client, Http};
 use alloy::transports::{RpcError, TransportErrorKind};
+use std::collections::BTreeMap;
 
 #[derive(thiserror::Error, Debug)]
 pub enum Error {
@@ -244,8 +251,9 @@ pub async fn pay_for_quotes<T: IntoIterator<Item = QuotePayment>>(
     network: &Network,
     payments: T,
 ) -> Result<BTreeMap<QuoteHash, TxHash>, PayForQuotesError> {
-    info!("Paying for quotes");
-    let payments: Vec<_> = payments.into_iter().collect();
+    let payments: Vec<_> = payments.into_iter().collect();
+    info!("Paying for quotes of len: {}", payments.len());
+
     let total_amount = payments.iter().map(|(_, _, amount)| amount).sum();
 
     let mut tx_hashes_by_quote = BTreeMap::new();
@@ -268,11 +276,16 @@ pub async fn pay_for_quotes<T: IntoIterator<Item = QuotePayment>>(
 
     for batch in chunks {
         let batch: Vec<QuotePayment> = batch.to_vec();
+        debug!(
+            "Paying for batch of quotes of len: {}, {batch:?}",
+            batch.len()
+        );
 
         let tx_hash = data_payments
             .pay_for_quotes(batch.clone())
             .await
             .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?;
+        info!("Paid for batch of quotes with final tx hash: {tx_hash}");
 
         for (quote_hash, _, _) in batch {
             tx_hashes_by_quote.insert(quote_hash, tx_hash);
diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml
index 4ee7753fde..23e9f5c310 100644
--- a/sn_evm/Cargo.toml
+++ b/sn_evm/Cargo.toml
@@ -12,6 +12,7 @@ version = "0.1.0"
 
 [features]
 test-utils = []
+local = ["evmlib/local"]
 
 [dependencies]
 custom_debug = "~0.6.1"
diff --git a/sn_evm/src/evm.rs b/sn_evm/src/evm.rs
deleted file mode 100644
index 40fde7dc18..0000000000
--- a/sn_evm/src/evm.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
- -pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; - -/// Load the evm network from env -pub use evmlib::utils::network_from_env; - -/// Load the evm network from local CSV -pub use evmlib::utils::local_evm_network_from_csv; diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs index c48f36b43e..f4c70e04a6 100644 --- a/sn_evm/src/lib.rs +++ b/sn_evm/src/lib.rs @@ -13,6 +13,8 @@ pub use evmlib::common::Address as RewardsAddress; pub use evmlib::common::QuotePayment; pub use evmlib::common::{QuoteHash, TxHash}; pub use evmlib::utils; +pub use evmlib::utils::get_evm_network_from_env; +pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL}; pub use evmlib::wallet::Error as EvmWalletError; pub use evmlib::wallet::Wallet as EvmWallet; pub use evmlib::Network as EvmNetwork; @@ -21,9 +23,6 @@ mod amount; mod data_payments; mod error; -/// EVM network configuration -pub mod evm; - pub use data_payments::{PaymentQuote, ProofOfPayment, QuotingMetrics}; /// Types used in the public API diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 8cd3f704c7..ad6e1781b6 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -22,16 +22,14 @@ use libp2p::{ }; use sn_evm::PaymentQuote; +#[cfg(feature = "open-metrics")] +use sn_protocol::CLOSE_GROUP_SIZE; use sn_protocol::{ messages::{Query, Request, Response}, NetworkAddress, PrettyPrintRecordKey, }; - -#[cfg(feature = "open-metrics")] -use sn_protocol::CLOSE_GROUP_SIZE; #[cfg(feature = "open-metrics")] use std::collections::HashSet; - use std::{ collections::BTreeSet, fmt::{Debug, Formatter}, diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 980d1efaa3..46a90789d6 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -15,7 +15,7 @@ path = "src/bin/safenode/main.rs" [features] default = ["metrics", "upnp", "open-metrics", "encrypt-records"] -local = ["sn_networking/local", "test_utils/local"] +local = ["sn_networking/local", "test_utils/local", "sn_evm/local"] otlp = ["sn_logging/otlp"] metrics = ["sn_logging/process-metrics"] network-contacts = ["sn_peers_acquisition/network-contacts"] diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 7b5756cd0b..802c6696a8 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -17,7 +17,7 @@ use clap::{command, Parser}; use color_eyre::{eyre::eyre, Result}; use const_hex::traits::FromHex; use libp2p::{identity::Keypair, PeerId}; -use sn_evm::{EvmNetwork, RewardsAddress}; +use sn_evm::{get_evm_network_from_env, EvmNetwork, RewardsAddress}; #[cfg(feature = "metrics")] use sn_logging::metrics::init_metrics; use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; @@ -253,22 +253,12 @@ fn main() -> Result<()> { return Ok(()); } - #[cfg(feature = "local")] - let evm_network = sn_evm::utils::local_evm_network_from_csv()?; - #[cfg(not(feature = "local"))] let evm_network: EvmNetwork = opt .evm_network .as_ref() .cloned() - .map(|v| v.into()) - .unwrap_or_else(|| { - sn_evm::evm::network_from_env() - .expect("Failed to get EVM network from environment variables") - }); - if matches!(evm_network, EvmNetwork::Custom(_)) { - println!("Using custom EVM network"); - info!("Using custom EVM network {evm_network:?}"); - } + .map(|v| Ok(v.into())) + .unwrap_or_else(get_evm_network_from_env)?; println!("EVM network: {evm_network:?}"); let node_socket_addr = SocketAddr::new(opt.ip, opt.port); diff --git 
a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs index a046609e4e..a77893f609 100644 --- a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs +++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use clap::Subcommand; -use color_eyre::eyre::Result; -use sn_evm::{utils::local_evm_network_from_csv, EvmNetwork}; +use color_eyre::{eyre::Result, Section}; +use sn_evm::{utils::get_evm_network_from_env, EvmNetwork}; #[derive(Subcommand, Clone, Debug)] #[allow(clippy::enum_variant_names)] @@ -42,7 +42,13 @@ impl TryInto for EvmNetworkCommand { match self { Self::EvmArbitrumOne => Ok(EvmNetwork::ArbitrumOne), Self::EvmLocal => { - let network = local_evm_network_from_csv()?; + if !cfg!(feature = "local") { + return Err(color_eyre::eyre::eyre!( + "The 'local' feature flag is not enabled." + )) + .suggestion("Enable the 'local' feature flag to use the local EVM testnet."); + } + let network = get_evm_network_from_env()?; Ok(network) } Self::EvmCustom { diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index b7443df991..1e7731b153 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -10,11 +10,12 @@ use color_eyre::{ eyre::{bail, Context}, Result, }; -use evmlib::{utils::network_from_env, wallet::Wallet, Network}; +use evmlib::{utils::get_evm_network_from_env, wallet::Wallet, Network}; use std::env; -pub fn get_funded_wallet() -> Wallet { - let network = network_from_env().expect("Failed to get EVM network from environment variables"); +pub fn get_funded_wallet() -> evmlib::wallet::Wallet { + let network = + get_evm_network_from_env().expect("Failed to get EVM network from environment variables"); if matches!(network, Network::ArbitrumOne) { panic!("You're trying to use ArbitrumOne network. Use a custom network for testing."); } @@ -28,8 +29,8 @@ pub fn get_funded_wallet() -> Wallet { } pub fn get_new_wallet() -> Result { - let network = - network_from_env().wrap_err("Failed to get EVM network from environment variables")?; + let network = get_evm_network_from_env() + .wrap_err("Failed to get EVM network from environment variables")?; if matches!(network, Network::ArbitrumOne) { bail!("You're trying to use ArbitrumOne network. Use a custom network for testing."); } From 2cf6fe0a96046dfc4a1f59af6f1d4f1f49658c8a Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 14 Oct 2024 11:47:34 +0200 Subject: [PATCH 168/255] chore(launchpad): more detail on error messages when scaling nodes --- node-launchpad/src/node_mgmt.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 893523f245..0ee2c22294 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -410,9 +410,9 @@ async fn add_nodes( { if let Err(err) = action_sender.send(Action::StatusActions( StatusActions::ErrorScalingUpNodes { - raw_error: "When trying to add a node, we failed.\n\n\ - Maybe you ran out of disk space?\n\n\ - Maybe you need to change the port range?\n\n" + raw_error: "When trying to add a node, we failed.\n\ + Maybe you ran out of disk space?\n\ + Maybe you need to change the port range?" 
.to_string(),
                },
            )) {
@@ -432,7 +432,9 @@ async fn add_nodes(
             if let Err(err) =
                 action_sender.send(Action::StatusActions(StatusActions::ErrorScalingUpNodes {
                     raw_error: format!(
-                        "When trying run a node, we reached the maximum amount of retries ({}).",
+                        "When trying to run a node, we reached the maximum number of retries ({}).\n\
+                        Could a firewall be blocking nodes from starting?\n\
+                        Or are ports on your router already in use?",
                         NODE_ADD_MAX_RETRIES
                     ),
                 }))

From 5579c8b4c60d464492ed426b9ce817caa5032c60 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Sat, 12 Oct 2024 14:39:10 +0530
Subject: [PATCH 169/255] feat(autonomi): enable files benchmark

---
 Cargo.lock                    |   5 +
 autonomi_cli/Cargo.toml       |  15 +++
 autonomi_cli/benches/files.rs | 188 ++++++++++++++++++++++++++++++++++
 test_utils/src/evm.rs         |   2 +-
 4 files changed, 209 insertions(+), 1 deletion(-)
 create mode 100644 autonomi_cli/benches/files.rs

diff --git a/Cargo.lock b/Cargo.lock
index 14c241bde9..f5267b6c79 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1118,11 +1118,16 @@ dependencies = [
 "autonomi",
 "clap",
 "color-eyre",
+ "criterion",
 "dirs-next",
+ "eyre",
 "indicatif",
+ "rand 0.8.5",
+ "rayon",
 "sn_build_info",
 "sn_logging",
 "sn_peers_acquisition",
+ "tempfile",
 "tokio",
 "tracing",
]
diff --git a/autonomi_cli/Cargo.toml b/autonomi_cli/Cargo.toml
index 7b1d0e948b..55a7caad32 100644
--- a/autonomi_cli/Cargo.toml
+++ b/autonomi_cli/Cargo.toml
@@ -9,6 +9,10 @@ local = ["sn_peers_acquisition/local", "autonomi/local"]
metrics = ["sn_logging/process-metrics"]
network-contacts = ["sn_peers_acquisition/network-contacts"]

+[[bench]]
+name = "files"
+harness = false
+
[dependencies]
autonomi = { path = "../autonomi", version = "0.1.0", features = [
    "data",
@@ -34,5 +38,16 @@ sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.0" }
sn_build_info = { path = "../sn_build_info", version = "0.1.11" }
sn_logging = { path = "../sn_logging", version = "0.2.33" }

+[dev-dependencies]
+autonomi = { path = "../autonomi", version = "0.1.0", features = [
+    "data",
+    "fs",
+] }
+eyre = "0.6.8"
+criterion = "0.5.1"
+tempfile = "3.6.0"
+rand = { version = "~0.8.5", features = ["small_rng"] }
+rayon = "1.8.0"
+
[lints]
workspace = true
diff --git a/autonomi_cli/benches/files.rs b/autonomi_cli/benches/files.rs
new file mode 100644
index 0000000000..f545936334
--- /dev/null
+++ b/autonomi_cli/benches/files.rs
@@ -0,0 +1,188 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use criterion::{criterion_group, criterion_main, Criterion, Throughput};
+use rand::{thread_rng, Rng};
+use rayon::prelude::{IntoParallelIterator, ParallelIterator};
+use std::{
+    collections::HashSet,
+    fs::File,
+    io::Write,
+    path::{Path, PathBuf},
+    process::{exit, Command},
+    time::Duration,
+};
+use tempfile::tempdir;
+
+const SAMPLE_SIZE: usize = 20;
+
+// Default deployer wallet of the testnet.
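+// (Assumption for readers: this is Anvil's well-known first dev-account key, so
+// it is expected to be pre-funded on the local testnet; it must never be used
+// outside local benchmarking.)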
+const DEFAULT_WALLET_PRIVATE_KEY: &str =
+    "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80";
+
+// This procedure includes the client startup, which will be measured by criterion as well.
+// As a normal user won't care much about initial client startup,
+// but will be more alert to communication speed during transmission,
+// it is better to execute the bench test with `local`,
+// so that the measurement results reflect speed improvements or regressions more accurately.
+fn autonomi_file_upload(dir: &str) -> String {
+    let autonomi_cli_path = get_cli_path();
+    let output = Command::new(autonomi_cli_path)
+        .arg("file")
+        .arg("upload")
+        .arg(dir)
+        .output()
+        .expect("Failed to execute command");
+
+    if !output.status.success() {
+        let err = output.stderr;
+        let err_string = String::from_utf8(err).expect("Failed to parse error string");
+        panic!("Upload command executed with failing error code: {err_string:?}");
+    } else {
+        let out = output.stdout;
+        let out_string = String::from_utf8(out).expect("Failed to parse output string");
+        let address = out_string
+            .lines()
+            .find(|line| line.starts_with("At address:"))
+            .expect("Failed to find the address of the uploaded file");
+        let address = address.trim_start_matches("At address: ");
+        address.to_string()
+    }
+}
+
+fn autonomi_file_download(uploaded_files: HashSet<String>) {
+    let autonomi_cli_path = get_cli_path();
+
+    let temp_dir = tempdir().expect("Failed to create temp dir");
+    for address in uploaded_files.iter() {
+        let output = Command::new(autonomi_cli_path.clone())
+            .arg("file")
+            .arg("download")
+            .arg(address)
+            .arg(temp_dir.path())
+            .output()
+            .expect("Failed to execute command");
+
+        if !output.status.success() {
+            let err = output.stderr;
+            let err_string = String::from_utf8(err).expect("Failed to parse error string");
+            panic!("Download command executed with failing error code: {err_string:?}");
+        }
+    }
+}
+
+fn generate_file(path: &PathBuf, file_size_mb: usize) {
+    let mut file = File::create(path).expect("Failed to create file");
+    let mut rng = thread_rng();
+
+    // can create a [u8; 32] at most at a time. Thus each MB has 1024*32 such small chunks
+    let n_small_chunks = file_size_mb * 1024 * 32;
+    for _ in 0..n_small_chunks {
+        let random_data: [u8; 32] = rng.gen();
+        file.write_all(&random_data)
+            .expect("Failed to write to file");
+    }
+    let size = file.metadata().expect("Failed to get metadata").len() as f64 / (1024 * 1024) as f64;
+    assert_eq!(file_size_mb as f64, size);
+}
+
+fn get_cli_path() -> PathBuf {
+    let mut path = PathBuf::new();
+    if let Ok(val) = std::env::var("CARGO_TARGET_DIR") {
+        path.push(val);
+    } else {
+        path.push("target");
+    }
+    path.push("release");
+    path.push("autonomi_cli");
+    path
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+    // Check if the binary exists
+    let cli_path = get_cli_path();
+    if !Path::new(&cli_path).exists() {
+        eprintln!("Error: Binary {cli_path:?} does not exist. Please make sure to compile your project first");
+        exit(1);
+    }
+
+    if std::env::var("SECRET_KEY").is_err() {
+        std::env::set_var("SECRET_KEY", DEFAULT_WALLET_PRIVATE_KEY);
+    }
+
+    let sizes: [u64; 2] = [1, 10]; // File sizes in MB. Add more sizes as needed
+    let mut uploaded_files = HashSet::new();
+
+    for size in sizes.iter() {
+        let temp_dir = tempdir().expect("Failed to create temp dir");
+        let temp_dir_path = temp_dir.into_path();
+        let temp_dir_path_str = temp_dir_path.to_str().expect("Invalid unicode encountered");
+
+        // create 23 random files.
This is to keep the benchmark results consistent with prior runs. The change to make
+        // use of ChunkManager means that we don't upload the same file twice and the `uploaded_files` file is now read
+        // as a set and we don't download the same file twice. Hence we create 23 files, as counted from the logs
+        // pre the ChunkManager change.
+        (0..23).into_par_iter().for_each(|idx| {
+            let path = temp_dir_path.join(format!("random_file_{size}_mb_{idx}"));
+            generate_file(&path, *size as usize);
+        });
+
+        // Wait a little bit for the funds to settle.
+        std::thread::sleep(Duration::from_secs(10));
+
+        let mut group = c.benchmark_group(format!("Upload Benchmark {size}MB"));
+        group.sampling_mode(criterion::SamplingMode::Flat);
+        // One sample may be composed of multiple iterations, and this is decided by `measurement_time`.
+        // Set this to a lower value to ensure each sample only contains one iteration,
+        // so that the download throughput calculation is correct.
+        group.measurement_time(Duration::from_secs(5));
+        group.warm_up_time(Duration::from_secs(5));
+        group.sample_size(SAMPLE_SIZE);
+
+        // Set the throughput to be reported in terms of bytes
+        group.throughput(Throughput::Bytes(size * 1024 * 1024));
+        let bench_id = format!("autonomi files upload {size}mb");
+        group.bench_function(bench_id, |b| {
+            b.iter(|| {
+                let uploaded_address = autonomi_file_upload(temp_dir_path_str);
+                uploaded_files.insert(uploaded_address);
+            })
+        });
+        group.finish();
+    }
+
+    let mut group = c.benchmark_group("Download Benchmark".to_string());
+    group.sampling_mode(criterion::SamplingMode::Flat);
+    group.measurement_time(Duration::from_secs(10));
+    group.warm_up_time(Duration::from_secs(5));
+
+    // The download bench downloads all the uploaded files during each run.
+    // If the previous bench executed with the default 100 sample size,
+    // there would then be around 1.1GB in total, and each iteration may take around 40s.
+    // Hence we have to reduce the number of iterations from the default 100 to 10,
+    // to avoid the benchmark test taking over one hour to complete.
+    //
+    // During `measurement_time` and `warm_up_time`, there will be one upload run for each,
+    // which means two additional `uploaded_files` entries are created and downloaded.
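+    // As a concrete worked example (illustrative only, using the defaults above):
+    // with SAMPLE_SIZE = 20 and sizes = [1, 10], the sum below is
+    // (20 + 2) * 1 + (20 + 2) * 10 = 242 (MB).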
+ let total_size: u64 = sizes + .iter() + .map(|size| (SAMPLE_SIZE as u64 + 2) * size) + .sum(); + group.sample_size(SAMPLE_SIZE / 2); + + // Set the throughput to be reported in terms of bytes + group.throughput(Throughput::Bytes(total_size * 1024 * 1024)); + let bench_id = "autonomi files download".to_string(); + group.bench_function(bench_id, |b| { + b.iter(|| autonomi_file_download(uploaded_files.clone())) + }); + group.finish(); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/test_utils/src/evm.rs b/test_utils/src/evm.rs index 1e7731b153..05eb710bde 100644 --- a/test_utils/src/evm.rs +++ b/test_utils/src/evm.rs @@ -23,7 +23,7 @@ pub fn get_funded_wallet() -> evmlib::wallet::Wallet { const DEFAULT_WALLET_PRIVATE_KEY: &str = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; - let private_key = env::var("EVM_PRIVATE_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); + let private_key = env::var("SECRET_KEY").unwrap_or(DEFAULT_WALLET_PRIVATE_KEY.to_string()); Wallet::new_from_private_key(network, &private_key).expect("Invalid private key") } From e11e78716521f487a6834765202dc56d465658c1 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Sun, 13 Oct 2024 12:06:20 +0530 Subject: [PATCH 170/255] fix(ci): run benchmarks charts with the evm testnet --- .../workflows/generate-benchmark-charts.yml | 35 +++++++------------ 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index 6c69dc7d1b..4b077c7cc8 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -15,7 +15,7 @@ permissions: env: CARGO_INCREMENTAL: "0" RUST_BACKTRACE: 1 - CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi_cli NODE_DATA_PATH: /home/runner/.local/share/safe/node jobs: @@ -45,43 +45,29 @@ jobs: shell: bash run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - - name: Build node and client - run: cargo build --release --features local --bin safenode --bin safe - timeout-minutes: 30 - - - name: Build faucet bin - run: cargo build --release --bin faucet --features local --features gifting + - name: Build node and cli binaries + run: cargo build --release --features local --bin safenode --bin autonomi_cli timeout-minutes: 30 - name: Start a local network uses: maidsafe/sn-local-testnet-action@main - env: - SN_LOG: "all" with: action: start - interval: 2000 + enable-evm-testnet: true node-path: target/release/safenode - faucet-path: target/release/faucet platform: ubuntu-latest build: true - - - name: Create and fund a wallet to pay for files storage - run: | - cargo run --bin faucet --release -- --log-output-dest=data-dir send 1000000 $(cargo run --bin safe --release -- wallet address | tail -n 1) | tail -n 1 > transfer_hex - cargo run --bin safe --release -- wallet receive --file transfer_hex - env: - SN_LOG: "all" - timeout-minutes: 10 + sn-log: "all" ######################## ### Benchmark ### ######################## - - name: Bench `safe` cli + - name: Bench `autonomi` cli shell: bash # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # passes to tee which displays it in the terminal and writes to output.txt run: | - cargo criterion --features=local --message-format=json 2>&1 -p sn_cli | tee -a output.txt + cargo criterion --features=local --message-format=json 
2>&1 -p autonomi_cli | tee -a output.txt cat output.txt | rg benchmark-complete | jq -s 'map({ name: (.id | split("/"))[-1], unit: "MiB/s", @@ -107,9 +93,14 @@ jobs: auto-push: true max-items-in-chart: 300 + # FIXME: do this in a generic way for localtestnets + - name: export default secret key + run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV + shell: bash + - name: Start a client instance to compare memory usage shell: bash - run: cargo run --bin safe --release -- --log-output-dest=data-dir files upload the-test-data.zip --retry-strategy quick + run: cargo run --bin autonomi_cli --release -- --log-output-dest=data-dir file upload the-test-data.zip env: SN_LOG: "all" From 348827890aaa5c1f13a64c5b1f31782b6efbcbec Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 14 Oct 2024 15:33:17 +0530 Subject: [PATCH 171/255] chore: tryout --- .../workflows/generate-benchmark-charts.yml | 62 +++---------------- 1 file changed, 7 insertions(+), 55 deletions(-) diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index 4b077c7cc8..e0fdc9c861 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -2,15 +2,8 @@ name: Benchmark Chart Generation # Do not run this workflow on pull request since this workflow has permission to modify contents. on: - push: - branches: - - main - -permissions: - # deployments permission to deploy GitHub pages website - deployments: write - # contents permission to update benchmark contents in gh-pages branch - contents: write + pull_request: + branches: ["*"] env: CARGO_INCREMENTAL: "0" @@ -74,26 +67,11 @@ jobs: value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) })' > files-benchmark.json - - name: Remove git hooks so gh-pages git commits will work - shell: bash - run: rm -rf .git/hooks/pre-commit - - name: check files-benchmark.json shell: bash run: cat files-benchmark.json - # gh-pages branch is updated and pushed automatically with extracted benchmark data - - name: Store cli files benchmark result - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "`safe files` benchmarks" - tool: "customBiggerIsBetter" - output-file-path: files-benchmark.json - github-token: ${{ secrets.GITHUB_TOKEN }} - auto-push: true - max-items-in-chart: 300 - - # FIXME: do this in a generic way for localtestnets + # FIXME: do this in a generic way for localtestnets - name: export default secret key run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV shell: bash @@ -144,16 +122,6 @@ jobs: shell: bash run: cat node_memory_usage.json - - name: Upload Node Memory Usage - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "Node memory" - tool: "customSmallerIsBetter" - output-file-path: node_memory_usage.json - github-token: ${{ secrets.GITHUB_TOKEN }} - auto-push: true - max-items-in-chart: 300 - - name: Check client memory usage shell: bash run: | @@ -196,16 +164,6 @@ jobs: shell: bash run: cat client_memory_usage.json - - name: Upload Client Memory Usage - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "Client memory" - tool: "customSmallerIsBetter" - output-file-path: client_memory_usage.json - github-token: ${{ secrets.GITHUB_TOKEN }} - auto-push: true - max-items-in-chart: 300 - 
########################################### ### Swarm_driver handling time Analysis ### ########################################### @@ -270,13 +228,7 @@ jobs: - name: check swarm_driver_long_handlings.json shell: bash run: cat swarm_driver_long_handlings.json - - - name: Upload swarm_driver Long Handlings - uses: benchmark-action/github-action-benchmark@v1 - with: - name: "swarm_driver long handlings" - tool: "customSmallerIsBetter" - output-file-path: swarm_driver_long_handlings.json - github-token: ${{ secrets.GITHUB_TOKEN }} - auto-push: true - max-items-in-chart: 300 + + - name: Setup tmate session + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 \ No newline at end of file From 71a986a4a33f67351708ca5198dfe8b8bb69eb89 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 14 Oct 2024 13:38:55 +0200 Subject: [PATCH 172/255] chore(launchpad): refactor status screen --- node-launchpad/src/components/status.rs | 676 ++++++++++++++---------- 1 file changed, 391 insertions(+), 285 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 43e0970782..3061c4dd5b 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -30,7 +30,7 @@ use crate::{ clear_area, EUCALYPTUS, GHOST_WHITE, LIGHT_PERIWINKLE, VERY_LIGHT_AZURE, VIVID_SKY_BLUE, }, }; -use color_eyre::eyre::{OptionExt, Result}; +use color_eyre::eyre::{Ok, OptionExt, Result}; use crossterm::event::KeyEvent; use ratatui::text::Span; use ratatui::{prelude::*, widgets::*}; @@ -40,7 +40,7 @@ use sn_peers_acquisition::PeersArgs; use sn_service_management::{ control::ServiceController, NodeRegistry, NodeServiceData, ServiceStatus, }; -use std::collections::HashMap; +use std::fmt; use std::{ path::PathBuf, time::{Duration, Instant}, @@ -50,27 +50,42 @@ use tokio::sync::mpsc::UnboundedSender; use super::super::node_mgmt::{maintain_n_running_nodes, reset_nodes, stop_nodes}; -use throbber_widgets_tui::{self, ThrobberState}; +use throbber_widgets_tui::{self, Throbber, ThrobberState}; pub const NODE_STAT_UPDATE_INTERVAL: Duration = Duration::from_secs(5); /// If nat detection fails for more than 3 times, we don't want to waste time running during every node start. const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; +// Table Widths +const NODE_WIDTH: usize = 10; +const VERSION_WIDTH: usize = 7; +const NANOS_WIDTH: usize = 5; +const MEMORY_WIDTH: usize = 7; +const MBPS_WIDTH: usize = 15; +const RECORDS_WIDTH: usize = 4; +const PEERS_WIDTH: usize = 5; +const CONNS_WIDTH: usize = 5; +const STATUS_WIDTH: usize = 8; +const SPINNER_WIDTH: usize = 1; + #[derive(Clone)] -pub struct Status { +pub struct Status<'a> { /// Whether the component is active right now, capturing keystrokes + drawing things. active: bool, action_sender: Option>, config: Config, - // state - node_services: Vec, - node_services_throttle_state: HashMap, + // NAT is_nat_status_determined: bool, error_while_running_nat_detection: usize, + // Device Stats Section node_stats: NodeStats, node_stats_last_update: Instant, - node_table_state: TableState, + // Nodes + node_services: Vec, + items: Option>>, + // Amount of nodes nodes_to_start: usize, + // Discord username discord_username: String, // Currently the node registry file does not support concurrent actions and thus can lead to // inconsistent state. Another solution would be to have a file lock/db. 
@@ -108,21 +123,20 @@ pub struct StatusConfig { pub port_to: Option, } -impl Status { +impl Status<'_> { pub async fn new(config: StatusConfig) -> Result { let mut status = Self { peers_args: config.peers_args, action_sender: Default::default(), config: Default::default(), active: true, - node_services: Default::default(), - node_services_throttle_state: HashMap::new(), is_nat_status_determined: false, error_while_running_nat_detection: 0, node_stats: NodeStats::default(), node_stats_last_update: Instant::now(), + node_services: Default::default(), + items: None, nodes_to_start: config.allocated_disk_space, - node_table_state: Default::default(), lock_registry: None, discord_username: config.discord_username, safenode_path: config.safenode_path, @@ -133,6 +147,7 @@ impl Status { error_popup: None, }; + // Nodes registry let now = Instant::now(); debug!("Refreshing node registry states on startup"); let mut node_registry = NodeRegistry::load(&get_node_registry_path()?)?; @@ -151,6 +166,94 @@ impl Status { Ok(status) } + fn update_node_items(&mut self) -> Result<()> { + // Iterate over existing node services and update their corresponding NodeItem + if let Some(ref mut items) = self.items { + for (node_item, item) in self.node_services.iter().zip(&mut items.items) { + if node_item.status == ServiceStatus::Removed { + continue; // Skip removed nodes + } + + // Update status based on current node status + item.status = match node_item.status { + ServiceStatus::Running => { + // Call calc_next on the spinner state + item.spinner_state.calc_next(); + NodeStatus::Running + } + ServiceStatus::Stopped => NodeStatus::Stopped, + ServiceStatus::Added => NodeStatus::Added, + ServiceStatus::Removed => NodeStatus::Removed, + }; + + // Starting is not part of ServiceStatus so we do it manually + if let Some(LockRegistryState::StartingNodes) = self.lock_registry { + item.spinner_state.calc_next(); + item.status = NodeStatus::Starting; + } + + // Update peers count + item.peers = match node_item.connected_peers { + Some(ref peers) => peers.len(), + None => 0, + }; + + // Update individual stats if available + if let Some(stats) = self + .node_stats + .individual_stats + .iter() + .find(|s| s.service_name == node_item.service_name) + { + item.nanos = stats.forwarded_rewards; + item.memory = stats.memory_usage_mb; + item.mbps = format!( + "↓{:06.2} ↑{:06.2}", + stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), + stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) + ); + item.records = stats.max_records; + item.connections = stats.connections; + } + } + } else { + // If items is None, create a new list (fallback) + let node_items: Vec = self + .node_services + .iter() + .filter_map(|node_item| { + if node_item.status == ServiceStatus::Removed { + return None; + } + // Update status based on current node status + let status = match node_item.status { + ServiceStatus::Running => NodeStatus::Running, + ServiceStatus::Stopped => NodeStatus::Stopped, + ServiceStatus::Added => NodeStatus::Added, + ServiceStatus::Removed => NodeStatus::Removed, + }; + + // Create a new NodeItem for the first time + Some(NodeItem { + name: node_item.service_name.clone().to_string(), + version: node_item.version.to_string(), + nanos: 0, + memory: 0, + mbps: "-".to_string(), + records: 0, + peers: 0, + connections: 0, + status, + spinner: Throbber::default(), + spinner_state: ThrobberState::default(), + }) + }) + .collect(); + self.items = Some(StatefulTable::with_items(node_items)); + } + Ok(()) + } + /// Tries to trigger the 
update of node stats if the last update was more than `NODE_STAT_UPDATE_INTERVAL` ago. /// The result is sent via the StatusActions::NodesStatsObtained action. fn try_update_node_stats(&mut self, force_update: bool) -> Result<()> { @@ -180,11 +283,6 @@ impl Status { self.node_services.len() ); - if !self.node_services.is_empty() && self.node_table_state.selected().is_none() { - // self.node_table_state.select(Some(0)); - self.node_table_state.select(None); - } - Ok(()) } @@ -207,37 +305,9 @@ impl Status { }) .collect() } - - fn _select_next_table_item(&mut self) { - let i = match self.node_table_state.selected() { - Some(i) => { - if i >= self.node_services.len() - 1 { - 0 - } else { - i + 1 - } - } - None => 0, - }; - self.node_table_state.select(Some(i)); - } - - fn _select_previous_table_item(&mut self) { - let i = match self.node_table_state.selected() { - Some(i) => { - if i == 0 { - self.node_services.len() - 1 - } else { - i - 1 - } - } - None => 0, - }; - self.node_table_state.select(Some(i)); - } } -impl Component for Status { +impl Component for Status<'_> { fn register_action_handler(&mut self, tx: UnboundedSender) -> Result<()> { self.action_sender = Some(tx); @@ -264,9 +334,7 @@ impl Component for Status { match action { Action::Tick => { self.try_update_node_stats(false)?; - for (_spinner_key, spinner_state) in self.node_services_throttle_state.iter_mut() { - spinner_state.calc_next(); // Assuming calc_next() is a method of ThrobberState - } + let _ = self.update_node_items(); } Action::SwitchScene(scene) => match scene { Scene::Status | Scene::StatusBetaProgrammePopUp => { @@ -636,262 +704,136 @@ impl Component for Status { // ==== Node Status ===== - // Widths - const NODE_WIDTH: usize = 10; - const VERSION_WIDTH: usize = 7; - const NANOS_WIDTH: usize = 5; - const MEMORY_WIDTH: usize = 7; - const MBPS_WIDTH: usize = 13; - const RECORDS_WIDTH: usize = 4; - const PEERS_WIDTH: usize = 5; - const CONNS_WIDTH: usize = 5; - const STATUS_WIDTH: usize = 8; - const SPINNER_WIDTH: usize = 1; - - let node_rows: Vec<_> = self - .node_services - .iter() - .filter_map(|n| { - if n.status == ServiceStatus::Removed { - return None; - } - - let mut status = format!("{:?}", n.status); - if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - status = "Starting".to_string(); - } - let connected_peers = match n.connected_peers { - Some(ref peers) => format!("{:?}", peers.len()), - None => "0".to_string(), - }; - - let mut nanos = "-".to_string(); - let mut memory = "-".to_string(); - let mut mbps = " -".to_string(); - let mut records = "-".to_string(); - let mut connections = "-".to_string(); - - let individual_stats = self - .node_stats - .individual_stats - .iter() - .find(|s| s.service_name == n.service_name); - if let Some(stats) = individual_stats { - nanos = stats.forwarded_rewards.to_string(); - memory = stats.memory_usage_mb.to_string(); - mbps = format!( - "↓{:05.2} ↑{:05.2}", - stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), - stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) - ); - records = stats.max_records.to_string(); - connections = stats.connections.to_string(); - } - - // Create a row vector - let row = vec![ - n.service_name.clone().to_string(), - n.version.to_string(), + // No nodes. Empty Table. 
+ if let Some(ref items) = self.items { + if items.items.is_empty() { + let line1 = Line::from(vec![ + Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), + Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE).bold()), + Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), + ]); + + let line2 = Line::from(vec![Span::styled( format!( - "{}{}", - " ".repeat(NANOS_WIDTH.saturating_sub(nanos.len())), - nanos.to_string() + "Each node will use {}GB of storage and a small amount of memory, \ + CPU, and Network bandwidth. Most computers can run many nodes at once, \ + but we recommend you add them gradually", + GB_PER_NODE ), - format!( - "{}{} MB", - " ".repeat(MEMORY_WIDTH.saturating_sub(memory.len() + 4)), - memory.to_string() - ), - mbps.to_string(), - format!( - "{}{}", - " ".repeat(RECORDS_WIDTH.saturating_sub(records.len())), - records.to_string() - ), - format!( - "{}{}", - " ".repeat(PEERS_WIDTH.saturating_sub(connected_peers.len())), - connected_peers.to_string() - ), - format!( - "{}{}", - " ".repeat(CONNS_WIDTH.saturating_sub(connections.len())), - connections.to_string() - ), - status.to_string(), + Style::default().fg(LIGHT_PERIWINKLE), + )]); + + f.render_widget( + Paragraph::new(vec![Line::raw(""), line1, Line::raw(""), line2]) + .wrap(Wrap { trim: false }) + .fg(LIGHT_PERIWINKLE) + .block( + Block::default() + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled(" (0) ", Style::default().fg(LIGHT_PERIWINKLE)), + ])) + .title_style(Style::default().fg(LIGHT_PERIWINKLE)) + .borders(Borders::ALL) + .border_style(style::Style::default().fg(EUCALYPTUS)) + .padding(Padding::horizontal(1)), + ), + layout[2], + ); + } else { + // Node/s block + let block_nodes = Block::default() + .title(Line::from(vec![ + Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), + Span::styled( + format!(" ({}) ", self.nodes_to_start), + Style::default().fg(LIGHT_PERIWINKLE), + ), + ])) + .padding(Padding::new(1, 1, 0, 0)) + .title_style(Style::default().fg(GHOST_WHITE)) + .borders(Borders::ALL) + .border_style(Style::default().fg(EUCALYPTUS)); + + // Split the inner area of the combined block + let inner_area = block_nodes.inner(layout[2]); + + // Column Widths + let node_widths = [ + Constraint::Min(NODE_WIDTH as u16), + Constraint::Min(VERSION_WIDTH as u16), + Constraint::Min(NANOS_WIDTH as u16), + Constraint::Min(MEMORY_WIDTH as u16), + Constraint::Min(MBPS_WIDTH as u16), + Constraint::Min(RECORDS_WIDTH as u16), + Constraint::Min(PEERS_WIDTH as u16), + Constraint::Min(CONNS_WIDTH as u16), + Constraint::Min(STATUS_WIDTH as u16), + Constraint::Max(SPINNER_WIDTH as u16), ]; - // Create a styled row - let row_style = if n.status == ServiceStatus::Running { - Style::default().fg(EUCALYPTUS) - } else { - Style::default().fg(GHOST_WHITE) - }; - - Some(Row::new(row).style(row_style)) - }) - .collect(); - - if node_rows.is_empty() { - let line1 = Line::from(vec![ - Span::styled("Press ", Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("[Ctrl+G] ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled("to Add and ", Style::default().fg(LIGHT_PERIWINKLE)), - Span::styled("Start Nodes ", Style::default().fg(GHOST_WHITE).bold()), - Span::styled("on this device", Style::default().fg(LIGHT_PERIWINKLE)), - ]); - - let line2 = Line::from(vec![Span::styled( - format!( - 
"Each node will use {}GB of storage and a small amount of memory, \ - CPU, and Network bandwidth. Most computers can run many nodes at once, \ - but we recommend you add them gradually", - GB_PER_NODE - ), - Style::default().fg(LIGHT_PERIWINKLE), - )]); - - f.render_widget( - Paragraph::new(vec![Line::raw(""), line1, Line::raw(""), line2]) - .wrap(Wrap { trim: false }) - .fg(LIGHT_PERIWINKLE) - .block( - Block::default() - .title(Line::from(vec![ - Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), - Span::styled(" (0) ", Style::default().fg(LIGHT_PERIWINKLE)), - ])) - .title_style(Style::default().fg(LIGHT_PERIWINKLE)) - .borders(Borders::ALL) - .border_style(style::Style::default().fg(EUCALYPTUS)) - .padding(Padding::horizontal(1)), - ), - layout[2], - ); - } else { - // Node/s block - let block_nodes = Block::default() - .title(Line::from(vec![ - Span::styled(" Nodes", Style::default().fg(GHOST_WHITE).bold()), - Span::styled( - format!(" ({}) ", self.nodes_to_start), - Style::default().fg(LIGHT_PERIWINKLE), + // Header + let header_row = Row::new(vec![ + Cell::new("Node").fg(COOL_GREY), + Cell::new("Version").fg(COOL_GREY), + Cell::new("Nanos").fg(COOL_GREY), + Cell::new("Memory").fg(COOL_GREY), + Cell::new( + format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps") + .fg(COOL_GREY), ), - ])) - .padding(Padding::new(1, 1, 0, 0)) - .title_style(Style::default().fg(GHOST_WHITE)) - .borders(Borders::ALL) - .border_style(Style::default().fg(EUCALYPTUS)); - - // Create a layout to arrange the header and table vertically - let inner_layout = Layout::new( - Direction::Vertical, - vec![Constraint::Length(1), Constraint::Min(0)], - ); - - // Split the inner area of the combined block - let inner_area = block_nodes.inner(layout[2]); - let inner_chunks = inner_layout.split(inner_area); - - // Column Widths - let node_widths = [ - Constraint::Min(NODE_WIDTH as u16), - Constraint::Min(VERSION_WIDTH as u16), - Constraint::Min(NANOS_WIDTH as u16), - Constraint::Min(MEMORY_WIDTH as u16), - Constraint::Min(MBPS_WIDTH as u16), - Constraint::Min(RECORDS_WIDTH as u16), - Constraint::Min(PEERS_WIDTH as u16), - Constraint::Min(CONNS_WIDTH as u16), - Constraint::Min(STATUS_WIDTH as u16), - Constraint::Max(SPINNER_WIDTH as u16), - ]; - - // Header - let header_row = Row::new(vec![ - Cell::new("Node").fg(COOL_GREY), - Cell::new("Version").fg(COOL_GREY), - Cell::new("Nanos").fg(COOL_GREY), - Cell::new("Memory").fg(COOL_GREY), - Cell::new( - format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps").fg(COOL_GREY), - ), - Cell::new("Recs").fg(COOL_GREY), - Cell::new("Peers").fg(COOL_GREY), - Cell::new("Conns").fg(COOL_GREY), - Cell::new("Status").fg(COOL_GREY), - Cell::new(" ").fg(COOL_GREY), // Spinner - ]); - - let header = Table::new(vec![header_row.clone()], node_widths) + Cell::new("Recs").fg(COOL_GREY), + Cell::new("Peers").fg(COOL_GREY), + Cell::new("Conns").fg(COOL_GREY), + Cell::new("Status").fg(COOL_GREY), + Cell::new(" ").fg(COOL_GREY), // Spinner + ]) .style(Style::default().add_modifier(Modifier::BOLD)); - // Table items - let table = Table::new(node_rows.clone(), node_widths) - .column_spacing(1) - .highlight_style(Style::default().bg(INDIGO)) - .highlight_spacing(HighlightSpacing::Always); - - f.render_stateful_widget(header, inner_chunks[0], &mut self.node_table_state); - f.render_stateful_widget(table, inner_chunks[1], &mut self.node_table_state); - - // Render the throbber in the last column for running nodes - for (i, node) in self.node_services.iter().enumerate() { - let 
mut throbber = throbber_widgets_tui::Throbber::default() - .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE); - match node.status { - ServiceStatus::Running => { - throbber = throbber - .throbber_style( - Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), - ) - .use_type(throbber_widgets_tui::WhichUse::Spin); - } - ServiceStatus::Stopped => { - throbber = throbber - .throbber_style( - Style::default() - .fg(GHOST_WHITE) - .add_modifier(Modifier::BOLD), - ) - .use_type(throbber_widgets_tui::WhichUse::Full); - } - _ => {} - } - if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - throbber = throbber - .throbber_style( - Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD), - ) - .throbber_set(throbber_widgets_tui::BOX_DRAWING) - .use_type(throbber_widgets_tui::WhichUse::Spin); - } - let throbber_area = - Rect::new(inner_chunks[1].width, inner_chunks[1].y + i as u16, 1, 1); - let throttle_state = self - .node_services_throttle_state - .entry(node.service_name.clone()) - .or_default(); - f.render_stateful_widget(throbber, throbber_area, throttle_state); + let items: Vec = self + .items + .as_mut() + .unwrap() + .items + .iter_mut() + .enumerate() + .map(|(i, node_item)| node_item.to_table_item(i, layout[2], f)) + .collect(); + + // Table items + let table = Table::new(items, node_widths) + .header(header_row) + .column_spacing(1) + .highlight_style(Style::default().bg(INDIGO)) + .highlight_spacing(HighlightSpacing::Always); + + f.render_widget(table, inner_area); + + f.render_widget(block_nodes, layout[2]); } - f.render_widget(block_nodes, layout[2]); } // ==== Footer ===== let footer = Footer::default(); - let footer_state = if !node_rows.is_empty() { - if !self.get_running_nodes().is_empty() { - &mut NodesToStart::Running + let footer_state = if let Some(ref items) = self.items { + if !items.items.is_empty() { + if !self.get_running_nodes().is_empty() { + &mut NodesToStart::Running + } else { + &mut NodesToStart::Configured + } } else { - &mut NodesToStart::Configured + &mut NodesToStart::NotConfigured } } else { &mut NodesToStart::NotConfigured }; f.render_stateful_widget(footer, layout[3], footer_state); - // ===== Popup ===== + // ===== Popups ===== // Error Popup if let Some(error_popup) = &self.error_popup { @@ -987,3 +929,167 @@ impl Component for Status { Ok(vec![]) } } + +#[allow(dead_code)] +#[derive(Default, Clone)] +struct StatefulTable { + state: TableState, + items: Vec, + last_selected: Option, +} + +#[allow(dead_code)] +impl StatefulTable { + fn with_items(items: Vec) -> Self { + StatefulTable { + state: TableState::default(), + items, + last_selected: None, + } + } + + fn next(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i >= self.items.len() - 1 { + 0 + } else { + i + 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } + + fn previous(&mut self) { + let i = match self.state.selected() { + Some(i) => { + if i == 0 { + self.items.len() - 1 + } else { + i - 1 + } + } + None => self.last_selected.unwrap_or(0), + }; + self.state.select(Some(i)); + } +} + +#[derive(Default, Debug, Copy, Clone, PartialEq)] +enum NodeStatus { + #[default] + Added, + Running, + Starting, + Stopped, + Removed, +} + +impl fmt::Display for NodeStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + NodeStatus::Added => write!(f, "Added"), + NodeStatus::Running => write!(f, "Running"), + NodeStatus::Starting => write!(f, "Starting"), + NodeStatus::Stopped => 
write!(f, "Stopped"), + NodeStatus::Removed => write!(f, "Removed"), + } + } +} + +#[derive(Default, Debug, Clone)] +pub struct NodeItem<'a> { + name: String, + version: String, + nanos: u64, + memory: usize, + mbps: String, + records: usize, + peers: usize, + connections: usize, + status: NodeStatus, + spinner: Throbber<'a>, + spinner_state: ThrobberState, +} + +impl NodeItem<'_> { + fn to_table_item(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row { + let mut row_style = Style::default().fg(GHOST_WHITE); + let mut spinner_state = self.spinner_state.clone(); + match self.status { + NodeStatus::Running => { + self.spinner = self + .spinner + .clone() + .throbber_style(Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD)) + .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE) + .use_type(throbber_widgets_tui::WhichUse::Spin); + row_style = Style::default().fg(EUCALYPTUS); + } + NodeStatus::Starting => { + self.spinner = self + .spinner + .clone() + .throbber_style(Style::default().fg(EUCALYPTUS).add_modifier(Modifier::BOLD)) + .throbber_set(throbber_widgets_tui::BOX_DRAWING) + .use_type(throbber_widgets_tui::WhichUse::Spin); + } + NodeStatus::Stopped => { + self.spinner = self + .spinner + .clone() + .throbber_style( + Style::default() + .fg(GHOST_WHITE) + .add_modifier(Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::BRAILLE_SIX_DOUBLE) + .use_type(throbber_widgets_tui::WhichUse::Full); + } + _ => {} + }; + + let row = vec![ + self.name.clone().to_string(), + self.version.to_string(), + format!( + "{}{}", + " ".repeat(NANOS_WIDTH.saturating_sub(self.nanos.to_string().len())), + self.nanos.to_string() + ), + format!( + "{}{} MB", + " ".repeat(MEMORY_WIDTH.saturating_sub(self.memory.to_string().len() + 4)), + self.memory.to_string() + ), + format!( + "{}{}", + " ".repeat(MBPS_WIDTH.saturating_sub(self.mbps.to_string().len())), + self.mbps.to_string() + ), + format!( + "{}{}", + " ".repeat(RECORDS_WIDTH.saturating_sub(self.records.to_string().len())), + self.records.to_string() + ), + format!( + "{}{}", + " ".repeat(PEERS_WIDTH.saturating_sub(self.peers.to_string().len())), + self.peers.to_string() + ), + format!( + "{}{}", + " ".repeat(CONNS_WIDTH.saturating_sub(self.connections.to_string().len())), + self.connections.to_string() + ), + self.status.to_string(), + ]; + let throbber_area = Rect::new(area.width - 2, area.y + 2 + index as u16, 1, 1); + + f.render_stateful_widget(self.spinner.clone(), throbber_area, &mut spinner_state); + + Row::new(row).style(row_style) + } +} From bc0a1ffa4855dbee05a5e9588811d367ffa5bfa4 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 14 Oct 2024 14:10:31 +0200 Subject: [PATCH 173/255] fix(evmlib): load evm net from env wrong var name --- evmlib/src/utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 31d13d413a..a695dd66fa 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -52,7 +52,7 @@ pub fn get_evm_testnet_csv_path() -> Result { } /// Get the `Network` from environment variables -/// Returns an error if the we cannot obtain the network from any means. +/// Returns an error if we cannot obtain the network from any means. 
pub fn get_evm_network_from_env() -> Result { let evm_vars = [ env::var(RPC_URL) @@ -61,7 +61,7 @@ pub fn get_evm_network_from_env() -> Result { env::var(PAYMENT_TOKEN_ADDRESS) .ok() .or_else(|| PAYMENT_TOKEN_ADDRESS_BUILD_TIME_VAL.map(|s| s.to_string())), - env::var(PAYMENT_TOKEN_ADDRESS) + env::var(DATA_PAYMENTS_ADDRESS) .ok() .or_else(|| DATA_PAYMENTS_ADDRESS_BUILD_TIME_VAL.map(|s| s.to_string())), ] From e4481f8027269cc2030ecf069372de6d652f4b0b Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 14 Oct 2024 18:42:24 +0530 Subject: [PATCH 174/255] feat(metrics): move shunned by close group metrics to the main endpoint --- sn_networking/src/metrics/mod.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs index b4837d35fa..feb48bafd6 100644 --- a/sn_networking/src/metrics/mod.rs +++ b/sn_networking/src/metrics/mod.rs @@ -181,6 +181,20 @@ impl NetworkMetricsRecorder { live_time.clone(), ); + let shunned_by_close_group = Gauge::default(); + sub_registry.register( + "shunned_by_close_group", + "The number of close group peers that have shunned our node", + shunned_by_close_group.clone(), + ); + + let shunned_by_old_close_group = Gauge::default(); + sub_registry.register( + "shunned_by_old_close_group", + "The number of close group peers that have shunned our node. This contains the peers that were once in our close group but have since been evicted.", + shunned_by_old_close_group.clone(), + ); + // ==== Extended metrics ===== let extended_metrics_sub_registry = registries @@ -193,25 +207,11 @@ impl NetworkMetricsRecorder { shunned_count_across_time_frames.clone(), ); - let shunned_by_close_group = Gauge::default(); - extended_metrics_sub_registry.register( - "shunned_by_close_group", - "The number of close group peers that have shunned our node", - shunned_by_close_group.clone(), - ); - - let shunned_by_old_close_group = Gauge::default(); - extended_metrics_sub_registry.register( - "shunned_by_old_close_group", - "The number of close group peers that have shunned our node. 
This contains the peers that were once in our close group but have since been evicted.", - shunned_by_old_close_group.clone(), - ); let bad_nodes_notifier = BadNodeMetrics::spawn_background_task( shunned_count_across_time_frames.clone(), shunned_by_close_group.clone(), shunned_by_old_close_group.clone(), ); - let network_metrics = Self { libp2p_metrics, #[cfg(feature = "upnp")] From eaea54fc1bd7dc86f2897c4fd671a727cab41856 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 14 Oct 2024 18:52:05 +0200 Subject: [PATCH 175/255] chore(launchpad): using find and not zip to avoid errors when sync list of nodes --- node-launchpad/src/components/status.rs | 101 ++++++++++++++---------- 1 file changed, 60 insertions(+), 41 deletions(-) diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 3061c4dd5b..1f358d4692 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -169,51 +169,70 @@ impl Status<'_> { fn update_node_items(&mut self) -> Result<()> { // Iterate over existing node services and update their corresponding NodeItem if let Some(ref mut items) = self.items { - for (node_item, item) in self.node_services.iter().zip(&mut items.items) { - if node_item.status == ServiceStatus::Removed { - continue; // Skip removed nodes - } + for node_item in self.node_services.iter() { + // Find the corresponding item by service name + if let Some(item) = items + .items + .iter_mut() + .find(|i| i.name == node_item.service_name) + { + // Update status based on current node status + item.status = match node_item.status { + ServiceStatus::Running => { + // Call calc_next on the spinner state + item.spinner_state.calc_next(); + NodeStatus::Running + } + ServiceStatus::Stopped => NodeStatus::Stopped, + ServiceStatus::Added => NodeStatus::Added, + ServiceStatus::Removed => NodeStatus::Removed, + }; - // Update status based on current node status - item.status = match node_item.status { - ServiceStatus::Running => { - // Call calc_next on the spinner state + // Starting is not part of ServiceStatus so we do it manually + if let Some(LockRegistryState::StartingNodes) = self.lock_registry { item.spinner_state.calc_next(); - NodeStatus::Running + item.status = NodeStatus::Starting; } - ServiceStatus::Stopped => NodeStatus::Stopped, - ServiceStatus::Added => NodeStatus::Added, - ServiceStatus::Removed => NodeStatus::Removed, - }; - - // Starting is not part of ServiceStatus so we do it manually - if let Some(LockRegistryState::StartingNodes) = self.lock_registry { - item.spinner_state.calc_next(); - item.status = NodeStatus::Starting; - } - // Update peers count - item.peers = match node_item.connected_peers { - Some(ref peers) => peers.len(), - None => 0, - }; - - // Update individual stats if available - if let Some(stats) = self - .node_stats - .individual_stats - .iter() - .find(|s| s.service_name == node_item.service_name) - { - item.nanos = stats.forwarded_rewards; - item.memory = stats.memory_usage_mb; - item.mbps = format!( - "↓{:06.2} ↑{:06.2}", - stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), - stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) - ); - item.records = stats.max_records; - item.connections = stats.connections; + // Update peers count + item.peers = match node_item.connected_peers { + Some(ref peers) => peers.len(), + None => 0, + }; + + // Update individual stats if available + if let Some(stats) = self + .node_stats + .individual_stats + .iter() + .find(|s| s.service_name == 
node_item.service_name) + { + item.nanos = stats.forwarded_rewards; + item.memory = stats.memory_usage_mb; + item.mbps = format!( + "↓{:06.2} ↑{:06.2}", + stats.bandwidth_inbound as f64 / (1024_f64 * 1024_f64), + stats.bandwidth_outbound as f64 / (1024_f64 * 1024_f64) + ); + item.records = stats.max_records; + item.connections = stats.connections; + } + } else { + // If not found, create a new NodeItem and add it to items + let new_item = NodeItem { + name: node_item.service_name.clone(), + version: node_item.version.to_string(), + nanos: 0, + memory: 0, + mbps: "-".to_string(), + records: 0, + peers: 0, + connections: 0, + status: NodeStatus::Added, // Set initial status as Added + spinner: Throbber::default(), + spinner_state: ThrobberState::default(), + }; + items.items.push(new_item); } } } else { From daad4431a9e28f7af7aa76ef7ef099ae3d80aeda Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 14 Oct 2024 22:29:25 +0530 Subject: [PATCH 176/255] fix(autonomi): log the process metrics --- autonomi_cli/src/main.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs index d655b2cf0a..944f401f88 100644 --- a/autonomi_cli/src/main.rs +++ b/autonomi_cli/src/main.rs @@ -22,6 +22,8 @@ use clap::Parser; use color_eyre::Result; use opt::Opt; +#[cfg(feature = "metrics")] +use sn_logging::metrics::init_metrics; use sn_logging::{LogBuilder, LogFormat, ReloadHandle, WorkerGuard}; use tracing::Level; @@ -30,6 +32,8 @@ async fn main() -> Result<()> { color_eyre::install().expect("Failed to initialise error handler"); let opt = Opt::parse(); let _log_guards = init_logging_and_metrics(&opt)?; + #[cfg(feature = "metrics")] + tokio::spawn(init_metrics(std::process::id())); // Log the full command that was run and the git version info!("\"{}\"", std::env::args().collect::>().join(" ")); From 463a5246fb5f50eb55f4bcac304948768ae280be Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Mon, 14 Oct 2024 22:36:47 +0530 Subject: [PATCH 177/255] Revert "chore: tryout" This reverts commit 348827890aaa5c1f13a64c5b1f31782b6efbcbec. --- .../workflows/generate-benchmark-charts.yml | 62 ++++++++++++++++--- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index e0fdc9c861..4b077c7cc8 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -2,8 +2,15 @@ name: Benchmark Chart Generation # Do not run this workflow on pull request since this workflow has permission to modify contents. 
on: - pull_request: - branches: ["*"] + push: + branches: + - main + +permissions: + # deployments permission to deploy GitHub pages website + deployments: write + # contents permission to update benchmark contents in gh-pages branch + contents: write env: CARGO_INCREMENTAL: "0" @@ -67,11 +74,26 @@ jobs: value: ((if .throughput[0].unit == "KiB/s" then (.throughput[0].per_iteration / (1024*1024*1024)) else (.throughput[0].per_iteration / (1024*1024)) end) / (.mean.estimate / 1e9)) })' > files-benchmark.json + - name: Remove git hooks so gh-pages git commits will work + shell: bash + run: rm -rf .git/hooks/pre-commit + - name: check files-benchmark.json shell: bash run: cat files-benchmark.json - # FIXME: do this in a generic way for localtestnets + # gh-pages branch is updated and pushed automatically with extracted benchmark data + - name: Store cli files benchmark result + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "`safe files` benchmarks" + tool: "customBiggerIsBetter" + output-file-path: files-benchmark.json + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + max-items-in-chart: 300 + + # FIXME: do this in a generic way for localtestnets - name: export default secret key run: echo "SECRET_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" >> $GITHUB_ENV shell: bash @@ -122,6 +144,16 @@ jobs: shell: bash run: cat node_memory_usage.json + - name: Upload Node Memory Usage + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "Node memory" + tool: "customSmallerIsBetter" + output-file-path: node_memory_usage.json + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + max-items-in-chart: 300 + - name: Check client memory usage shell: bash run: | @@ -164,6 +196,16 @@ jobs: shell: bash run: cat client_memory_usage.json + - name: Upload Client Memory Usage + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "Client memory" + tool: "customSmallerIsBetter" + output-file-path: client_memory_usage.json + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + max-items-in-chart: 300 + ########################################### ### Swarm_driver handling time Analysis ### ########################################### @@ -228,7 +270,13 @@ jobs: - name: check swarm_driver_long_handlings.json shell: bash run: cat swarm_driver_long_handlings.json - - - name: Setup tmate session - if: ${{ failure() }} - uses: mxschmitt/action-tmate@v3 \ No newline at end of file + + - name: Upload swarm_driver Long Handlings + uses: benchmark-action/github-action-benchmark@v1 + with: + name: "swarm_driver long handlings" + tool: "customSmallerIsBetter" + output-file-path: swarm_driver_long_handlings.json + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + max-items-in-chart: 300 From f66e0aafba8c902e762a6691a3a9babd75400ce4 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 8 Oct 2024 20:05:08 +0100 Subject: [PATCH 178/255] feat: configure anvil to listen on non-local address For use in a remote network, Anvil must listen on the public IP address of the VM it is running on. The `anvil` binary respects the `ANVIL_IP_ADDR` variable for configuring a listener address. However, it turns out the `alloy` crate is actually hard coded to work with "localhost", so unfortunately the `AnvilInstance::endpoint` function cannot be used to return the resulting RPC URL. For that reason, a new `rpc_url` field was added to the `Testnet` struct to keep track of the non-local listening address. 
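As a minimal sketch of the resulting flow (names as in the diff below; the
`set_var` call is purely illustrative, since in practice `ANVIL_IP_ADDR` would
be exported in the environment before the process starts):

    // Ask the spawned `anvil` child process to bind to a public address.
    std::env::set_var("ANVIL_IP_ADDR", "0.0.0.0");
    // `start_node` now returns the reachable RPC URL alongside the instance,
    // because `anvil.endpoint()` would still report "localhost".
    let (anvil, rpc_url) = start_node();
    let network_token = deploy_network_token_contract(&rpc_url, &anvil).await;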
The deployer wallet private key was also saved in the CSV data because it will be necessary for `testnet-deploy` to obtain this. --- evm_testnet/src/main.rs | 7 +++-- evmlib/src/testnet.rs | 55 ++++++++++++++++++++--------------- evmlib/tests/data_payments.rs | 6 ++-- evmlib/tests/network_token.rs | 4 +-- evmlib/tests/wallet.rs | 8 ++--- 5 files changed, 46 insertions(+), 34 deletions(-) diff --git a/evm_testnet/src/main.rs b/evm_testnet/src/main.rs index 52df961aab..9e7f5a9dfd 100644 --- a/evm_testnet/src/main.rs +++ b/evm_testnet/src/main.rs @@ -156,8 +156,11 @@ impl TestnetData { } let csv = format!( - "{},{},{}", - self.rpc_url, self.payment_token_address, self.data_payments_address + "{},{},{},{}", + self.rpc_url, + self.payment_token_address, + self.data_payments_address, + self.deployer_wallet_private_key ); std::fs::write(&csv_path, csv).expect("Could not write to evm_testnet_data.csv file"); println!("EVM testnet data saved to: {csv_path:?}"); diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 93922b539d..79f7223507 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -9,6 +9,7 @@ use crate::common::Address; use crate::contract::data_payments::DataPaymentsHandler; use crate::contract::network_token::NetworkToken; +use crate::reqwest::Url; use crate::{CustomNetwork, Network}; use alloy::hex::ToHexExt; use alloy::network::{Ethereum, EthereumWallet}; @@ -22,6 +23,7 @@ use alloy::transports::http::{Client, Http}; pub struct Testnet { anvil: AnvilInstance, + rpc_url: Url, network_token_address: Address, data_payments_address: Address, } @@ -29,28 +31,24 @@ pub struct Testnet { impl Testnet { /// Starts an Anvil node and automatically deploys the network token and chunk payments smart contracts. pub async fn new() -> Self { - let anvil = start_node(); + let (anvil, rpc_url) = start_node(); - let network_token = deploy_network_token_contract(&anvil).await; + let network_token = deploy_network_token_contract(&rpc_url, &anvil).await; let data_payments = - deploy_data_payments_contract(&anvil, *network_token.contract.address()).await; + deploy_data_payments_contract(&rpc_url, &anvil, *network_token.contract.address()) + .await; Testnet { anvil, + rpc_url, network_token_address: *network_token.contract.address(), data_payments_address: *data_payments.contract.address(), } } pub fn to_network(&self) -> Network { - let rpc_url = self - .anvil - .endpoint() - .parse() - .expect("Could not parse RPC URL"); - Network::Custom(CustomNetwork { - rpc_url_http: rpc_url, + rpc_url_http: self.rpc_url.clone(), payment_token_address: self.network_token_address, data_payments_address: self.data_payments_address, }) @@ -63,17 +61,31 @@ impl Testnet { } } -/// Runs a local Anvil node. -pub fn start_node() -> AnvilInstance { - // Spin up a local Anvil node. - // Requires you to have Foundry installed: https://book.getfoundry.sh/getting-started/installation - Anvil::new() - .port(4343_u16) +/// Runs a local Anvil node bound to a specified IP address. +/// +/// The `AnvilInstance` `endpoint` function is hardcoded to return "localhost", so we must also +/// return the RPC URL if we want to listen on a different address. +/// +/// The `anvil` binary respects the `ANVIL_IP_ADDR` environment variable, but defaults to "localhost". 
+pub fn start_node() -> (AnvilInstance, Url) {
+    let host = std::env::var("ANVIL_IP_ADDR").unwrap_or_else(|_| "localhost".to_string());
+    let port = std::env::var("ANVIL_PORT")
+        .unwrap_or_else(|_| "4343".to_string())
+        .parse::<u16>()
+        .expect("Invalid port number");
+
+    let anvil = Anvil::new()
+        .port(port)
         .try_spawn()
-        .expect("Could not spawn Anvil node")
+        .expect("Could not spawn Anvil node");
+
+    let url = Url::parse(&format!("http://{}:{}", host, port)).expect("Failed to parse URL");
+
+    (anvil, url)
 }
 
 pub async fn deploy_network_token_contract(
+    rpc_url: &Url,
     anvil: &AnvilInstance,
 ) -> NetworkToken<
     Http<Client>,
@@ -95,18 +107,17 @@
     let signer: PrivateKeySigner = anvil.keys()[0].clone().into();
     let wallet = EthereumWallet::from(signer);
 
-    let rpc_url = anvil.endpoint().parse().expect("Could not parse RPC URL");
-
     let provider = ProviderBuilder::new()
         .with_recommended_fillers()
         .wallet(wallet)
-        .on_http(rpc_url);
+        .on_http(rpc_url.clone());
 
     // Deploy the contract.
     NetworkToken::deploy(provider).await
 }
 
 pub async fn deploy_data_payments_contract(
+    rpc_url: &Url,
     anvil: &AnvilInstance,
     token_address: Address,
 ) -> DataPaymentsHandler<
     Http<Client>,
@@ -129,12 +140,10 @@
     let signer: PrivateKeySigner = anvil.keys()[1].clone().into();
     let wallet = EthereumWallet::from(signer);
 
-    let rpc_url = anvil.endpoint().parse().expect("Could not parse RPC URL");
-
     let provider = ProviderBuilder::new()
         .with_recommended_fillers()
         .wallet(wallet)
-        .on_http(rpc_url);
+        .on_http(rpc_url.clone());
 
     // Deploy the contract.
     DataPaymentsHandler::deploy(provider, token_address).await
diff --git a/evmlib/tests/data_payments.rs b/evmlib/tests/data_payments.rs
index ed9e2ac413..26223cfcc1 100644
--- a/evmlib/tests/data_payments.rs
+++ b/evmlib/tests/data_payments.rs
@@ -58,12 +58,12 @@ async fn setup() -> (
         Ethereum,
     >,
 ) {
-    let anvil = start_node();
+    let (anvil, rpc_url) = start_node();
 
-    let network_token = deploy_network_token_contract(&anvil).await;
+    let network_token = deploy_network_token_contract(&rpc_url, &anvil).await;
 
     let data_payments =
-        deploy_data_payments_contract(&anvil, *network_token.contract.address()).await;
+        deploy_data_payments_contract(&rpc_url, &anvil, *network_token.contract.address()).await;
 
     (anvil, network_token, data_payments)
 }
diff --git a/evmlib/tests/network_token.rs b/evmlib/tests/network_token.rs
index 40ea9ba041..0cc2b1c1eb 100644
--- a/evmlib/tests/network_token.rs
+++ b/evmlib/tests/network_token.rs
@@ -36,9 +36,9 @@ async fn setup() -> (
         Ethereum,
     >,
 ) {
-    let anvil = start_node();
+    let (anvil, rpc_url) = start_node();
 
-    let network_token = deploy_network_token_contract(&anvil).await;
+    let network_token = deploy_network_token_contract(&rpc_url, &anvil).await;
 
     (anvil, network_token)
 }
diff --git a/evmlib/tests/wallet.rs b/evmlib/tests/wallet.rs
index a95ee34eca..905f719fc3 100644
--- a/evmlib/tests/wallet.rs
+++ b/evmlib/tests/wallet.rs
@@ -17,11 +17,11 @@ use std::collections::HashSet;
 
 #[allow(clippy::unwrap_used)]
 async fn local_testnet() -> (AnvilInstance, Network, EthereumWallet) {
-    let anvil = start_node();
-    let rpc_url = anvil.endpoint().parse().unwrap();
-    let network_token = deploy_network_token_contract(&anvil).await;
+    let (anvil, rpc_url) = start_node();
+    let network_token = deploy_network_token_contract(&rpc_url, &anvil).await;
     let payment_token_address = *network_token.contract.address();
-    let data_payments = deploy_data_payments_contract(&anvil, payment_token_address).await;
+
let data_payments = + deploy_data_payments_contract(&rpc_url, &anvil, payment_token_address).await; ( anvil, From f8f6012b2818134c8791da4cc5a8102c1a0b4be7 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 15 Oct 2024 14:18:08 +0900 Subject: [PATCH 179/255] feat: loud mode and vdash log line --- sn_node/Cargo.toml | 1 + sn_node/src/put_validation.rs | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 46a90789d6..b55388d1f6 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -23,6 +23,7 @@ nightly = [] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] encrypt-records = ["sn_networking/encrypt-records"] upnp = ["sn_networking/upnp"] +loud = [] # loud mode: print important messages to console [dependencies] assert_fs = "1.0.0" diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 9124f314ef..382855b9bd 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -637,9 +637,16 @@ impl Node { self.events_channel() .broadcast(crate::NodeEvent::RewardReceived(storecost, address.clone())); - // NB TODO: tell happybeing about the AttoToken change // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): - info!("Total payment of {storecost:?} nanos accepted for record {pretty_key}"); + info!("Total payment of {storecost:?} atto tokens accepted for record {pretty_key}"); + + // loud mode: print a celebratory message to console + #[cfg(feature = "loud")] + { + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟 GOT REWARD 🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); + println!("Total payment of {storecost:?} atto tokens accepted for record {pretty_key}"); + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); + } Ok(()) } From 2172b01e3ec8ba224b057140d286948e5b86abae Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 15 Oct 2024 14:30:46 +0900 Subject: [PATCH 180/255] chore: improve loud msg --- sn_node/src/put_validation.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 382855b9bd..3f3343f403 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -643,9 +643,9 @@ impl Node { // loud mode: print a celebratory message to console #[cfg(feature = "loud")] { - println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟 GOT REWARD 🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟 RECEIVED REWARD 🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); println!("Total payment of {storecost:?} atto tokens accepted for record {pretty_key}"); - println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); + println!("🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟🌟"); } Ok(()) From aea6f6edb60a5ef2a65111a042066a9f7f6137dd Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 15 Oct 2024 11:47:29 +0530 Subject: [PATCH 181/255] fix(test): retry during register create to fix failures from node restarts --- sn_node/tests/data_with_churn.rs | 41 +++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index c372fc0331..347c74dc44 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -282,20 +282,33 @@ fn create_registers_task( sleep(delay).await; - let register = client - .register_create(random_data, &random_name, owner, &wallet) - .await - .inspect_err(|err| { - println!("Error while creating register: {err:?}"); - error!("Error while creating register: {err:?}") - })?; - - let addr = register.address(); - println!("Created new Register ({addr:?}) after a delay of: 
{delay:?}"); - content - .write() - .await - .push_back(NetworkAddress::RegisterAddress(*addr)); + let mut retries = 1; + loop { + match client + .register_create(random_data.clone(), &random_name, owner.clone(), &wallet) + .await + { + Ok(register) => { + let addr = register.address(); + println!("Created new Register ({addr:?}) after a delay of: {delay:?}"); + content + .write() + .await + .push_back(NetworkAddress::RegisterAddress(*addr)); + break; + } + Err(err) => { + println!("Failed to create register: {err:?}. Retrying ..."); + error!("Failed to create register: {err:?}. Retrying ..."); + if retries >= 3 { + println!("Failed to create register after 3 retries: {err}"); + error!("Failed to create register after 3 retries: {err}"); + bail!("Failed to create register after 3 retries: {err}"); + } + retries += 1; + } + } + } } }); handle From 27a4b83b7048ccef05b0f42275f213e4b99c191b Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 15 Oct 2024 15:52:10 +0900 Subject: [PATCH 182/255] feat(networking): ensure we disconnect when removing peers --- sn_networking/src/event/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index ad6e1781b6..fc87e285a2 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -268,6 +268,11 @@ impl SwarmDriver { /// Update state on removal of a peer from the routing table. pub(crate) fn update_on_peer_removal(&mut self, removed_peer: PeerId) { self.peers_in_rt = self.peers_in_rt.saturating_sub(1); + + // ensure we disconnect bad peer + // err result just means no connections were open + let _result = self.swarm.disconnect_peer_id(removed_peer); + info!( "Peer removed from routing table: {removed_peer:?}, now we have #{} connected peers", self.peers_in_rt From 8b4e689f81664120b565f3f97cca32acb90de228 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 15 Oct 2024 15:55:57 +0900 Subject: [PATCH 183/255] feat: print for peers connected and net size in loud mode --- sn_networking/Cargo.toml | 2 +- sn_networking/src/event/mod.rs | 12 ++++++++---- sn_node/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 0b03bb87b1..ce8c88d950 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -18,7 +18,7 @@ upnp = ["libp2p/upnp"] websockets = ["libp2p/tcp"] open-metrics = ["libp2p/metrics", "prometheus-client", "hyper", "sysinfo"] encrypt-records = [] - +loud = [] [dependencies] lazy_static = "~1.4.0" diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index ad6e1781b6..4fa0f51b86 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -245,10 +245,12 @@ impl SwarmDriver { /// Update state on addition of a peer to the routing table. 
pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId) { self.peers_in_rt = self.peers_in_rt.saturating_add(1); - info!( - "New peer added to routing table: {added_peer:?}, now we have #{} connected peers", - self.peers_in_rt - ); + let n_peers = self.peers_in_rt; + info!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + + #[cfg(feature = "loud")] + println!("New peer added to routing table: {added_peer:?}, now we have #{n_peers} connected peers"); + self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); @@ -341,6 +343,8 @@ impl SwarmDriver { } info!("kBucketTable has {index:?} kbuckets {total_peers:?} peers, {kbucket_table_stats:?}, estimated network size: {estimated_network_size:?}"); + #[cfg(feature = "loud")] + println!("Estimated network size: {estimated_network_size:?}"); } /// Estimate the number of nodes in the network diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index b55388d1f6..9e13f59d3d 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -23,7 +23,7 @@ nightly = [] open-metrics = ["sn_networking/open-metrics", "prometheus-client"] encrypt-records = ["sn_networking/encrypt-records"] upnp = ["sn_networking/upnp"] -loud = [] # loud mode: print important messages to console +loud = ["sn_networking/loud"] # loud mode: print important messages to console [dependencies] assert_fs = "1.0.0" From 4461543efc389da07be3abc80d906e25b8e56fc2 Mon Sep 17 00:00:00 2001 From: grumbach Date: Tue, 15 Oct 2024 16:47:21 +0900 Subject: [PATCH 184/255] feat: print of bad bootstrap peer --- sn_networking/src/event/swarm.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 028c791712..982088f102 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -408,6 +408,11 @@ impl SwarmDriver { match err { TransportError::MultiaddrNotSupported(addr) => { warn!("Multiaddr not supported : {addr:?}"); + #[cfg(feature = "loud")] + { + println!("Multiaddr not supported : {addr:?}"); + println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); + } // if we can't dial a peer on a given address, we should remove it from the routing table there_is_a_serious_issue = true } From 403fc3b9141b22f8fe9a279c176cd4367c398c93 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 15 Oct 2024 12:25:37 +0530 Subject: [PATCH 185/255] feat(autonomi): implement client events --- autonomi/src/client/data.rs | 20 ++++++++++++++++++ autonomi/src/client/mod.rs | 36 +++++++++++++++++++++++++++----- autonomi/src/client/registers.rs | 12 +++++++++++ 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/autonomi/src/client/data.rs b/autonomi/src/client/data.rs index cc9af3a267..055016f291 100644 --- a/autonomi/src/client/data.rs +++ b/autonomi/src/client/data.rs @@ -13,6 +13,7 @@ use tokio::task::JoinError; use std::collections::HashSet; use xor_name::XorName; +use crate::client::{ClientEvent, UploadSummary}; use crate::{self_encryption::encrypt, Client}; use sn_evm::{Amount, AttoTokens}; use sn_evm::{EvmWallet, EvmWalletError}; @@ -112,12 +113,15 @@ impl Client { .await .inspect_err(|err| error!("Error paying for data: {err:?}"))?; + let mut record_count = 0; + // Upload data map if let Some(proof) = payment_proofs.get(&map_xor_name) { debug!("Uploading data map chunk: {map_xor_name:?}"); self.chunk_upload_with_payment(data_map_chunk.clone(), proof.clone()) .await 
.inspect_err(|err| error!("Error uploading data map chunk: {err:?}"))?; + record_count += 1; } // Upload the rest of the chunks @@ -128,6 +132,22 @@ impl Client { self.chunk_upload_with_payment(chunk, proof.clone()) .await .inspect_err(|err| error!("Error uploading chunk {address:?} :{err:?}"))?; + record_count += 1; + } + } + + if let Some(channel) = self.client_event_sender.as_ref() { + let tokens_spent = payment_proofs + .values() + .map(|proof| proof.quote.cost.as_atto()) + .sum::(); + + let summary = UploadSummary { + record_count, + tokens_spent, + }; + if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await { + error!("Failed to send client event: {err:?}"); } } diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index a0a691eaa8..6d80c68c01 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -25,16 +25,19 @@ pub mod wasm; // private module with utility functions mod utils; -use std::{collections::HashSet, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use libp2p::{identity::Keypair, Multiaddr}; +use sn_evm::Amount; use sn_networking::{interval, multiaddr_is_global, Network, NetworkBuilder, NetworkEvent}; use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE}; -use tokio::sync::mpsc::Receiver; +use tokio::sync::mpsc; /// Time before considering the connection timed out. pub const CONNECT_TIMEOUT_SECS: u64 = 20; +const CLIENT_EVENT_CHANNEL_SIZE: usize = 100; + /// Represents a connection to the Autonomi network. /// /// # Example @@ -53,6 +56,7 @@ pub const CONNECT_TIMEOUT_SECS: u64 = 20; #[derive(Clone)] pub struct Client { pub(crate) network: Network, + pub(crate) client_event_sender: Arc>>, } /// Error returned by [`Client::connect`]. @@ -103,11 +107,22 @@ impl Client { receiver.await.expect("sender should not close")?; - Ok(Self { network }) + Ok(Self { + network, + client_event_sender: Arc::new(None), + }) + } + + /// Receive events from the client. + pub fn enable_client_events(&mut self) -> mpsc::Receiver { + let (client_event_sender, client_event_receiver) = + tokio::sync::mpsc::channel(CLIENT_EVENT_CHANNEL_SIZE); + self.client_event_sender = Arc::new(Some(client_event_sender)); + client_event_receiver } } -fn build_client_and_run_swarm(local: bool) -> (Network, Receiver) { +fn build_client_and_run_swarm(local: bool) -> (Network, mpsc::Receiver) { let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local); // TODO: Re-export `Receiver` from `sn_networking`. Else users need to keep their `tokio` dependency in sync. @@ -121,7 +136,7 @@ fn build_client_and_run_swarm(local: bool) -> (Network, Receiver) } async fn handle_event_receiver( - mut event_receiver: Receiver, + mut event_receiver: mpsc::Receiver, sender: futures::channel::oneshot::Sender>, ) { // We switch this to `None` when we've sent the oneshot 'connect' result. @@ -180,3 +195,14 @@ async fn handle_event_receiver( // TODO: Handle closing of network events sender } + +/// Events that can be broadcasted by the client. +pub enum ClientEvent { + UploadComplete(UploadSummary), +} + +/// Summary of an upload operation. 
+pub struct UploadSummary {
+    pub record_count: usize,
+    pub tokens_spent: Amount,
+}
diff --git a/autonomi/src/client/registers.rs b/autonomi/src/client/registers.rs
index 41b6e00736..fb3c55fa6c 100644
--- a/autonomi/src/client/registers.rs
+++ b/autonomi/src/client/registers.rs
@@ -17,6 +17,8 @@ pub use sn_registers::{Permissions as RegisterPermissions, RegisterAddress};
 
 use crate::client::data::PayError;
 use crate::client::Client;
+use crate::client::ClientEvent;
+use crate::client::UploadSummary;
 use bytes::Bytes;
 use libp2p::kad::{Quorum, Record};
 use sn_evm::EvmWallet;
@@ -357,6 +359,16 @@ impl Client {
             error!("Failed to put record - register {address} to the network: {err}")
         })?;
 
+        if let Some(channel) = self.client_event_sender.as_ref() {
+            let summary = UploadSummary {
+                record_count: 1,
+                tokens_spent: proof.quote.cost.as_atto(),
+            };
+            if let Err(err) = channel.send(ClientEvent::UploadComplete(summary)).await {
+                error!("Failed to send client event: {err}");
+            }
+        }
+
         Ok(register)
     }
 }

From 68ab20594a8a5147da2e27a63ab34d22beef16b1 Mon Sep 17 00:00:00 2001
From: Lautaro Mazzitelli
Date: Tue, 15 Oct 2024 10:09:48 +0200
Subject: [PATCH 186/255] fix(launchpad): rename function for linter compliance

---
 node-launchpad/src/components/status.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs
index 1f358d4692..69dd9a4d90 100644
--- a/node-launchpad/src/components/status.rs
+++ b/node-launchpad/src/components/status.rs
@@ -818,7 +818,7 @@ impl Component for Status<'_> {
             .items
             .iter_mut()
             .enumerate()
-            .map(|(i, node_item)| node_item.to_table_item(i, layout[2], f))
+            .map(|(i, node_item)| node_item.render_as_row(i, layout[2], f))
             .collect();
 
         // Table items
@@ -1034,7 +1034,7 @@ pub struct NodeItem<'a> {
 }
 
 impl NodeItem<'_> {
-    fn to_table_item(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row {
+    fn render_as_row(&mut self, index: usize, area: Rect, f: &mut Frame<'_>) -> Row {
         let mut row_style = Style::default().fg(GHOST_WHITE);
         let mut spinner_state = self.spinner_state.clone();
         match self.status {

From 12b1d38bfe136875aba2ad9454d9ba386f818866 Mon Sep 17 00:00:00 2001
From: Roland Sherwin
Date: Tue, 15 Oct 2024 13:26:55 +0530
Subject: [PATCH 187/255] feat(autonomi): print upload stats

---
 autonomi/src/client/fs.rs             |  4 +-
 autonomi/src/client/mod.rs            |  4 +-
 autonomi/tests/fs.rs                  |  2 +-
 autonomi_cli/src/commands/file.rs     | 16 +++++++-
 autonomi_cli/src/commands/register.rs | 15 ++++++-
 autonomi_cli/src/main.rs              |  1 +
 autonomi_cli/src/utils.rs             | 56 +++++++++++++++++++++++++++
 7 files changed, 90 insertions(+), 8 deletions(-)
 create mode 100644 autonomi_cli/src/utils.rs

diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs
index ca83442f78..8fff06324c 100644
--- a/autonomi/src/client/fs.rs
+++ b/autonomi/src/client/fs.rs
@@ -74,7 +74,7 @@ impl Client {
     /// Upload a directory to the network. The directory is recursively walked.
     /// Reads all files, splits into chunks, uploads chunks, uploads datamaps, uploads archive, returns ArchiveAddr (pointing to the archive)
     pub async fn dir_upload(
-        &mut self,
+        &self,
         dir_path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<ArchiveAddr, UploadError> {
@@ -107,7 +107,7 @@ impl Client {
     /// Upload a file to the network.
/// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap)
     async fn file_upload(
-        &mut self,
+        &self,
         path: PathBuf,
         wallet: &EvmWallet,
     ) -> Result<DataAddr, UploadError> {
diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs
index 6d80c68c01..f19216fe84 100644
--- a/autonomi/src/client/mod.rs
+++ b/autonomi/src/client/mod.rs
@@ -25,12 +25,12 @@ pub mod wasm;
 // private module with utility functions
 mod utils;
 
-use std::{collections::HashSet, sync::Arc, time::Duration};
+pub use sn_evm::Amount;
 
 use libp2p::{identity::Keypair, Multiaddr};
-use sn_evm::Amount;
 use sn_networking::{interval, multiaddr_is_global, Network, NetworkBuilder, NetworkEvent};
 use sn_protocol::{version::IDENTIFY_PROTOCOL_STR, CLOSE_GROUP_SIZE};
+use std::{collections::HashSet, sync::Arc, time::Duration};
 use tokio::sync::mpsc;
 
 /// Time before considering the connection timed out.
diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs
index 850b7eec00..5b1fce533b 100644
--- a/autonomi/tests/fs.rs
+++ b/autonomi/tests/fs.rs
@@ -26,7 +26,7 @@ async fn dir_upload_download() -> Result<()> {
     let _log_appender_guard =
         LogBuilder::init_single_threaded_tokio_test("dir_upload_download", false);
 
-    let mut client = Client::connect(&peers_from_env()?).await?;
+    let client = Client::connect(&peers_from_env()?).await?;
     let wallet = get_funded_wallet();
 
     let addr = client
diff --git a/autonomi_cli/src/commands/file.rs b/autonomi_cli/src/commands/file.rs
index 0af446c6b8..d99a848214 100644
--- a/autonomi_cli/src/commands/file.rs
+++ b/autonomi_cli/src/commands/file.rs
@@ -6,6 +6,7 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use crate::utils::collect_upload_summary;
 use autonomi::client::address::addr_to_str;
 use autonomi::Multiaddr;
 use color_eyre::eyre::Context;
@@ -25,12 +26,14 @@ pub async fn cost(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     println!("Total cost: {cost}");
     Ok(())
 }
-
 pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let wallet = crate::keys::load_evm_wallet()?;
     let mut client = crate::actions::connect_to_network(peers).await?;
+    let event_receiver = client.enable_client_events();
+    let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver);
 
     println!("Uploading data to network...");
+
     let xor_name = client
         .dir_upload(PathBuf::from(file), &wallet)
         .await
@@ -39,9 +42,18 @@ pub async fn upload(file: &str, peers: Vec<Multiaddr>) -> Result<()> {
     println!("Successfully uploaded: {file}");
     println!("At address: {addr}");
 
+    if let Ok(()) = upload_completed_tx.send(()) {
+        let summary = upload_summary_thread.await?;
+        if summary.record_count == 0 {
+            println!("All chunks already exist on the network");
+        } else {
+            println!("Number of chunks uploaded: {}", summary.record_count);
+            println!("Total cost: {} AttoTokens", summary.tokens_spent);
+        }
+    }
+
     Ok(())
 }
-
 pub async fn download(addr: &str, dest_path: &str, peers: Vec<Multiaddr>) -> Result<()> {
     let mut client = crate::actions::connect_to_network(peers).await?;
     crate::actions::download(addr, dest_path, &mut client).await
diff --git a/autonomi_cli/src/commands/register.rs b/autonomi_cli/src/commands/register.rs
index e672a1cc41..d559e6cc55 100644
--- a/autonomi_cli/src/commands/register.rs
+++ b/autonomi_cli/src/commands/register.rs
@@ -6,6 +6,7 @@
 // KIND, either express or implied. Please review the Licences for the specific language governing
 // permissions and limitations relating to use of the SAFE Network Software.
 
+use crate::utils::collect_upload_summary;
 use autonomi::client::registers::RegisterAddress;
 use autonomi::client::registers::RegisterPermissions;
 use autonomi::client::registers::RegisterSecretKey;
@@ -50,7 +51,9 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>
     let wallet = crate::keys::load_evm_wallet()?;
     let register_key = crate::keys::get_register_signing_key()
         .wrap_err("The register key is required to perform this action")?;
-    let client = crate::actions::connect_to_network(peers).await?;
+    let mut client = crate::actions::connect_to_network(peers).await?;
+    let event_receiver = client.enable_client_events();
+    let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver);
 
     println!("Creating register with name: {name}");
     let register = if public {
@@ -84,6 +87,16 @@ pub async fn create(name: &str, value: &str, public: bool, peers: Vec<Multiaddr>
     println!("✅ Register created at address: {address}");
     println!("With name: {name}");
     println!("And initial value: [{value}]");
+
+    if let Ok(()) = upload_completed_tx.send(()) {
+        let summary = upload_summary_thread.await?;
+        if summary.record_count == 0 {
+            println!("The register was already created on the network. No tokens were spent.");
+        } else {
+            println!("Total cost: {} AttoTokens", summary.tokens_spent);
+        }
+    }
+
     Ok(())
 }
diff --git a/autonomi_cli/src/main.rs b/autonomi_cli/src/main.rs
index 944f401f88..2c15b82a66 100644
--- a/autonomi_cli/src/main.rs
+++ b/autonomi_cli/src/main.rs
@@ -13,6 +13,7 @@ mod access;
 mod actions;
 mod commands;
 mod opt;
+mod utils;
 
 pub use access::data_dir;
 pub use access::keys;
diff --git a/autonomi_cli/src/utils.rs b/autonomi_cli/src/utils.rs
new file mode 100644
index 0000000000..5f031a3c24
--- /dev/null
+++ b/autonomi_cli/src/utils.rs
@@ -0,0 +1,56 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+use autonomi::client::{Amount, ClientEvent, UploadSummary};
+
+/// Collects upload summary from the event receiver.
+/// Send a signal to the returned sender to stop collecting and to return the result via the join handle.
+pub fn collect_upload_summary(
+    mut event_receiver: tokio::sync::mpsc::Receiver<ClientEvent>,
+) -> (
+    tokio::task::JoinHandle<UploadSummary>,
+    tokio::sync::oneshot::Sender<()>,
+) {
+    let (upload_completed_tx, mut upload_completed_rx) = tokio::sync::oneshot::channel::<()>();
+    let stats_thread = tokio::spawn(async move {
+        let mut tokens_spent: Amount = Amount::from(0);
+        let mut record_count = 0;
+
+        loop {
+            tokio::select!
{ + event = event_receiver.recv() => { + match event { + Some(ClientEvent::UploadComplete(upload_summary)) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; + } + None => break, + } + } + _ = &mut upload_completed_rx => break, + } + } + + // try to drain the event receiver in case there are any more events + while let Ok(event) = event_receiver.try_recv() { + match event { + ClientEvent::UploadComplete(upload_summary) => { + tokens_spent += upload_summary.tokens_spent; + record_count += upload_summary.record_count; + } + } + } + + UploadSummary { + tokens_spent, + record_count, + } + }); + + (stats_thread, upload_completed_tx) +} From 069060badc4d1ed2d1466490f8b8cb86c68a36b2 Mon Sep 17 00:00:00 2001 From: qima Date: Tue, 15 Oct 2024 20:32:26 +0800 Subject: [PATCH 188/255] feat(node): using different storage dir for different network --- sn_networking/src/driver.rs | 7 ++++--- sn_protocol/src/version.rs | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index ec716cb4df..d8d71c5601 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -52,8 +52,8 @@ use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::{try_deserialize_record, RetryStrategy}, version::{ - IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR, - REQ_RESPONSE_VERSION_STR, + get_key_version_str, IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, + IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR, }, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; @@ -364,7 +364,8 @@ impl NetworkBuilder { let store_cfg = { // Configures the disk_store to store records under the provided path and increase the max record size - let storage_dir_path = root_dir.join("record_store"); + // The storage dir is appendixed with key_version str to avoid bringing records from old network into new + let storage_dir_path = root_dir.join(format!("record_store_{}", get_key_version_str())); if let Err(error) = std::fs::create_dir_all(&storage_dir_path) { return Err(NetworkError::FailedToCreateRecordStoreDir { path: storage_dir_path, diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index 04921730ef..e1c952976c 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -58,7 +58,7 @@ fn get_truncate_version_str() -> String { /// Get the PKs version string. /// If the public key mis-configed via env variable, /// it shall result in being rejected to join by the network -fn get_key_version_str() -> String { +pub fn get_key_version_str() -> String { let mut f_k_str = FOUNDATION_PK.to_hex(); let _ = f_k_str.split_off(6); let mut g_k_str = GENESIS_PK.to_hex(); From eaee0232e8d48439ad611581721ded4c283448d9 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Sat, 12 Oct 2024 16:17:44 +0100 Subject: [PATCH 189/255] feat: adapt node manager `add` command for evm network BREAKING CHANGE: services configured by the node manager are no longer compatible with networks that are not EVM. The node manager `add` command now supports the following EVM arguments: * The subcommand for selecting the network type is appended. * The `evm-custom` subcommand makes the `--data-payments-address`, `--payment-token-address` and `--rpc-url` arguments available. * The `--rewards-address` argument is provided. The `CustomNetwork` type was modified to enable it to be serialized in the node manager registry. 
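For illustration only (a sketch, not the definitive CLI: the subcommand and flag names below are
taken from this patch's tests, and the addresses are the local-testnet placeholder values used
there), an `add` invocation against a custom EVM network might look like:

    safenode-manager add \
        --rewards-address 0x03B770D9cD32077cC0bF330c13C114a87643B124 \
        evm-custom \
        --rpc-url http://localhost:8545 \
        --payment-token-address 0x5FbDB2315678afecb367f032d93F642f64180aa3 \
        --data-payments-address 0x8464135c8F25Da09e49BC8782676a84730C318bC

Selecting `evm-arbitrum-one` instead requires none of the three custom-network arguments.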
--- Cargo.lock | 35 + evmlib/Cargo.toml | 1 + evmlib/src/lib.rs | 17 +- evmlib/src/testnet.rs | 2 +- evmlib/src/utils.rs | 2 +- node-launchpad/Cargo.toml | 1 + node-launchpad/src/node_mgmt.rs | 15 +- sn_evm/src/lib.rs | 1 + sn_node_manager/src/add_services/config.rs | 290 ++++++++ sn_node_manager/src/add_services/mod.rs | 4 + sn_node_manager/src/add_services/tests.rs | 800 ++++++++++++++++++++- sn_node_manager/src/bin/cli/main.rs | 26 +- sn_node_manager/src/cmd/node.rs | 8 + sn_node_manager/src/lib.rs | 514 ++++++++++++- sn_node_manager/src/local.rs | 6 +- sn_node_manager/src/rpc.rs | 6 + sn_service_management/src/node.rs | 18 +- 17 files changed, 1719 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5267b6c79..8191c7ec60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1677,6 +1677,7 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", "windows-targets 0.52.6", ] @@ -2380,6 +2381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -2763,6 +2765,7 @@ dependencies = [ "getrandom 0.2.15", "rand 0.8.5", "serde", + "serde_with", "thiserror", "tokio", "tracing", @@ -4343,6 +4346,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.3.0", "hashbrown 0.12.3", + "serde", ] [[package]] @@ -5708,6 +5712,7 @@ dependencies = [ "sn-node-manager", "sn-releases", "sn_build_info", + "sn_evm", "sn_peers_acquisition", "sn_protocol", "sn_service_management", @@ -7797,6 +7802,36 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex 0.4.3", + "indexmap 1.9.3", + "indexmap 2.5.0", + "serde", + "serde_derive", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index fae3cba0cb..9a26778c36 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -16,6 +16,7 @@ local = [] alloy = { version = "0.4.2", default-features = false, features = ["std", "reqwest-rustls-tls", "provider-anvil-node", "sol-types", "json", "signers", "contract", "signer-local", "network"] } dirs-next = "~2.0.0" serde = "1.0" +serde_with = { version = "3.11.0", features = ["macros"] } thiserror = "1.0" tracing = { version = "~0.1.26" } tokio = "1.38.0" diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index 6e29a7f4d5..8bf3734265 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -10,6 +10,8 @@ use crate::common::{Address, QuoteHash, TxHash, U256}; use crate::transaction::verify_data_payment; use alloy::primitives::address; use alloy::transports::http::reqwest; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; use std::str::FromStr; use std::sync::LazyLock; @@ -38,8 +40,10 @@ const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address = const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address = 
address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe"); -#[derive(Clone, Debug, PartialEq)] +#[serde_as] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomNetwork { + #[serde_as(as = "DisplayFromStr")] pub rpc_url_http: reqwest::Url, pub payment_token_address: Address, pub data_payments_address: Address, @@ -57,12 +61,21 @@ impl CustomNetwork { } } -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Network { ArbitrumOne, Custom(CustomNetwork), } +impl std::fmt::Display for Network { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Network::ArbitrumOne => write!(f, "evm-arbitrum-one"), + Network::Custom(_) => write!(f, "evm-custom"), + } + } +} + impl Network { pub fn new_custom(rpc_url: &str, payment_token_addr: &str, chunk_payments_addr: &str) -> Self { Self::Custom(CustomNetwork::new( diff --git a/evmlib/src/testnet.rs b/evmlib/src/testnet.rs index 79f7223507..e5f1f79708 100644 --- a/evmlib/src/testnet.rs +++ b/evmlib/src/testnet.rs @@ -79,7 +79,7 @@ pub fn start_node() -> (AnvilInstance, Url) { .try_spawn() .expect("Could not spawn Anvil node"); - let url = Url::parse(&format!("http://{}:{}", host, port)).expect("Failed to parse URL"); + let url = Url::parse(&format!("http://{host}:{port}")).expect("Failed to parse URL"); (anvil, url) } diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index a695dd66fa..00b018fa09 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -125,7 +125,7 @@ fn local_evm_network_from_csv() -> Result { })?; let parts: Vec<&str> = csv.split(',').collect(); match parts.as_slice() { - [rpc_url, payment_token_address, chunk_payments_address] => Ok(Network::Custom( + [rpc_url, payment_token_address, chunk_payments_address, _] => Ok(Network::Custom( CustomNetwork::new(rpc_url, payment_token_address, chunk_payments_address), )), _ => { diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 3b55815bfe..cb9c03ab3f 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -52,6 +52,7 @@ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_evm = { path = "../sn_evm", version = "0.1" } sn-node-manager = { version = "0.10.6", path = "../sn_node_manager" } sn_peers_acquisition = { version = "0.5.3", path = "../sn_peers_acquisition" } sn_protocol = { path = "../sn_protocol", version = "0.17.11" } diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 0ee2c22294..852e8da8a7 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -1,19 +1,16 @@ -use std::path::PathBuf; - +use crate::action::{Action, StatusActions}; +use crate::connection_mode::ConnectionMode; use color_eyre::eyre::{eyre, Error}; +use sn_evm::RewardsAddress; use sn_node_manager::{ add_services::config::PortRange, config::get_node_registry_path, VerbosityLevel, }; use sn_peers_acquisition::PeersArgs; +use sn_releases::{self, ReleaseType, SafeReleaseRepoActions}; use sn_service_management::NodeRegistry; +use std::{path::PathBuf, str::FromStr}; use tokio::sync::mpsc::UnboundedSender; -use crate::action::{Action, StatusActions}; - -use crate::connection_mode::ConnectionMode; - -use sn_releases::{self, ReleaseType, SafeReleaseRepoActions}; - pub const PORT_MAX: u32 = 65535; pub const PORT_MIN: u32 = 1024; @@ -302,6 +299,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: 
u16) {
             None, // We don't care about the port, as we are scaling down
             config.owner.clone(),
             config.peers_args.clone(),
+            RewardsAddress::from_str("0x1111111111111111111111111111111111111111").unwrap(),
             None,
             None,
             config.safenode_path.clone(),
@@ -375,6 +373,7 @@ async fn add_nodes(
             port_range,
             config.owner.clone(),
             config.peers_args.clone(),
+            RewardsAddress::from_str("0x1111111111111111111111111111111111111111").unwrap(),
             None,
             None,
             config.safenode_path.clone(),
diff --git a/sn_evm/src/lib.rs b/sn_evm/src/lib.rs
index f4c70e04a6..a62fa5c0fd 100644
--- a/sn_evm/src/lib.rs
+++ b/sn_evm/src/lib.rs
@@ -17,6 +17,7 @@ pub use evmlib::utils::get_evm_network_from_env;
 pub use evmlib::utils::{DATA_PAYMENTS_ADDRESS, PAYMENT_TOKEN_ADDRESS, RPC_URL};
 pub use evmlib::wallet::Error as EvmWalletError;
 pub use evmlib::wallet::Wallet as EvmWallet;
+pub use evmlib::CustomNetwork;
 pub use evmlib::Network as EvmNetwork;
 
 mod amount;
diff --git a/sn_node_manager/src/add_services/config.rs b/sn_node_manager/src/add_services/config.rs
index 2d5cac69dc..1910428380 100644
--- a/sn_node_manager/src/add_services/config.rs
+++ b/sn_node_manager/src/add_services/config.rs
@@ -9,6 +9,7 @@
 use color_eyre::{eyre::eyre, Result};
 use libp2p::Multiaddr;
 use service_manager::{ServiceInstallCtx, ServiceLabel};
+use sn_evm::{EvmNetwork, RewardsAddress};
 use sn_logging::LogFormat;
 use std::{
     ffi::OsString,
@@ -72,6 +73,7 @@ pub struct InstallNodeServiceCtxBuilder {
     pub bootstrap_peers: Vec<Multiaddr>,
     pub data_dir_path: PathBuf,
     pub env_variables: Option<Vec<(String, String)>>,
+    pub evm_network: EvmNetwork,
     pub genesis: bool,
     pub home_network: bool,
     pub local: bool,
@@ -84,6 +86,7 @@ pub struct InstallNodeServiceCtxBuilder {
     pub node_ip: Option<Ipv4Addr>,
     pub node_port: Option<u16>,
     pub owner: Option<String>,
+    pub rewards_address: RewardsAddress,
     pub rpc_socket_addr: SocketAddr,
     pub safenode_path: PathBuf,
     pub service_user: Option<String>,
@@ -154,6 +157,23 @@ impl InstallNodeServiceCtxBuilder {
             args.push(OsString::from(peers_str));
         }
 
+        args.push(OsString::from("--rewards-address"));
+        args.push(OsString::from(self.rewards_address.to_string()));
+
+        args.push(OsString::from(self.evm_network.to_string()));
+        if let EvmNetwork::Custom(custom_network) = &self.evm_network {
+            args.push(OsString::from("--rpc-url"));
+            args.push(OsString::from(custom_network.rpc_url_http.to_string()));
+            args.push(OsString::from("--payment-token-address"));
+            args.push(OsString::from(
+                custom_network.payment_token_address.to_string(),
+            ));
+            args.push(OsString::from("--data-payments-address"));
+            args.push(OsString::from(
+                custom_network.data_payments_address.to_string(),
+            ));
+        }
+
         Ok(ServiceInstallCtx {
             args,
             autostart: self.autostart,
@@ -175,6 +195,7 @@ pub struct AddNodeServiceOptions {
     pub delete_safenode_src: bool,
     pub enable_metrics_server: bool,
     pub env_variables: Option<Vec<(String, String)>>,
+    pub evm_network: EvmNetwork,
     pub genesis: bool,
     pub home_network: bool,
     pub local: bool,
@@ -185,6 +206,7 @@ pub struct AddNodeServiceOptions {
     pub node_ip: Option<Ipv4Addr>,
     pub node_port: Option<PortRange>,
     pub owner: Option<String>,
+    pub rewards_address: RewardsAddress,
     pub rpc_address: Option<Ipv4Addr>,
     pub rpc_port: Option<PortRange>,
     pub safenode_src_path: PathBuf,
@@ -319,3 +341,271 @@ pub struct AddDaemonServiceOptions {
     pub user: String,
     pub version: String,
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use sn_evm::{CustomNetwork, RewardsAddress};
+    use std::net::{IpAddr, Ipv4Addr};
+
+    fn create_default_builder() -> InstallNodeServiceCtxBuilder {
+        InstallNodeServiceCtxBuilder {
+            autostart: true,
+            bootstrap_peers: vec![],
+            data_dir_path:
PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::ArbitrumOne, + genesis: false, + home_network: false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + fn create_custom_evm_network_builder() -> InstallNodeServiceCtxBuilder { + InstallNodeServiceCtxBuilder { + autostart: true, + bootstrap_peers: vec![], + data_dir_path: PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse().unwrap(), + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + ) + .unwrap(), + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ) + .unwrap(), + }), + genesis: false, + home_network: false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + fn create_builder_with_all_options_enabled() -> InstallNodeServiceCtxBuilder { + InstallNodeServiceCtxBuilder { + autostart: true, + bootstrap_peers: vec![], + data_dir_path: PathBuf::from("/data"), + env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse().unwrap(), + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + ) + .unwrap(), + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ) + .unwrap(), + }), + genesis: false, + home_network: false, + local: false, + log_dir_path: PathBuf::from("/logs"), + log_format: None, + name: "test-node".to_string(), + max_archived_log_files: Some(10), + max_log_files: Some(10), + metrics_port: None, + node_ip: None, + node_port: None, + owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124") + .unwrap(), + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + safenode_path: PathBuf::from("/bin/safenode"), + service_user: None, + upnp: false, + } + } + + #[test] + fn build_should_assign_expected_values_when_mandatory_options_are_provided() { + let builder = create_default_builder(); + let result = builder.build().unwrap(); + + assert_eq!(result.label.to_string(), "test-node"); + assert_eq!(result.program, PathBuf::from("/bin/safenode")); + assert!(result.autostart); + assert_eq!(result.username, None); + assert_eq!(result.working_directory, None); + + let expected_args = vec![ + "--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-arbitrum-one", + ]; + assert_eq!( + result 
+ .args + .iter() + .map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + } + + #[test] + fn build_should_assign_expected_values_when_a_custom_evm_network_is_provided() { + let builder = create_custom_evm_network_builder(); + let result = builder.build().unwrap(); + + assert_eq!(result.label.to_string(), "test-node"); + assert_eq!(result.program, PathBuf::from("/bin/safenode")); + assert!(result.autostart); + assert_eq!(result.username, None); + assert_eq!(result.working_directory, None); + + let expected_args = vec![ + "--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-custom", + "--rpc-url", + "http://localhost:8545/", + "--payment-token-address", + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "--data-payments-address", + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ]; + assert_eq!( + result + .args + .iter() + .map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + } + + #[test] + fn build_should_assign_expected_values_when_all_options_are_enabled() { + let mut builder = create_builder_with_all_options_enabled(); + builder.genesis = true; + builder.home_network = true; + builder.local = true; + builder.log_format = Some(LogFormat::Json); + builder.upnp = true; + builder.node_ip = Some(Ipv4Addr::new(192, 168, 1, 1)); + builder.node_port = Some(12345); + builder.metrics_port = Some(9090); + builder.owner = Some("test-owner".to_string()); + builder.bootstrap_peers = vec![ + "/ip4/127.0.0.1/tcp/8080".parse().unwrap(), + "/ip4/192.168.1.1/tcp/8081".parse().unwrap(), + ]; + builder.service_user = Some("safenode-user".to_string()); + + let result = builder.build().unwrap(); + + let expected_args = vec![ + "--rpc", + "127.0.0.1:8080", + "--root-dir", + "/data", + "--log-output-dest", + "/logs", + "--first", + "--home-network", + "--local", + "--log-format", + "json", + "--upnp", + "--ip", + "192.168.1.1", + "--port", + "12345", + "--metrics-server-port", + "9090", + "--owner", + "test-owner", + "--max-archived-log-files", + "10", + "--max-log-files", + "10", + "--peer", + "/ip4/127.0.0.1/tcp/8080,/ip4/192.168.1.1/tcp/8081", + "--rewards-address", + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + "evm-custom", + "--rpc-url", + "http://localhost:8545/", + "--payment-token-address", + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + "--data-payments-address", + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + ]; + assert_eq!( + result + .args + .iter() + .map(|os| os.to_str().unwrap()) + .collect::>(), + expected_args + ); + assert_eq!(result.username, Some("safenode-user".to_string())); + } + + #[test] + fn build_should_assign_expected_values_when_environment_variables_are_provided() { + let mut builder = create_default_builder(); + builder.env_variables = Some(vec![ + ("VAR1".to_string(), "value1".to_string()), + ("VAR2".to_string(), "value2".to_string()), + ]); + + let result = builder.build().unwrap(); + + assert_eq!( + result.environment, + Some(vec![ + ("VAR1".to_string(), "value1".to_string()), + ("VAR2".to_string(), "value2".to_string()), + ]) + ); + } +} diff --git a/sn_node_manager/src/add_services/mod.rs b/sn_node_manager/src/add_services/mod.rs index 86137d881d..96c6cf37a7 100644 --- a/sn_node_manager/src/add_services/mod.rs +++ b/sn_node_manager/src/add_services/mod.rs @@ -222,6 +222,7 @@ pub async fn add_node( bootstrap_peers: options.bootstrap_peers.clone(), data_dir_path: service_data_dir_path.clone(), env_variables: 
options.env_variables.clone(), + evm_network: options.evm_network.clone(), genesis: options.genesis, home_network: options.home_network, local: options.local, @@ -234,6 +235,7 @@ pub async fn add_node( node_ip: options.node_ip, node_port, owner: owner.clone(), + rewards_address: options.rewards_address, rpc_socket_addr, safenode_path: service_safenode_path.clone(), service_user: options.user.clone(), @@ -256,6 +258,7 @@ pub async fn add_node( auto_restart: options.auto_restart, connected_peers: None, data_dir_path: service_data_dir_path.clone(), + evm_network: options.evm_network.clone(), genesis: options.genesis, home_network: options.home_network, listen_addr: None, @@ -268,6 +271,7 @@ pub async fn add_node( node_ip: options.node_ip, node_port, number: node_number, + rewards_address: options.rewards_address, reward_balance: None, rpc_socket_addr, owner: owner.clone(), diff --git a/sn_node_manager/src/add_services/tests.rs b/sn_node_manager/src/add_services/tests.rs index 34a572ffce..9833570929 100644 --- a/sn_node_manager/src/add_services/tests.rs +++ b/sn_node_manager/src/add_services/tests.rs @@ -23,7 +23,7 @@ use libp2p::Multiaddr; use mockall::{mock, predicate::*, Sequence}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; -use sn_evm::AttoTokens; +use sn_evm::{AttoTokens, CustomNetwork, EvmNetwork, RewardsAddress}; use sn_service_management::{auditor::AuditorServiceData, control::ServiceControl}; use sn_service_management::{error::Result as ServiceControlResult, NatDetectionStatus}; use sn_service_management::{ @@ -115,6 +115,15 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, local: true, @@ -127,6 +136,7 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -172,6 +182,18 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -203,6 +225,22 @@ async fn add_genesis_node_should_use_latest_version_and_add_one_service() -> Res node_data_dir.to_path_buf().join("safenode1") ); assert_matches!(node_registry.nodes[0].status, ServiceStatus::Added); + assert_eq!( + node_registry.nodes[0].evm_network, + EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: 
RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3" + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC" + )?, + }) + ); + assert_eq!( + node_registry.nodes[0].rewards_address, + RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")? + ); Ok(()) } @@ -225,6 +263,15 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, listen_addr: None, @@ -240,6 +287,9 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n pid: None, peer_id: None, owner: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), status: ServiceStatus::Added, @@ -294,6 +344,18 @@ async fn add_genesis_node_should_return_an_error_if_there_is_already_a_genesis_n user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -365,6 +427,18 @@ async fn add_genesis_node_should_return_an_error_if_count_is_greater_than_1() -> user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -421,6 +495,15 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -438,6 +521,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( .to_path_buf() .join("safenode1") .join(SAFENODE_FILE_NAME), + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, service_user: Some(get_username()), upnp: false, } @@ -461,6 +545,15 @@ async fn 
add_node_should_use_latest_version_and_add_three_services() -> Result<( bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode2"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -473,6 +566,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), safenode_path: node_data_dir .to_path_buf() @@ -501,6 +595,15 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( data_dir_path: node_data_dir.to_path_buf().join("safenode3"), bootstrap_peers: vec![], env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -513,6 +616,7 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8085), safenode_path: node_data_dir .to_path_buf() @@ -559,6 +663,18 @@ async fn add_node_should_use_latest_version_and_add_three_services() -> Result<( user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -664,6 +780,15 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re bootstrap_peers: new_peers.clone(), data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -676,6 +801,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -685,6 +811,7 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re upnp: 
false, } .build()?; + mock_service_control .expect_install() .times(1) @@ -713,14 +840,26 @@ async fn add_node_should_update_the_bootstrap_peers_inside_node_registry() -> Re node_port: None, rpc_address: None, rpc_port: None, - safenode_dir_path: temp_dir.to_path_buf(), safenode_src_path: safenode_download_path.to_path_buf(), + safenode_dir_path: temp_dir.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -800,6 +939,15 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: env_variables.clone(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -812,6 +960,7 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -857,6 +1006,18 @@ async fn add_node_should_update_the_environment_variables_inside_node_registry() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -909,6 +1070,15 @@ async fn add_new_node_should_add_another_service() -> Result<()> { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: true, home_network: false, listen_addr: None, @@ -924,6 +1094,9 @@ async fn add_new_node_should_add_another_service() -> Result<()> { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 
1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -957,6 +1130,15 @@ async fn add_new_node_should_add_another_service() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode2"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -968,6 +1150,7 @@ async fn add_new_node_should_add_another_service() -> Result<()> { name: "safenode2".to_string(), node_ip: None, node_port: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8083), owner: None, safenode_path: node_data_dir @@ -1015,6 +1198,18 @@ async fn add_new_node_should_add_another_service() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1107,6 +1302,15 @@ async fn add_node_should_use_custom_ip() -> Result<()> { ), OsString::from("--ip"), OsString::from(custom_ip.to_string()), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1153,6 +1357,18 @@ async fn add_node_should_use_custom_ip() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1210,6 +1426,15 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -1222,6 +1447,7 @@ async fn add_node_should_use_custom_ports_for_one_service() -> 
Result<()> { node_ip: None, node_port: Some(custom_port), owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -1268,6 +1494,18 @@ async fn add_node_should_use_custom_ports_for_one_service() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1345,6 +1583,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12000"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1394,6 +1641,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1443,6 +1699,15 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { ), OsString::from("--port"), OsString::from("12002"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1489,6 +1754,18 @@ async fn add_node_should_use_a_custom_port_range() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -1521,6 +1798,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R auto_restart: false, connected_peers: None, 
data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1536,6 +1822,9 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1588,6 +1877,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_is_used() -> R user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1618,6 +1919,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1633,6 +1943,9 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1685,6 +1998,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_port_in_range_is_us user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1746,14 +2071,26 @@ async fn add_node_should_return_an_error_if_port_and_node_count_do_not_match() - node_port: Some(PortRange::Range(12000, 12002)), rpc_address: None, rpc_port: None, - safenode_dir_path: temp_dir.to_path_buf(), safenode_src_path: safenode_download_path.to_path_buf(), + safenode_dir_path: 
temp_dir.to_path_buf(), service_data_dir_path: node_data_dir.to_path_buf(), service_log_dir_path: node_logs_dir.to_path_buf(), upnp: false, user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1828,6 +2165,18 @@ async fn add_node_should_return_an_error_if_multiple_services_are_specified_with user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -1909,6 +2258,15 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> ), OsString::from("--metrics-server-port"), OsString::from("15001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -1955,6 +2313,18 @@ async fn add_node_should_set_random_ports_if_enable_metrics_server_is_true() -> user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2028,6 +2398,15 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { ), OsString::from("--max-archived-log-files"), OsString::from("20"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2074,6 +2453,18 @@ async fn add_node_should_set_max_archived_log_files() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: 
RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2148,6 +2539,15 @@ async fn add_node_should_set_max_log_files() -> Result<()> { ), OsString::from("--max-log-files"), OsString::from("20"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2194,6 +2594,18 @@ async fn add_node_should_set_max_log_files() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2266,6 +2678,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2315,6 +2736,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), OsString::from("--metrics-server-port"), OsString::from("12001"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2364,6 +2794,15 @@ async fn add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< ), OsString::from("--metrics-server-port"), OsString::from("12002"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2410,6 +2849,18 @@ async fn 
add_node_should_use_a_custom_port_range_for_metrics_server() -> Result< user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2439,6 +2890,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2454,6 +2914,9 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -2506,6 +2969,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_is_use user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2537,6 +3012,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2552,6 +3036,9 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -2604,6 +3091,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_metrics_port_in_ran user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: 
EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2673,6 +3172,15 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2715,6 +3223,15 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2757,6 +3274,15 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< .to_string_lossy() .to_string(), ), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -2803,6 +3329,18 @@ async fn add_node_should_use_a_custom_port_range_for_the_rpc_server() -> Result< user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -2843,6 +3381,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2858,6 +3405,9 @@ async fn 
add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -2910,6 +3460,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_is_used() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -2941,6 +3503,15 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2956,6 +3527,9 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -3008,6 +3582,18 @@ async fn add_node_should_return_an_error_if_duplicate_custom_rpc_port_in_range_i user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &MockServiceControl::new(), @@ -3063,6 +3649,15 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -3075,6 +3670,7 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() node_ip: None, node_port: None, owner: None, + rewards_address: 
RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3120,6 +3716,18 @@ async fn add_node_should_disable_upnp_and_home_network_if_nat_status_is_public() user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -3172,6 +3780,15 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -3184,6 +3801,7 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3229,6 +3847,18 @@ async fn add_node_should_enable_upnp_if_nat_status_is_upnp() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -3281,6 +3911,15 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -3293,6 +3932,7 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 12001), safenode_path: node_data_dir .to_path_buf() @@ -3338,6 +3978,18 @@ async fn add_node_should_enable_home_network_if_nat_status_is_private() -> Resul user: Some(get_username()), user_mode: false, version: 
latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -3415,6 +4067,18 @@ async fn add_node_should_return_an_error_if_nat_status_is_none_but_auto_set_nat_ user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4008,6 +4672,15 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, local: false, @@ -4020,6 +4693,7 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4066,6 +4740,18 @@ async fn add_node_should_not_delete_the_source_binary_if_path_arg_is_used() -> R user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4122,6 +4808,15 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( genesis: false, home_network: true, local: false, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), log_dir_path: node_logs_dir.to_path_buf().join("safenode1"), log_format: None, max_archived_log_files: None, @@ -4131,6 +4826,7 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, 
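+            // The rewards address is the wallet that receives this node's earnings; the custom EVM network above supplies the RPC endpoint and the payment-token and data-payments contracts that payments run against.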
rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4177,6 +4873,18 @@ async fn add_node_should_apply_the_home_network_flag_if_it_is_used() -> Result<( user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4230,6 +4938,15 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -4242,6 +4959,7 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4288,6 +5006,18 @@ async fn add_node_should_add_the_node_in_user_mode() -> Result<()> { user: Some(get_username()), user_mode: true, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4338,6 +5068,15 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { bootstrap_peers: vec![], data_dir_path: node_data_dir.to_path_buf().join("safenode1"), env_variables: None, + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: true, local: false, @@ -4350,6 +5089,7 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { node_ip: None, node_port: None, owner: None, + rewards_address: RewardsAddress::from_str("0x03B770D9cD32077cC0bF330c13C114a87643B124")?, rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: node_data_dir .to_path_buf() @@ -4396,6 +5136,18 @@ async fn add_node_should_add_the_node_with_upnp_enabled() -> Result<()> { user: Some(get_username()), user_mode: true, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + 
payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4467,6 +5219,15 @@ async fn add_node_should_assign_an_owner_in_lowercase() -> Result<()> { ), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: false, contents: None, @@ -4514,6 +5275,18 @@ async fn add_node_should_assign_an_owner_in_lowercase() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, @@ -4587,6 +5360,15 @@ async fn add_node_should_auto_restart() -> Result<()> { ), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("--rewards-address"), + OsString::from("0x03B770D9cD32077cC0bF330c13C114a87643B124"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), ], autostart: true, contents: None, @@ -4634,6 +5416,18 @@ async fn add_node_should_auto_restart() -> Result<()> { user: Some(get_username()), user_mode: false, version: latest_version.to_string(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }, &mut node_registry, &mock_service_control, diff --git a/sn_node_manager/src/bin/cli/main.rs b/sn_node_manager/src/bin/cli/main.rs index 5165eefae1..9269f76889 100644 --- a/sn_node_manager/src/bin/cli/main.rs +++ b/sn_node_manager/src/bin/cli/main.rs @@ -123,6 +123,9 @@ pub enum SubCmd { /// Example: --env SN_LOG=all,RUST_LOG=libp2p=debug #[clap(name = "env", long, use_value_delimiter = true, value_parser = parse_environment_variables)] env_variables: Option>, + /// Specify what EVM network to use for payments. + #[command(subcommand)] + evm_network: EvmNetworkCommand, /// Set this flag to use the safenode '--home-network' feature. /// /// This enables the use of safenode services from a home network with a router. 
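The test hunks above repeat one and the same custom EVM network fixture, and the CLI hunk just above introduces the `EvmNetworkCommand` subcommand that produces such a network. As a minimal sketch, not part of the patch itself, the repeated fixture could be captured once; the helper name `test_evm_network` is invented for illustration, while the types, field names, and values are exactly those used in the diffs:

use std::str::FromStr;

use color_eyre::Result;
use sn_evm::{CustomNetwork, EvmNetwork, RewardsAddress};

// Illustrative helper only: builds the custom EVM network that the tests in
// this patch construct inline, pointing at a local RPC endpoint plus the
// payment-token and data-payments contract addresses asserted above.
fn test_evm_network() -> Result<EvmNetwork> {
    Ok(EvmNetwork::Custom(CustomNetwork {
        rpc_url_http: "http://localhost:8545".parse()?,
        payment_token_address: RewardsAddress::from_str(
            "0x5FbDB2315678afecb367f032d93F642f64180aa3",
        )?,
        data_payments_address: RewardsAddress::from_str(
            "0x8464135c8F25Da09e49BC8782676a84730C318bC",
        )?,
    }))
}

Paired with the `--rewards-address` flag and the `evm-custom` subcommand arguments the tests expect each service to receive, this is the payment configuration a node is launched with.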
@@ -188,13 +191,6 @@ pub enum SubCmd { /// services, which in this case would be 5. The range must also go from lower to higher. #[clap(long, value_parser = PortRange::parse)] node_port: Option<PortRange>, - /// Provide a path for the safenode binary to be used by the service. - /// - /// Useful for creating the service using a custom built binary. - #[clap(long)] - path: Option<PathBuf>, - #[command(flatten)] - peers: PeersArgs, /// Specify the owner for the node service. /// /// This is mainly used for the 'Beta Rewards' programme, for linking your Discord username /// run as normal. #[clap(long)] owner: Option<String>, + /// Provide a path for the safenode binary to be used by the service. + /// + /// Useful for creating the service using a custom built binary. + #[clap(long)] + path: Option<PathBuf>, + #[command(flatten)] + peers: PeersArgs, + /// Specify the wallet address that will receive the node's earnings. + #[clap(long)] + rewards_address: RewardsAddress, /// Specify an Ipv4Addr for the node's RPC server to run on. /// /// Useful if you want to expose the RPC server publicly. Ports are assigned automatically. @@ -1082,6 +1088,7 @@ async fn main() -> Result<()> { data_dir_path, enable_metrics_server, env_variables, + evm_network, home_network, local, log_dir_path, @@ -1094,6 +1101,7 @@ owner, path, peers, + rewards_address, rpc_address, rpc_port, url, @@ -1101,13 +1109,14 @@ user, version, }) => { - let _ = cmd::node::add( + cmd::node::add( auto_restart, auto_set_nat_flags, count, data_dir_path, enable_metrics_server, env_variables, + Some(evm_network.try_into()?), home_network, local, log_dir_path, @@ -1119,6 +1128,7 @@ node_port, owner, peers, + rewards_address, rpc_address, rpc_port, path, diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index ea30532c45..d28dbf7266 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -22,6 +22,7 @@ use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; use libp2p_identity::PeerId; use semver::Version; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -42,6 +43,7 @@ pub async fn add( data_dir_path: Option<PathBuf>, enable_metrics_server: bool, env_variables: Option<Vec<(String, String)>>, + evm_network: Option<EvmNetwork>, home_network: bool, local: bool, log_dir_path: Option<PathBuf>, @@ -53,6 +55,7 @@ node_port: Option<PortRange>, owner: Option<String>, peers_args: PeersArgs, + rewards_address: RewardsAddress, rpc_address: Option<Ipv4Addr>, rpc_port: Option<PortRange>, src_path: Option<PathBuf>, @@ -142,6 +145,7 @@ count, delete_safenode_src: src_path.is_none(), enable_metrics_server, + evm_network: evm_network.unwrap_or(EvmNetwork::ArbitrumOne), env_variables, genesis: is_first, home_network, @@ -153,6 +157,7 @@ node_ip, node_port, owner, + rewards_address, rpc_address, rpc_port, safenode_src_path, @@ -616,6 +621,7 @@ pub async fn maintain_n_running_nodes( node_port: Option<PortRange>, owner: Option<String>, peers: PeersArgs, + rewards_address: RewardsAddress, rpc_address: Option<Ipv4Addr>, rpc_port: Option<PortRange>, src_path: Option<PathBuf>, @@ -708,6 +714,7 @@ data_dir_path.clone(), enable_metrics_server, env_variables.clone(), + None, home_network, local, log_dir_path.clone(), @@ -719,6 +726,7 @@ Some(PortRange::Single(port)), owner.clone(), peers.clone(), +
rewards_address, rpc_address, rpc_port.clone(), src_path.clone(), diff --git a/sn_node_manager/src/lib.rs b/sn_node_manager/src/lib.rs index 64c32bb9bb..721015ed2f 100644 --- a/sn_node_manager/src/lib.rs +++ b/sn_node_manager/src/lib.rs @@ -673,7 +673,7 @@ mod tests { use mockall::{mock, predicate::*}; use predicates::prelude::*; use service_manager::ServiceInstallCtx; - use sn_evm::AttoTokens; + use sn_evm::{AttoTokens, CustomNetwork, EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ error::{Error as ServiceControlError, Result as ServiceControlResult}, @@ -768,6 +768,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -783,6 +792,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -872,6 +884,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -889,6 +910,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -941,6 +965,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -958,6 +991,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1053,6 +1089,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: 
EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1070,6 +1115,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1135,6 +1183,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1150,6 +1207,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1227,6 +1287,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1242,6 +1311,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1318,6 +1390,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1333,6 +1414,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1379,6 +1463,15 @@ mod tests { auto_restart: false, 
connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1396,6 +1489,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1430,6 +1526,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1445,6 +1550,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1479,6 +1587,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1496,6 +1613,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1531,6 +1651,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1546,6 +1675,9 @@ mod tests { owner: None, peer_id: None, pid: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), 
safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1596,6 +1728,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1613,6 +1754,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -1724,6 +1868,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1741,6 +1894,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -1814,6 +1970,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1831,6 +1996,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -1949,6 +2117,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -1966,6 +2143,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + 
"0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2096,6 +2276,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2113,6 +2302,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2238,6 +2430,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2255,6 +2456,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2381,6 +2585,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -2398,6 +2611,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2498,6 +2714,7 @@ mod tests { OsString::from("--log-output-dest"), OsString::from("/var/log/safenode/safenode1"), OsString::from("--upnp"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -2554,6 +2771,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -2571,6 +2789,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: 
RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2654,6 +2875,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--log-format"), OsString::from("json"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -2710,6 +2932,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -2727,6 +2950,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2813,6 +3039,7 @@ mod tests { OsString::from("--log-output-dest"), OsString::from("/var/log/safenode/safenode1"), OsString::from("--home-network"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -2869,6 +3096,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: true, listen_addr: None, @@ -2886,6 +3114,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -2969,6 +3200,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--ip"), OsString::from("192.168.1.1"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3025,6 +3257,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3042,6 +3275,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -3128,6 +3364,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3184,6 +3421,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3201,6 +3439,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -3284,6 +3525,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--max-archived-log-files"), OsString::from("20"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3366,6 +3608,10 @@ mod tests { user: Some("safe".to_string()), user_mode: false, version: current_version.to_string(), + evm_network: EvmNetwork::ArbitrumOne, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); @@ -3443,6 +3689,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--max-log-files"), OsString::from("20"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3525,6 +3772,10 @@ mod tests { user: Some("safe".to_string()), user_mode: false, version: current_version.to_string(), + evm_network: EvmNetwork::ArbitrumOne, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, }; let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); @@ -3599,6 +3850,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3655,6 +3907,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3672,6 +3925,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -3758,6 +4014,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--metrics-server-port"), OsString::from("12000"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3814,6 +4071,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3831,6 +4089,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -3917,6 +4178,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -3973,6 +4235,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -3990,6 +4253,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: 
RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -4076,6 +4342,7 @@ mod tests { OsString::from("/var/log/safenode/safenode1"), OsString::from("--owner"), OsString::from("discord_username"), + OsString::from("evm-arbitrum-one"), ], autostart: true, contents: None, @@ -4132,6 +4399,7 @@ mod tests { auto_restart: true, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -4149,6 +4417,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -4184,6 +4455,182 @@ mod tests { Ok(()) } + #[tokio::test] + async fn upgrade_should_retain_evm_network_settings() -> Result<()> { + let current_version = "0.1.0"; + let target_version = "0.2.0"; + + let tmp_data_dir = assert_fs::TempDir::new()?; + let current_install_dir = tmp_data_dir.child("safenode_install"); + current_install_dir.create_dir_all()?; + + let current_node_bin = current_install_dir.child("safenode"); + current_node_bin.write_binary(b"fake safenode binary")?; + let target_node_bin = tmp_data_dir.child("safenode"); + target_node_bin.write_binary(b"fake safenode binary")?; + + let mut mock_service_control = MockServiceControl::new(); + let mut mock_rpc_client = MockRpcClient::new(); + + // before binary upgrade + mock_service_control + .expect_get_process_pid() + .with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(1000)); + mock_service_control + .expect_stop() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + + // after binary upgrade + mock_service_control + .expect_uninstall() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_install() + .with( + eq(ServiceInstallCtx { + args: vec![ + OsString::from("--rpc"), + OsString::from("127.0.0.1:8081"), + OsString::from("--root-dir"), + OsString::from("/var/safenode-manager/services/safenode1"), + OsString::from("--log-output-dest"), + OsString::from("/var/log/safenode/safenode1"), + OsString::from("--owner"), + OsString::from("discord_username"), + OsString::from("evm-custom"), + OsString::from("--rpc-url"), + OsString::from("http://localhost:8545/"), + OsString::from("--payment-token-address"), + OsString::from("0x5FbDB2315678afecb367f032d93F642f64180aa3"), + OsString::from("--data-payments-address"), + OsString::from("0x8464135c8F25Da09e49BC8782676a84730C318bC"), + ], + autostart: true, + contents: None, + environment: None, + label: "safenode1".parse()?, + program: current_node_bin.to_path_buf(), + username: Some("safe".to_string()), + working_directory: None, + }), + eq(false), + ) + .times(1) + .returning(|_, _| Ok(())); + + // after service restart + mock_service_control + .expect_start() + .with(eq("safenode1"), eq(false)) + .times(1) + .returning(|_, _| Ok(())); + mock_service_control + .expect_wait() + .with(eq(3000)) + .times(1) + .returning(|_| ()); + mock_service_control + .expect_get_process_pid() + 
.with(eq(current_node_bin.to_path_buf().clone())) + .times(1) + .returning(|_| Ok(100)); + + mock_rpc_client.expect_node_info().times(1).returning(|| { + Ok(NodeInfo { + pid: 2000, + peer_id: PeerId::from_str("12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR")?, + data_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + log_path: PathBuf::from("/var/log/safenode/safenode1"), + version: target_version.to_string(), + uptime: std::time::Duration::from_secs(1), // the service was just started + wallet_balance: 0, + }) + }); + mock_rpc_client + .expect_network_info() + .times(1) + .returning(|| { + Ok(NetworkInfo { + connected_peers: Vec::new(), + listeners: Vec::new(), + }) + }); + + let mut service_data = NodeServiceData { + auto_restart: true, + connected_peers: None, + data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), + genesis: false, + home_network: false, + listen_addr: None, + local: false, + log_dir_path: PathBuf::from("/var/log/safenode/safenode1"), + log_format: None, + max_archived_log_files: None, + max_log_files: None, + metrics_port: None, + node_ip: None, + node_port: None, + number: 1, + owner: Some("discord_username".to_string()), + peer_id: Some(PeerId::from_str( + "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", + )?), + pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, + reward_balance: Some(AttoTokens::zero()), + + rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), + safenode_path: current_node_bin.to_path_buf(), + service_name: "safenode1".to_string(), + status: ServiceStatus::Running, + upnp: false, + user: Some("safe".to_string()), + user_mode: false, + version: current_version.to_string(), + }; + let service = NodeService::new(&mut service_data, Box::new(mock_rpc_client)); + + let mut service_manager = ServiceManager::new( + service, + Box::new(mock_service_control), + VerbosityLevel::Normal, + ); + + service_manager + .upgrade(UpgradeOptions { + auto_restart: true, + bootstrap_peers: Vec::new(), + env_variables: None, + force: false, + start_service: true, + target_bin_path: target_node_bin.to_path_buf(), + target_version: Version::parse(target_version).unwrap(), + }) + .await?; + + assert!(service_manager.service.service_data.auto_restart,); + + Ok(()) + } + #[tokio::test] async fn upgrade_should_use_dynamic_startup_delay_if_set() -> Result<()> { let current_version = "0.1.0"; @@ -4231,6 +4678,7 @@ mod tests { OsString::from("--log-output-dest"), OsString::from("/var/log/safenode/safenode1"), OsString::from("--upnp"), + OsString::from("evm-arbitrum-one"), ], autostart: false, contents: None, @@ -4290,6 +4738,7 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::ArbitrumOne, genesis: false, home_network: false, listen_addr: None, @@ -4307,6 +4756,9 @@ mod tests { "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), pid: Some(1000), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: current_node_bin.to_path_buf(), @@ -4364,6 +4816,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4379,6 +4840,9 @@ mod tests { owner: None, pid: None, peer_id: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), @@ -4423,6 +4887,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4440,6 +4913,9 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -4498,6 +4974,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: PathBuf::from("/var/safenode-manager/services/safenode1"), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4515,6 +5000,9 @@ mod tests { peer_id: Some(PeerId::from_str( "12D3KooWS2tpXGGTmg2AHFiDh57yPQnat49YHnyqoggzXZWpqkCR", )?), + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: PathBuf::from("/var/safenode-manager/services/safenode1/safenode"), @@ -4565,6 +5053,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4580,6 +5077,9 @@ mod tests { owner: None, pid: None, peer_id: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: 
Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), @@ -4630,6 +5130,15 @@ mod tests { auto_restart: false, connected_peers: None, data_dir_path: data_dir.to_path_buf(), + evm_network: EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://localhost:8545".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + }), genesis: false, home_network: false, listen_addr: None, @@ -4645,6 +5154,9 @@ mod tests { owner: None, pid: None, peer_id: None, + rewards_address: RewardsAddress::from_str( + "0x03B770D9cD32077cC0bF330c13C114a87643B124", + )?, reward_balance: Some(AttoTokens::zero()), rpc_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081), safenode_path: safenode_bin.to_path_buf(), diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index e718d3dad6..5796cda354 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -313,7 +313,7 @@ pub async fn run_network( .nodes .iter() .find_map(|n| n.listen_addr.clone()) - .ok_or_else(|| eyre!("Unable to obtain a peer to connect to"))?; + .ok_or_eyre("Unable to obtain a peer to connect to")?; (peer, 1) } } else { @@ -474,7 +474,7 @@ pub async fn run_node( run_options.owner.clone(), run_options.rpc_socket_addr, run_options.rewards_address, - run_options.evm_network, + run_options.evm_network.clone(), )?; launcher.wait(run_options.interval); @@ -492,6 +492,7 @@ pub async fn run_node( auto_restart: false, connected_peers, data_dir_path: node_info.data_path, + evm_network: run_options.evm_network.unwrap_or(EvmNetwork::ArbitrumOne), genesis: run_options.genesis, home_network: false, listen_addr: Some(listen_addrs), @@ -507,6 +508,7 @@ pub async fn run_node( owner: run_options.owner, peer_id: Some(peer_id), pid: Some(node_info.pid), + rewards_address: run_options.rewards_address, reward_balance: None, rpc_socket_addr: run_options.rpc_socket_addr, safenode_path: launcher.get_safenode_path(), diff --git a/sn_node_manager/src/rpc.rs b/sn_node_manager/src/rpc.rs index b9fc50ced8..57147ccce4 100644 --- a/sn_node_manager/src/rpc.rs +++ b/sn_node_manager/src/rpc.rs @@ -66,6 +66,7 @@ pub async fn restart_node_service( bootstrap_peers: node_registry.bootstrap_peers.clone(), data_dir_path: current_node_clone.data_dir_path.clone(), env_variables: node_registry.environment_variables.clone(), + evm_network: current_node_clone.evm_network.clone(), genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, local: current_node_clone.local, @@ -78,6 +79,7 @@ pub async fn restart_node_service( name: current_node_clone.service_name.clone(), node_ip: current_node_clone.node_ip, node_port: current_node_clone.get_safenode_port(), + rewards_address: current_node_clone.rewards_address, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path: current_node_clone.safenode_path.clone(), service_user: current_node_clone.user.clone(), @@ -184,6 +186,7 @@ pub async fn restart_node_service( bootstrap_peers: node_registry.bootstrap_peers.clone(), data_dir_path: data_dir_path.clone(), env_variables: node_registry.environment_variables.clone(), + evm_network: current_node_clone.evm_network.clone(), genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, local: current_node_clone.local, @@ -196,6 +199,7 
@@ pub async fn restart_node_service( node_ip: current_node_clone.node_ip, node_port: None, owner: None, + rewards_address: current_node_clone.rewards_address, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path: safenode_path.clone(), service_user: current_node_clone.user.clone(), @@ -210,6 +214,7 @@ pub async fn restart_node_service( auto_restart: current_node_clone.auto_restart, connected_peers: None, data_dir_path, + evm_network: current_node_clone.evm_network, genesis: current_node_clone.genesis, home_network: current_node_clone.home_network, listen_addr: None, @@ -225,6 +230,7 @@ pub async fn restart_node_service( owner: None, peer_id: None, pid: None, + rewards_address: current_node_clone.rewards_address, reward_balance: current_node_clone.reward_balance, rpc_socket_addr: current_node_clone.rpc_socket_addr, safenode_path, diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index ee109cc15c..b2e4af4eaa 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -11,7 +11,7 @@ use async_trait::async_trait; use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; use service_manager::{ServiceInstallCtx, ServiceLabel}; -use sn_evm::AttoTokens; +use sn_evm::{AttoTokens, EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_protocol::get_port_from_multiaddr; use std::{ @@ -126,6 +126,20 @@ impl<'a> ServiceStateActions for NodeService<'a> { args.push(OsString::from(peers_str)); } + args.push(OsString::from(self.service_data.evm_network.to_string())); + if let EvmNetwork::Custom(custom_network) = &self.service_data.evm_network { + args.push(OsString::from("--rpc-url")); + args.push(OsString::from(custom_network.rpc_url_http.to_string())); + args.push(OsString::from("--payment-token-address")); + args.push(OsString::from( + custom_network.payment_token_address.to_string(), + )); + args.push(OsString::from("--data-payments-address")); + args.push(OsString::from( + custom_network.data_payments_address.to_string(), + )); + } + Ok(ServiceInstallCtx { args, autostart: options.auto_restart, @@ -269,6 +283,7 @@ pub struct NodeServiceData { )] pub connected_peers: Option<Vec<PeerId>>, pub data_dir_path: PathBuf, + pub evm_network: EvmNetwork, pub genesis: bool, pub home_network: bool, pub listen_addr: Option<Vec<Multiaddr>>, @@ -292,6 +307,7 @@ pub struct NodeServiceData { )] pub peer_id: Option<PeerId>, pub pid: Option<u32>, + pub rewards_address: RewardsAddress, pub reward_balance: Option<AttoTokens>, pub rpc_socket_addr: SocketAddr, pub safenode_path: PathBuf, From c338819126ae6649b03e6eb5d4f2f9e8f7f22596 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 15 Oct 2024 22:30:36 +0100 Subject: [PATCH 190/255] chore: rename autonomi crate and binary The `autonomi_cli` binary is renamed to `autonomi` because the `_cli` suffix is not necessary. The crate is named `autonomi-cli` rather than `autonomi_cli` because we agreed we prefer hyphens.
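A note on the EVM wiring completed above: the `ServiceStateActions` change in `sn_service_management/src/node.rs` serializes the stored `EvmNetwork` into the `safenode` argument list, appending the network as a positional subcommand and, for a custom network, its three connection flags. The following is a minimal sketch of that shape, not the real implementation: `EvmNetwork` here is a plain-`String` stand-in for the `sn_evm` type, which carries parsed URL and address values and whose `to_string()` produces the subcommand name.

    use std::ffi::OsString;

    // Stand-in for sn_evm's EvmNetwork; illustration only.
    enum EvmNetwork {
        ArbitrumOne,
        Custom {
            rpc_url_http: String,
            payment_token_address: String,
            data_payments_address: String,
        },
    }

    // Build the argument tail appended to the safenode invocation.
    fn evm_args(network: &EvmNetwork) -> Vec<OsString> {
        let mut args = Vec::new();
        match network {
            // A named network is self-describing: one positional subcommand.
            EvmNetwork::ArbitrumOne => args.push(OsString::from("evm-arbitrum-one")),
            // A custom network must also say where its RPC endpoint and contracts live.
            EvmNetwork::Custom {
                rpc_url_http,
                payment_token_address,
                data_payments_address,
            } => {
                args.push(OsString::from("evm-custom"));
                for (flag, value) in [
                    ("--rpc-url", rpc_url_http),
                    ("--payment-token-address", payment_token_address),
                    ("--data-payments-address", data_payments_address),
                ] {
                    args.push(OsString::from(flag));
                    args.push(OsString::from(value.as_str()));
                }
            }
        }
        args
    }

    fn main() {
        let args = evm_args(&EvmNetwork::ArbitrumOne);
        assert_eq!(args, vec![OsString::from("evm-arbitrum-one")]);
    }

The `upgrade_should_retain_evm_network_settings` test above pins exactly this tail for the custom case; see the expected `ServiceInstallCtx` args in that test.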
--- .../workflows/generate-benchmark-charts.yml | 8 ++--- .github/workflows/merge.yml | 32 +++++++++---------- Cargo.lock | 2 +- Cargo.toml | 6 +--- README.md | 4 +-- {autonomi_cli => autonomi-cli}/Cargo.toml | 6 +++- {autonomi_cli => autonomi-cli}/README.md | 0 .../benches/files.rs | 0 .../src/access/data_dir.rs | 0 .../src/access/keys.rs | 0 .../src/access/mod.rs | 0 .../src/access/network.rs | 0 .../src/actions/connect.rs | 0 .../src/actions/download.rs | 0 .../src/actions/mod.rs | 0 .../src/actions/progress_bar.rs | 0 .../src/commands.rs | 0 .../src/commands/file.rs | 0 .../src/commands/register.rs | 0 .../src/commands/vault.rs | 0 {autonomi_cli => autonomi-cli}/src/main.rs | 2 +- {autonomi_cli => autonomi-cli}/src/opt.rs | 0 {autonomi_cli => autonomi-cli}/src/utils.rs | 0 sn_logging/src/lib.rs | 2 +- 24 files changed, 31 insertions(+), 31 deletions(-) rename {autonomi_cli => autonomi-cli}/Cargo.toml (94%) rename {autonomi_cli => autonomi-cli}/README.md (100%) rename {autonomi_cli => autonomi-cli}/benches/files.rs (100%) rename {autonomi_cli => autonomi-cli}/src/access/data_dir.rs (100%) rename {autonomi_cli => autonomi-cli}/src/access/keys.rs (100%) rename {autonomi_cli => autonomi-cli}/src/access/mod.rs (100%) rename {autonomi_cli => autonomi-cli}/src/access/network.rs (100%) rename {autonomi_cli => autonomi-cli}/src/actions/connect.rs (100%) rename {autonomi_cli => autonomi-cli}/src/actions/download.rs (100%) rename {autonomi_cli => autonomi-cli}/src/actions/mod.rs (100%) rename {autonomi_cli => autonomi-cli}/src/actions/progress_bar.rs (100%) rename {autonomi_cli => autonomi-cli}/src/commands.rs (100%) rename {autonomi_cli => autonomi-cli}/src/commands/file.rs (100%) rename {autonomi_cli => autonomi-cli}/src/commands/register.rs (100%) rename {autonomi_cli => autonomi-cli}/src/commands/vault.rs (100%) rename {autonomi_cli => autonomi-cli}/src/main.rs (97%) rename {autonomi_cli => autonomi-cli}/src/opt.rs (100%) rename {autonomi_cli => autonomi-cli}/src/utils.rs (100%) diff --git a/.github/workflows/generate-benchmark-charts.yml b/.github/workflows/generate-benchmark-charts.yml index 4b077c7cc8..27a737a7a7 100644 --- a/.github/workflows/generate-benchmark-charts.yml +++ b/.github/workflows/generate-benchmark-charts.yml @@ -15,7 +15,7 @@ permissions: env: CARGO_INCREMENTAL: "0" RUST_BACKTRACE: 1 - CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi_cli + CLIENT_DATA_PATH: /home/runner/.local/share/safe/autonomi NODE_DATA_PATH: /home/runner/.local/share/safe/node jobs: @@ -46,7 +46,7 @@ jobs: run: wget https://sn-node.s3.eu-west-2.amazonaws.com/the-test-data.zip - name: Build node and cli binaries - run: cargo build --release --features local --bin safenode --bin autonomi_cli + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - name: Start a local network @@ -67,7 +67,7 @@ jobs: # Criterion outputs the actual bench results to stderr "2>&1 tee output.txt" takes stderr, # passes to tee which displays it in the terminal and writes to output.txt run: | - cargo criterion --features=local --message-format=json 2>&1 -p autonomi_cli | tee -a output.txt + cargo criterion --features=local --message-format=json 2>&1 -p autonomi | tee -a output.txt cat output.txt | rg benchmark-complete | jq -s 'map({ name: (.id | split("/"))[-1], unit: "MiB/s", @@ -100,7 +100,7 @@ jobs: - name: Start a client instance to compare memory usage shell: bash - run: cargo run --bin autonomi_cli --release -- --log-output-dest=data-dir file upload the-test-data.zip + 
run: cargo run --bin autonomi --release -- --log-output-dest=data-dir file upload the-test-data.zip env: SN_LOG: "all" diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 2a90a5e7ae..98ee999b06 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -160,7 +160,7 @@ jobs: - uses: Swatinem/rust-cache@v2 - name: Build binaries - run: cargo build --release --features local --bin safenode --bin autonomi_cli + run: cargo build --release --features local --bin safenode --bin autonomi timeout-minutes: 30 - name: Start a local network @@ -207,13 +207,13 @@ jobs: shell: pwsh - name: Get file cost - run: ./target/release/autonomi_cli --log-output-dest=data-dir file cost "./resources" + run: ./target/release/autonomi --log-output-dest=data-dir file cost "./resources" env: SN_LOG: "v" timeout-minutes: 15 - name: File upload - run: ./target/release/autonomi_cli --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 + run: ./target/release/autonomi --log-output-dest=data-dir file upload "./resources" > ./upload_output 2>&1 env: SN_LOG: "v" timeout-minutes: 15 @@ -233,16 +233,16 @@ jobs: shell: pwsh - name: File Download - run: ./target/release/autonomi_cli --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources + run: ./target/release/autonomi --log-output-dest=data-dir file download ${{ env.UPLOAD_ADDRESS }} ./downloaded_resources env: SN_LOG: "v" timeout-minutes: 5 - name: Generate register signing key - run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key - name: Create register (writeable by owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 + run: ./target/release/autonomi --log-output-dest=data-dir register create baobao 123 > ./register_create_output 2>&1 env: SN_LOG: "v" timeout-minutes: 10 @@ -262,25 +262,25 @@ jobs: shell: pwsh - name: Get register - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 5 - name: Edit register - run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.REGISTER_ADDRESS }} 456 env: SN_LOG: "v" timeout-minutes: 10 - name: Get register (after edit) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 5 - name: Create Public Register (writeable by anyone) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 + run: ./target/release/autonomi --log-output-dest=data-dir register create bao 111 --public > ./register_public_create_output 2>&1 env: SN_LOG: "v" timeout-minutes: 5 @@ -300,13 +300,13 @@ jobs: shell: pwsh - name: Get Public Register (current key is the owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: SN_LOG: "v" 
timeout-minutes: 5 - name: Edit Public Register (current key is the owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 222 env: SN_LOG: "v" timeout-minutes: 10 @@ -316,22 +316,22 @@ jobs: run: rm -rf ${{ matrix.safe_path }}/client - name: Generate new register signing key - run: ./target/release/autonomi_cli --log-output-dest=data-dir register generate-key + run: ./target/release/autonomi --log-output-dest=data-dir register generate-key - name: Get Public Register (new signing key is not the owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 2 - name: Edit Public Register (new signing key is not the owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 + run: ./target/release/autonomi --log-output-dest=data-dir register edit ${{ env.PUBLIC_REGISTER_ADDRESS }} 333 env: SN_LOG: "v" timeout-minutes: 10 - name: Get Public Register (new signing key is not the owner) - run: ./target/release/autonomi_cli --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} + run: ./target/release/autonomi --log-output-dest=data-dir register get ${{ env.PUBLIC_REGISTER_ADDRESS }} env: SN_LOG: "v" timeout-minutes: 2 diff --git a/Cargo.lock b/Cargo.lock index 8191c7ec60..5d397e2a98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1112,7 +1112,7 @@ dependencies = [ ] [[package]] -name = "autonomi_cli" +name = "autonomi-cli" version = "0.1.0" dependencies = [ "autonomi", diff --git a/Cargo.toml b/Cargo.toml index c34946d706..779485a2c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,15 +2,11 @@ resolver = "2" members = [ "autonomi", - "autonomi_cli", + "autonomi-cli", "evmlib", "evm_testnet", - # "sn_auditor", "sn_build_info", "sn_evm", - # "sn_cli", - # "sn_client", - # "sn_faucet", "sn_logging", "sn_metrics", "nat-detection", diff --git a/README.md b/README.md index 60f5cd84b3..31d6c73e43 100644 --- a/README.md +++ b/README.md @@ -158,7 +158,7 @@ To upload a file or a directory, you need to set the `SECRET_KEY` environment va > When running a local network, you can use the `SECRET_KEY` printed by the `evm_testnet` command [step 2](#2-run-a-local-evm-node) as it has all the money. ```bash -SECRET_KEY= cargo run --bin autonomi_cli --features local -- file upload +SECRET_KEY= cargo run --bin autonomi --features local -- file upload ``` The output will print out the address at which the content was uploaded. @@ -166,7 +166,7 @@ The output will print out the address at which the content was uploaded. 
Now to download the files again: ```bash -cargo run --bin autonomi_cli --features local -- file download +cargo run --bin autonomi --features local -- file download ``` ### Registers diff --git a/autonomi_cli/Cargo.toml b/autonomi-cli/Cargo.toml similarity index 94% rename from autonomi_cli/Cargo.toml rename to autonomi-cli/Cargo.toml index 55a7caad32..0eea336fdf 100644 --- a/autonomi_cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -1,8 +1,12 @@ [package] -name = "autonomi_cli" +name = "autonomi-cli" version = "0.1.0" edition = "2021" +[[bin]] +name = "autonomi" +path = "src/main.rs" + [features] default = ["metrics"] local = ["sn_peers_acquisition/local", "autonomi/local"] diff --git a/autonomi_cli/README.md b/autonomi-cli/README.md similarity index 100% rename from autonomi_cli/README.md rename to autonomi-cli/README.md diff --git a/autonomi_cli/benches/files.rs b/autonomi-cli/benches/files.rs similarity index 100% rename from autonomi_cli/benches/files.rs rename to autonomi-cli/benches/files.rs diff --git a/autonomi_cli/src/access/data_dir.rs b/autonomi-cli/src/access/data_dir.rs similarity index 100% rename from autonomi_cli/src/access/data_dir.rs rename to autonomi-cli/src/access/data_dir.rs diff --git a/autonomi_cli/src/access/keys.rs b/autonomi-cli/src/access/keys.rs similarity index 100% rename from autonomi_cli/src/access/keys.rs rename to autonomi-cli/src/access/keys.rs diff --git a/autonomi_cli/src/access/mod.rs b/autonomi-cli/src/access/mod.rs similarity index 100% rename from autonomi_cli/src/access/mod.rs rename to autonomi-cli/src/access/mod.rs diff --git a/autonomi_cli/src/access/network.rs b/autonomi-cli/src/access/network.rs similarity index 100% rename from autonomi_cli/src/access/network.rs rename to autonomi-cli/src/access/network.rs diff --git a/autonomi_cli/src/actions/connect.rs b/autonomi-cli/src/actions/connect.rs similarity index 100% rename from autonomi_cli/src/actions/connect.rs rename to autonomi-cli/src/actions/connect.rs diff --git a/autonomi_cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs similarity index 100% rename from autonomi_cli/src/actions/download.rs rename to autonomi-cli/src/actions/download.rs diff --git a/autonomi_cli/src/actions/mod.rs b/autonomi-cli/src/actions/mod.rs similarity index 100% rename from autonomi_cli/src/actions/mod.rs rename to autonomi-cli/src/actions/mod.rs diff --git a/autonomi_cli/src/actions/progress_bar.rs b/autonomi-cli/src/actions/progress_bar.rs similarity index 100% rename from autonomi_cli/src/actions/progress_bar.rs rename to autonomi-cli/src/actions/progress_bar.rs diff --git a/autonomi_cli/src/commands.rs b/autonomi-cli/src/commands.rs similarity index 100% rename from autonomi_cli/src/commands.rs rename to autonomi-cli/src/commands.rs diff --git a/autonomi_cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs similarity index 100% rename from autonomi_cli/src/commands/file.rs rename to autonomi-cli/src/commands/file.rs diff --git a/autonomi_cli/src/commands/register.rs b/autonomi-cli/src/commands/register.rs similarity index 100% rename from autonomi_cli/src/commands/register.rs rename to autonomi-cli/src/commands/register.rs diff --git a/autonomi_cli/src/commands/vault.rs b/autonomi-cli/src/commands/vault.rs similarity index 100% rename from autonomi_cli/src/commands/vault.rs rename to autonomi-cli/src/commands/vault.rs diff --git a/autonomi_cli/src/main.rs b/autonomi-cli/src/main.rs similarity index 97% rename from autonomi_cli/src/main.rs rename to autonomi-cli/src/main.rs index 
2c15b82a66..2cf4e9cd61 100644 --- a/autonomi_cli/src/main.rs +++ b/autonomi-cli/src/main.rs @@ -51,7 +51,7 @@ fn init_logging_and_metrics(opt: &Opt) -> Result<(ReloadHandle, Option Date: Tue, 15 Oct 2024 14:45:58 +0900 Subject: [PATCH 191/255] feat(node): kill nodes running on host with little spare CPU uses node control flow to exit a node if we consistently detect high CPU --- Cargo.lock | 1 + sn_node/Cargo.toml | 1 + sn_node/src/bin/safenode/main.rs | 53 ++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index f5267b6c79..8ea74c2bc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8256,6 +8256,7 @@ dependencies = [ "sn_service_management", "sn_transfers", "strum", + "sysinfo", "tempfile", "test_utils", "thiserror", diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 46a90789d6..2df87dcb0e 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -60,6 +60,7 @@ sn_registers = { path = "../sn_registers", version = "0.3.21" } sn_transfers = { path = "../sn_transfers", version = "0.19.3" } sn_service_management = { path = "../sn_service_management", version = "0.3.14" } sn_evm = { path = "../sn_evm", version = "0.1" } +sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 802c6696a8..487eec9a69 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -34,6 +34,7 @@ use std::{ process::Command, time::Duration, }; +use sysinfo::{self, System}; use tokio::{ runtime::Runtime, sync::{broadcast::error::RecvError, mpsc}, @@ -387,6 +388,58 @@ You can check your reward balance by running: error!("Failed to send node control msg to safenode bin main thread: {err}"); } }); + let ctrl_tx_clone_cpu = ctrl_tx.clone(); + // Monitor host CPU usage + tokio::spawn(async move { + use rand::{thread_rng, Rng}; + + const CPU_CHECK_INTERVAL: Duration = Duration::from_secs(60); + const CPU_USAGE_THRESHOLD: f32 = 50.0; + const HIGH_CPU_CONSECUTIVE_LIMIT: u8 = 5; + const NODE_STOP_DELAY: Duration = Duration::from_secs(1); + const INITIAL_DELAY_MIN_S: u64 = 10; + const INITIAL_DELAY_MAX_S: u64 = + HIGH_CPU_CONSECUTIVE_LIMIT as u64 * CPU_CHECK_INTERVAL.as_secs(); + const JITTER_MIN_S: u64 = 1; + const JITTER_MAX_S: u64 = 15; + + let mut sys = System::new_all(); + + let mut high_cpu_count: u8 = 0; + + // Random initial delay between 1 and 5 minutes + let initial_delay = + Duration::from_secs(thread_rng().gen_range(INITIAL_DELAY_MIN_S..=INITIAL_DELAY_MAX_S)); + tokio::time::sleep(initial_delay).await; + + loop { + sys.refresh_cpu(); + let cpu_usage = sys.global_cpu_info().cpu_usage(); + + if cpu_usage > CPU_USAGE_THRESHOLD { + high_cpu_count += 1; + } else { + high_cpu_count = 0; + } + + if high_cpu_count >= HIGH_CPU_CONSECUTIVE_LIMIT { + if let Err(err) = ctrl_tx_clone_cpu + .send(NodeCtrl::Stop { + delay: NODE_STOP_DELAY, + cause: eyre!("Excess host CPU detected for {HIGH_CPU_CONSECUTIVE_LIMIT} consecutive minutes!"), + }) + .await + { + error!("Failed to send node control msg to safenode bin main thread: {err}"); + } + break; + } + + // Add jitter to the interval + let jitter = Duration::from_secs(thread_rng().gen_range(JITTER_MIN_S..=JITTER_MAX_S)); + tokio::time::sleep(CPU_CHECK_INTERVAL + jitter).await; + } + }); // Start up gRPC interface if enabled by user if let Some(addr) = rpc { From 6a0e16e29d383936222ca074ec2553b991c12264 Mon Sep 17 00:00:00 2001 From: 
Josh Wilson Date: Mon, 15 Jul 2024 12:13:02 +0900 Subject: [PATCH 192/255] feat(networking): initial range based gets implementation --- sn_networking/src/cmd.rs | 37 +++++++----- sn_networking/src/driver.rs | 101 ++++++++++++++++++++++++++++++++- sn_networking/src/event/mod.rs | 2 + 3 files changed, 125 insertions(+), 15 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index b0eda19190..bdeae6bdb8 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -12,12 +12,11 @@ use crate::{ event::TerminateNodeReason, log_markers::Marker, multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, - REPLICATION_PEERS_COUNT, }; use libp2p::{ kad::{ store::{Error as StoreError, RecordStore}, - Quorum, Record, RecordKey, + KBucketDistance, Quorum, Record, RecordKey, }, Multiaddr, PeerId, }; @@ -547,7 +546,9 @@ impl SwarmDriver { match cmd { LocalSwarmCmd::TriggerIntervalReplication => { cmd_string = "TriggerIntervalReplication"; - self.try_interval_replication()?; + + let our_acceptable_range = self.get_peers_within_get_range(); + self.try_interval_replication(our_acceptable_range)?; } LocalSwarmCmd::GetLocalStoreCost { key, sender } => { cmd_string = "GetLocalStoreCost"; @@ -981,22 +982,32 @@ impl SwarmDriver { let _ = self.quotes_history.insert(peer_id, quote); } - fn try_interval_replication(&mut self) -> Result<()> { + fn try_interval_replication( + &mut self, + acceptable_distance_range: Option, + ) -> Result<()> { // get closest peers from buckets, sorted by increasing distance to us let our_peer_id = self.self_peer_id.into(); - let closest_k_peers = self + + let our_address = NetworkAddress::from_peer(self.self_peer_id); + let our_key = our_address.as_kbucket_key(); + + let mut replicate_targets = self .swarm .behaviour_mut() .kademlia .get_closest_local_peers(&our_peer_id) - // Map KBucketKey to PeerId. - .map(|key| key.into_preimage()); - - // Only grab the closest nodes within the REPLICATE_RANGE - let mut replicate_targets = closest_k_peers - .into_iter() - // add some leeway to allow for divergent knowledge - .take(REPLICATION_PEERS_COUNT) + .filter_map(|key| { + // Map KBucketKey to PeerId. + if let Some(distance) = acceptable_distance_range { + if distance < our_key.distance(&key) { + return None; + } + } + + let peer_id = key.into_preimage(); + Some(peer_id) + }) .collect::>(); let now = Instant::now(); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index d8d71c5601..f92f2a5333 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -32,7 +32,6 @@ use futures::future::Either; use futures::StreamExt; #[cfg(feature = "local")] use libp2p::mdns; -use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; use libp2p::{ identity::Keypair, @@ -45,6 +44,7 @@ use libp2p::{ }, Multiaddr, PeerId, }; +use libp2p::{kad::KBucketDistance, Transport as _}; #[cfg(feature = "open-metrics")] use prometheus_client::metrics::info::Info; use sn_evm::PaymentQuote; @@ -59,7 +59,7 @@ use sn_protocol::{ }; use sn_registers::SignedRegister; use std::{ - collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, fmt::Debug, net::SocketAddr, num::NonZeroUsize, @@ -77,6 +77,9 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 /// Interval over which we query relay manager to check if we can make any more reservations. 
pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); +// Number of range distances to keep in the circular buffer +pub const X_RANGE_STORAGE_LIMIT: usize = 100; + const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); /// The ways in which the Get Closest queries are used. @@ -698,6 +701,7 @@ impl NetworkBuilder { bad_nodes: Default::default(), quotes_history: Default::default(), replication_targets: Default::default(), + range_distances: VecDeque::with_capacity(X_RANGE_STORAGE_LIMIT), }; let network = Network::new( @@ -756,6 +760,11 @@ pub struct SwarmDriver { pub(crate) bad_nodes: BadNodes, pub(crate) quotes_history: BTreeMap, pub(crate) replication_targets: BTreeMap, + + // The recent range_distances calculated by the node + // Each update is generated when there is a routing table change + // We use the largest of these X_STORAGE_LIMIT values as our X distance. + pub(crate) range_distances: VecDeque, } impl SwarmDriver { @@ -839,6 +848,94 @@ impl SwarmDriver { // ---------- Crate helpers ------------------- // -------------------------------------------- + /// Defines a new X distance range to be used for GETs and data replication + pub(crate) fn add_distance_range_for_gets(&mut self) { + // TODO: define how/where this distance comes from + + const TARGET_PEER: usize = 42; + + let our_address = NetworkAddress::from_peer(self.self_peer_id); + let our_key = our_address.as_kbucket_key(); + let mut sorted_peers_iter = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&our_key); + + let mut last_peers_distance = KBucketDistance::default(); + let mut prior_peer = sorted_peers_iter.next(); + + // get 42nd or farthest + for (i, peer) in sorted_peers_iter.enumerate() { + if let Some(prior_peer) = prior_peer { + let this_last_peers_distance = prior_peer.distance(&peer); + + // only override it if it's larger! + // + // how does this play with keeping 100? + // We only update with peers changes... Perhaps this negates the need for a buffer? + // + // + // if this_last_peers_distance > last_peers_distance { + last_peers_distance = this_last_peers_distance; + // } + } + + // info!("Peeeeeer {i}: {peer:?} - distance: {last_peers_distance:?}"); + prior_peer = Some(peer); + + if i == TARGET_PEER { + break; + } + } + + // last_peers_distance = last_peers_distance * DISTANCE_MULTIPLIER; + + if last_peers_distance == KBucketDistance::default() { + warn!("No peers found, no range distance can be set/added"); + return; + } + + if self.range_distances.len() == X_RANGE_STORAGE_LIMIT { + self.range_distances.pop_front(); + } + + info!("Adding new distance range: {last_peers_distance:?}"); + + self.range_distances.push_back(last_peers_distance); + } + + pub(crate) fn get_peers_within_get_range(&mut self) -> Option { + // TODO: Is this the correct keytype for comparisons? 
+ let our_address = NetworkAddress::from_peer(self.self_peer_id); + let our_key = our_address.as_kbucket_key(); + + let sorted_peers_iter = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&our_key); + + let farthest_get_range_record_distance = self.range_distances.iter().max(); + + if let Some(farthest_range) = farthest_get_range_record_distance { + // lets print how many are within range + for (i, peer) in sorted_peers_iter.enumerate() { + let peer_distance_from_us = peer.distance(&our_key); + + if &peer_distance_from_us < farthest_range { + info!("Peer {peer:?} is {peer_distance_from_us:?} and would be within the range based search group!"); + info!("That's {i:?} peers within the range!"); + } + } + } else { + warn!("No range distance has been set, no peers can be found within the range"); + return None; + } + + farthest_get_range_record_distance.copied() + } + /// Uses the closest k peers to estimate the farthest address as /// `K_VALUE / 2`th peer's bucket. fn get_responsbile_range_estimate( diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 4fa0f51b86..2396babf64 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -254,6 +254,8 @@ impl SwarmDriver { self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); + self.add_distance_range_for_gets(); + self.get_peers_within_get_range(); #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); From 2499b46cb380f28bd109d643c4d2f828a7aeb022 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 15 Jul 2024 14:14:42 +0900 Subject: [PATCH 193/255] chore(networking): rename sort_peers fns to clarify the accepted limit arg --- sn_networking/src/event/request_response.rs | 6 +++--- sn_networking/src/lib.rs | 8 ++++---- sn_node/src/replication.rs | 6 ++++-- sn_node/tests/verify_data_location.rs | 15 ++++++++------- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 4550772bf4..75afbfdfb5 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address, MsgResponder, NetworkError, - NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address_and_limit, MsgResponder, + NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, }; use itertools::Itertools; use libp2p::request_response::{self, Message}; @@ -298,7 +298,7 @@ impl SwarmDriver { .values() .filter_map(|(addr, record_type)| { if RecordType::Chunk == *record_type { - match sort_peers_by_address(&closest_peers, addr, CLOSE_GROUP_SIZE) { + match sort_peers_by_address_and_limit(&closest_peers, addr, CLOSE_GROUP_SIZE) { Ok(close_group) => { if close_group.contains(&&target_peer) { Some(addr.clone()) diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 27f07bdb3e..eeb76cf04d 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -97,17 +97,17 @@ const MIN_WAIT_BEFORE_READING_A_PUT: Duration = Duration::from_millis(300); /// Sort the provided peers by their distance to the given `NetworkAddress`. /// Return with the closest expected number of entries if has. 
-pub fn sort_peers_by_address<'a>( +pub fn sort_peers_by_address_and_limit<'a>( peers: &'a Vec, address: &NetworkAddress, expected_entries: usize, ) -> Result> { - sort_peers_by_key(peers, &address.as_kbucket_key(), expected_entries) + sort_peers_by_key_and_limit(peers, &address.as_kbucket_key(), expected_entries) } /// Sort the provided peers by their distance to the given `KBucketKey`. /// Return with the closest expected number of entries if has. -pub fn sort_peers_by_key<'a, T>( +pub fn sort_peers_by_key_and_limit<'a, T>( peers: &'a Vec, key: &KBucketKey, expected_entries: usize, @@ -903,7 +903,7 @@ impl Network { debug!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}"); } - let closest_peers = sort_peers_by_address(&closest_peers, key, CLOSE_GROUP_SIZE)?; + let closest_peers = sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?; Ok(closest_peers.into_iter().cloned().collect()) } diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 59e0cff078..37819df38d 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -11,7 +11,9 @@ use libp2p::{ kad::{Quorum, Record, RecordKey}, PeerId, }; -use sn_networking::{sort_peers_by_address, GetRecordCfg, Network, REPLICATION_PEERS_COUNT}; +use sn_networking::{ + sort_peers_by_address_and_limit, GetRecordCfg, Network, REPLICATION_PEERS_COUNT, +}; use sn_protocol::{ messages::{Cmd, Query, QueryResponse, Request, Response}, storage::RecordType, @@ -160,7 +162,7 @@ impl Node { let data_addr = NetworkAddress::from_record_key(&paid_key); - let sorted_based_on_addr = match sort_peers_by_address( + let sorted_based_on_addr = match sort_peers_by_address_and_limit( &closest_k_peers, &data_addr, REPLICATION_PEERS_COUNT, diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 641756fa2c..130254d6e5 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -22,7 +22,7 @@ use libp2p::{ }; use rand::{rngs::OsRng, Rng}; use sn_logging::LogBuilder; -use sn_networking::{sleep, sort_peers_by_key}; +use sn_networking::{sleep, sort_peers_by_key_and_limit}; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, @@ -160,8 +160,8 @@ fn print_node_close_groups(all_peers: &[PeerId]) { for (node_index, peer) in all_peers.iter().enumerate() { let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); - let closest_peers = - sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); + let closest_peers = sort_peers_by_key_and_limit(&all_peers, &key, CLOSE_GROUP_SIZE) + .expect("failed to sort peer"); let closest_peers_idx = closest_peers .iter() .map(|&&peer| { @@ -213,10 +213,11 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); let record_key = KBucketKey::from(key.to_vec()); - let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? - .into_iter() - .cloned() - .collect::>(); + let expected_holders = + sort_peers_by_key_and_limit(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
+ .into_iter()
+ .cloned()
+ .collect::>();

 let actual_holders = actual_holders_idx
 .iter()

From adae8c5e76233528479911e833f4def026c4ddd9 Mon Sep 17 00:00:00 2001
From: Josh Wilson 
Date: Thu, 25 Jul 2024 11:57:59 +0900
Subject: [PATCH 194/255] chore(networking): remove REPLICATION_PEERS_COUNT

---
 .github/workflows/merge.yml | 6 +-
 sn_client/src/api.rs | 29 +-
 sn_client/src/error.rs | 7 +-
 sn_client/src/test_utils.rs | 2 +-
 sn_client/src/wallet.rs | 38 ++-
 sn_networking/src/bootstrap.rs | 120 +--------
 sn_networking/src/cmd.rs | 177 ++++++++-----
 sn_networking/src/driver.rs | 193 ++++++--------
 sn_networking/src/error.rs | 11 +-
 sn_networking/src/event/kad.rs | 278 ++++++++++++--------
 sn_networking/src/event/mod.rs | 2 -
 sn_networking/src/event/request_response.rs | 156 ++++++-----
 sn_networking/src/event/swarm.rs | 50 ++--
 sn_networking/src/lib.rs | 236 +++++++++++++++--
 sn_networking/src/network_discovery.rs | 37 ++-
 sn_networking/src/record_store.rs | 23 +-
 sn_networking/src/record_store_api.rs | 14 +-
 sn_networking/src/replication_fetcher.rs | 56 +++-
 sn_networking/src/transfers.rs | 38 +--
 sn_node/src/put_validation.rs | 13 +-
 sn_node/src/replication.rs | 124 +++------
 sn_node/tests/double_spend.rs | 196 ++++++++------
 sn_node/tests/verify_data_location.rs | 24 +-
 sn_transfers/src/wallet/error.rs | 10 +
 24 files changed, 1048 insertions(+), 792 deletions(-)

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 98ee999b06..d37b04a679 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -743,6 +743,10 @@ jobs:
 echo "EVM_NETWORK has been set to $EVM_NETWORK"
 fi

+ - name: Wait for the network to stabilise
+ shell: bash
+ run: sleep 30
+
 - name: Verify the routing tables of the nodes
 run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture
 env:
@@ -768,7 +772,7 @@ jobs:
 uses: maidsafe/sn-local-testnet-action@main
 with:
 action: stop
- log_file_prefix: safe_test_logs_data_location
+ log_file_prefix: safe_test_logs_data_location_routing_table
 platform: ${{ matrix.os }}

 - name: Verify restart of nodes using rg
diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs
index 54bf53f8a2..0f64b2ee06 100644
--- a/sn_client/src/api.rs
+++ b/sn_client/src/api.rs
@@ -14,7 +14,7 @@ use super::{
 use bls::{PublicKey, SecretKey, Signature};
 use libp2p::{
 identity::Keypair,
- kad::{Quorum, Record},
+ kad::{KBucketDistance, Quorum, Record},
 Multiaddr, PeerId,
 };
 use rand::{thread_rng, Rng};
@@ -305,6 +305,11 @@ impl Client {
 self.events_broadcaster.subscribe()
 }

+ /// Return the underlying network GetRange
+ pub async fn get_range(&self) -> Result {
+ self.network.get_range().await.map_err(Error::from)
+ }
+
 /// Sign the given data.
 ///
 /// # Arguments
@@ -823,18 +828,26 @@ impl Client {

 // When there is retry on Put side, no need to have a retry on Get
 let verification_cfg = GetRecordCfg {
- get_quorum: Quorum::Majority,
+ get_quorum: Quorum::All,
 retry_strategy: None,
 target_record: record_to_verify,
 expected_holders,
 is_register: false,
 };
+
+ let verification = if verify_store {
+ Some((VerificationKind::Network, verification_cfg))
+ } else {
+ None
+ };
+
 let put_cfg = PutRecordCfg {
- put_quorum: Quorum::Majority,
+ put_quorum: Quorum::All,
 retry_strategy: Some(RetryStrategy::Persistent),
 use_put_record_to: None,
- verification: Some((VerificationKind::Network, verification_cfg)),
+ verification,
 };
+
 Ok(self.network.put_record(record, &put_cfg).await?)
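// A hedged sketch of what the Quorum::Majority -> Quorum::All change above
// means in practice: how many matching answers a query needs before it is
// considered satisfied. The enum mirrors libp2p's Quorum; the helper name and
// group size are illustrative assumptions, not APIs from this patch:
//
//     use std::num::NonZeroUsize;
//
//     enum Quorum { One, Majority, All, N(NonZeroUsize) }
//
//     fn required_answers(quorum: &Quorum, group_size: usize) -> usize {
//         match quorum {
//             Quorum::One => 1,
//             Quorum::Majority => group_size / 2 + 1,
//             Quorum::All => group_size, // strictest: every queried holder must answer
//             Quorum::N(n) => n.get(),
//         }
//     }
//
// With a close group of, say, 5 peers, Majority needs 3 matching answers while
// All needs 5, which is why spend reads and writes move to Quorum::All here.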
} @@ -871,7 +884,7 @@ impl Client { self.try_fetch_spend_from_network( address, GetRecordCfg { - get_quorum: Quorum::Majority, + get_quorum: Quorum::All, retry_strategy: Some(RetryStrategy::Balanced), target_record: None, expected_holders: Default::default(), @@ -904,7 +917,7 @@ impl Client { self.try_fetch_spend_from_network( address, GetRecordCfg { - get_quorum: Quorum::Majority, + get_quorum: Quorum::All, retry_strategy: None, target_record: None, expected_holders: Default::default(), @@ -961,9 +974,7 @@ impl Client { } Err(err) => { warn!("Invalid signed spend got from network for {address:?}: {err:?}."); - Err(Error::CouldNotVerifyTransfer(format!( - "Verification failed for spent at {address:?} with error {err:?}" - ))) + Err(Error::from(err)) } } } diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs index d5af8bb22f..618632303c 100644 --- a/sn_client/src/error.rs +++ b/sn_client/src/error.rs @@ -13,6 +13,7 @@ use crate::UploadSummary; use super::ClientEvent; use sn_protocol::NetworkAddress; use sn_registers::{Entry, EntryHash}; +use sn_transfers::SpendAddress; use std::collections::BTreeSet; use thiserror::Error; use tokio::time::Duration; @@ -45,6 +46,9 @@ pub enum Error { #[error("Chunks error {0}.")] Chunks(#[from] super::chunks::Error), + #[error("No cashnote found at {0:?}.")] + NoCashNoteFound(SpendAddress), + #[error("Decrypting a Folder's item failed: {0}")] FolderEntryDecryption(EntryHash), @@ -63,9 +67,6 @@ pub enum Error { #[error(transparent)] JoinError(#[from] tokio::task::JoinError), - /// A general error when verifying a transfer validity in the network. - #[error("Failed to verify transfer validity in the network {0}")] - CouldNotVerifyTransfer(String), #[error("Invalid DAG")] InvalidDag, #[error("Serialization error: {0:?}")] diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs index 5560b1e0b8..5e0485e543 100644 --- a/sn_client/src/test_utils.rs +++ b/sn_client/src/test_utils.rs @@ -29,7 +29,7 @@ use tracing::{info, warn}; pub const AMOUNT_TO_FUND_WALLETS: u64 = 100 * 1_000_000_000; // The number of times to try to load the faucet wallet -const LOAD_FAUCET_WALLET_RETRIES: usize = 6; +const LOAD_FAUCET_WALLET_RETRIES: usize = 10; // mutex to restrict access to faucet wallet from concurrent tests static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs index 93c6439b3a..c2078b824e 100644 --- a/sn_client/src/wallet.rs +++ b/sn_client/src/wallet.rs @@ -1009,9 +1009,7 @@ impl Client { } if cash_notes.is_empty() { - return Err(WalletError::CouldNotVerifyTransfer( - "All the redeemed CashNotes are already spent".to_string(), - )); + return Err(WalletError::AllRedeemedCashnotesSpent); } Ok(cash_notes) @@ -1049,14 +1047,22 @@ impl Client { /// # } /// ``` pub async fn verify_cashnote(&self, cash_note: &CashNote) -> WalletResult<()> { + let address = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); + // We need to get all the spends in the cash_note from the network, // and compare them to the spends in the cash_note, to know if the // transfer is considered valid in the network. 
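// The loop below fans out one network lookup per parent spend and then joins
// them all; a minimal standalone sketch of that pattern (futures crate;
// `fetch` is a hypothetical stand-in for `get_spend_from_network`):
//
//     use futures::future::join_all;
//
//     async fn fetch(id: u32) -> Result<u32, String> {
//         Ok(id) // pretend network lookup
//     }
//
//     async fn fetch_all(ids: Vec<u32>) -> Result<Vec<u32>, String> {
//         // run every lookup concurrently; collecting into Result fails
//         // fast on the first error
//         join_all(ids.into_iter().map(fetch)).await.into_iter().collect()
//     }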
let mut tasks = Vec::new(); + + info!( + "parent spends for cn; {address:?}: {:?}", + &cash_note.parent_spends.len() + ); + for spend in &cash_note.parent_spends { let address = SpendAddress::from_unique_pubkey(spend.unique_pubkey()); - debug!( - "Getting spend for pubkey {:?} from network at {address:?}", + warn!( + "Getting parent spend for cn {address:?} pubkey {:?} from network at {address:?}", spend.unique_pubkey() ); tasks.push(self.get_spend_from_network(address)); @@ -1064,8 +1070,17 @@ impl Client { let mut received_spends = std::collections::BTreeSet::new(); for result in join_all(tasks).await { - let network_valid_spend = - result.map_err(|err| WalletError::CouldNotVerifyTransfer(err.to_string()))?; + let network_valid_spend = match result { + Ok(spend) => Ok(spend), + Err(error) => match error { + Error::Network(sn_networking::NetworkError::DoubleSpendAttempt(spends)) => { + warn!("DoubleSpentAttempt found with {spends:?}"); + Err(WalletError::BurntSpend) + } + err => Err(WalletError::CouldNotVerifyTransfer(format!("{err:?}"))), + }, + }?; + let _ = received_spends.insert(network_valid_spend); } @@ -1074,9 +1089,12 @@ impl Client { if received_spends == cash_note.parent_spends { return Ok(()); } - Err(WalletError::CouldNotVerifyTransfer( - "The spends in network were not the same as the ones in the CashNote. The parents of this CashNote are probably double spends.".into(), - )) + + warn!( + "Unexpected parent spends found in CashNote verification at {:?}: {received_spends:?}.", + address + ); + Err(WalletError::UnexpectedParentSpends(address)) } } diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index f8b7cf1e59..5c28c9a4d2 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -7,45 +7,19 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{driver::PendingGetClosestType, SwarmDriver}; -use rand::{rngs::OsRng, Rng}; use tokio::time::Duration; -use crate::target_arch::{interval, Instant, Interval}; +use crate::target_arch::Instant; /// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the /// routing table. -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10); - -/// Every BOOTSTRAP_CONNECTED_PEERS_STEP connected peer, we step up the BOOTSTRAP_INTERVAL to slow down bootstrapping -/// process -const BOOTSTRAP_CONNECTED_PEERS_STEP: u32 = 5; - -/// If the previously added peer has been before LAST_PEER_ADDED_TIME_LIMIT, then we should slowdown the bootstrapping -/// process. This is to make sure we don't flood the network with `FindNode` msgs. -const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); - -/// A minimum interval to prevent bootstrap got triggered too often -const LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(30); - -/// The bootstrap interval to use if we haven't added any new peers in a while. -const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; +pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(30); impl SwarmDriver { /// This functions triggers network discovery based on when the last peer was added to the RT and the number of - /// peers in RT. The function also returns a new bootstrap interval that is proportional to the number of - /// peers in RT, so more peers in RT, the longer the interval. 
- pub(crate) async fn run_bootstrap_continuously( - &mut self, - current_bootstrap_interval: Duration, - ) -> Option { - let (should_bootstrap, new_interval) = self - .bootstrap - .should_we_bootstrap(self.peers_in_rt as u32, current_bootstrap_interval) - .await; - if should_bootstrap { - self.trigger_network_discovery(); - } - new_interval + /// peers in RT. + pub(crate) fn run_bootstrap_continuously(&mut self) { + self.trigger_network_discovery(); } pub(crate) fn trigger_network_discovery(&mut self) { @@ -61,27 +35,27 @@ impl SwarmDriver { .get_closest_peers(addr.as_bytes()); let _ = self.pending_get_closest_peers.insert( query_id, - (PendingGetClosestType::NetworkDiscovery, Default::default()), + ( + addr, + PendingGetClosestType::NetworkDiscovery, + Default::default(), + ), ); } self.bootstrap.initiated(); - debug!("Trigger network discovery took {:?}", now.elapsed()); + info!("Trigger network discovery took {:?}", now.elapsed()); } } /// Tracks and helps with the continuous kad::bootstrapping process pub(crate) struct ContinuousBootstrap { - initial_bootstrap_done: bool, - last_peer_added_instant: Instant, last_bootstrap_triggered: Option, } impl ContinuousBootstrap { pub(crate) fn new() -> Self { Self { - initial_bootstrap_done: false, - last_peer_added_instant: Instant::now(), last_bootstrap_triggered: None, } } @@ -90,76 +64,4 @@ impl ContinuousBootstrap { pub(crate) fn initiated(&mut self) { self.last_bootstrap_triggered = Some(Instant::now()); } - - /// Notify about a newly added peer to the RT. This will help with slowing down the bootstrap process. - /// Returns `true` if we have to perform the initial bootstrapping. - pub(crate) fn notify_new_peer(&mut self) -> bool { - self.last_peer_added_instant = Instant::now(); - // true to kick off the initial bootstrapping. `run_bootstrap_continuously` might kick of so soon that we might - // not have a single peer in the RT and we'd not perform any bootstrapping for a while. - if !self.initial_bootstrap_done { - self.initial_bootstrap_done = true; - true - } else { - false - } - } - - /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. - /// Also optionally returns the new interval to re-bootstrap. - pub(crate) async fn should_we_bootstrap( - &self, - peers_in_rt: u32, - current_interval: Duration, - ) -> (bool, Option) { - let is_ongoing = if let Some(last_bootstrap_triggered) = self.last_bootstrap_triggered { - last_bootstrap_triggered.elapsed() < LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT - } else { - false - }; - let should_bootstrap = !is_ongoing && peers_in_rt >= 1; - - // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer to our RT, then, slowdown - // the bootstrapping process. - // Don't slow down if we haven't even added one peer to our RT. - if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { - // To avoid a heart beat like cpu usage due to the 1K candidates generation, - // randomize the interval within certain range - let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( - NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, - ); - let no_peer_added_slowdown_interval_duration = - Duration::from_secs(no_peer_added_slowdown_interval); - info!( - "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. 
Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}"
- );
-
- // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32.
- #[cfg_attr(target_arch = "wasm32", allow(unused_mut))]
- let mut new_interval = interval(no_peer_added_slowdown_interval_duration);
- #[cfg(not(target_arch = "wasm32"))]
- new_interval.tick().await;
-
- return (should_bootstrap, Some(new_interval));
- }
-
- // increment bootstrap_interval in steps of BOOTSTRAP_INTERVAL every BOOTSTRAP_CONNECTED_PEERS_STEP
- let step = peers_in_rt / BOOTSTRAP_CONNECTED_PEERS_STEP;
- let step = std::cmp::max(1, step);
- let new_interval = BOOTSTRAP_INTERVAL * step;
- let new_interval = if new_interval > current_interval {
- info!("More peers have been added to our RT!. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}");
-
- // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32.
- #[cfg_attr(target_arch = "wasm32", allow(unused_mut))]
- let mut interval = interval(new_interval);
- #[cfg(not(target_arch = "wasm32"))]
- interval.tick().await;
-
- Some(interval)
- } else {
- None
- };
- (should_bootstrap, new_interval)
- }
 }
diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index bdeae6bdb8..8ab88121ea 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -11,7 +11,8 @@ use crate::{
 error::{NetworkError, Result},
 event::TerminateNodeReason,
 log_markers::Marker,
- multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE,
+ multiaddr_pop_p2p, sort_peers_by_address_and_limit, GetRecordCfg, GetRecordError, MsgResponder,
+ NetworkEvent, CLOSE_GROUP_SIZE,
 };
 use libp2p::{
 kad::{
@@ -55,6 +56,15 @@ pub enum NodeIssue {

 /// Commands to send to the Swarm
 pub enum LocalSwarmCmd {
+ // Returns all the peers from all the k-buckets in the local Routing Table.
+ // This includes our PeerId as well.
+ GetAllLocalPeersExcludingSelf {
+ sender: oneshot::Sender>,
+ },
+ /// Return the current GetRange as determined by the SwarmDriver
+ GetCurrentRange {
+ sender: oneshot::Sender,
+ },
 /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that
 /// bucket.
 GetKBuckets {
@@ -66,8 +76,8 @@ pub enum LocalSwarmCmd {
 sender: oneshot::Sender>,
 },
 // Get closest peers from the local RoutingTable
- GetCloseGroupLocalPeers {
- key: NetworkAddress,
+ GetCloseRangeLocalPeers {
+ address: NetworkAddress,
 sender: oneshot::Sender>,
 },
 GetSwarmLocalState(oneshot::Sender),
@@ -212,15 +222,11 @@ impl Debug for LocalSwarmCmd {
 PrettyPrintRecordKey::from(key)
 )
 }
-
 LocalSwarmCmd::GetClosestKLocalPeers { .. } => {
 write!(f, "LocalSwarmCmd::GetClosestKLocalPeers")
 }
- LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => {
- write!(
- f,
- "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}"
- )
+ LocalSwarmCmd::GetCloseRangeLocalPeers { address: key, .. } => {
+ write!(f, "LocalSwarmCmd::GetCloseRangeLocalPeers {{ address: {key:?} }}")
 }
 LocalSwarmCmd::GetLocalStoreCost { .. } => {
 write!(f, "LocalSwarmCmd::GetLocalStoreCost")
@@ -241,6 +247,12 @@ impl Debug for LocalSwarmCmd {
 LocalSwarmCmd::GetKBuckets { .. } => {
 write!(f, "LocalSwarmCmd::GetKBuckets")
 }
+ LocalSwarmCmd::GetCurrentRange { .. } => {
+ write!(f, "LocalSwarmCmd::GetCurrentRange")
+ }
+ LocalSwarmCmd::GetAllLocalPeersExcludingSelf { .. } => {
+ write!(f, "LocalSwarmCmd::GetAllLocalPeersExcludingSelf")
+ }
 LocalSwarmCmd::GetSwarmLocalState { .. } => {
 write!(f, "LocalSwarmCmd::GetSwarmLocalState")
 }
@@ -471,6 +483,7 @@ impl SwarmDriver {
 let _ = self.pending_get_closest_peers.insert(
 query_id,
 (
+ key,
 PendingGetClosestType::FunctionCall(sender),
 Default::default(),
 ),
@@ -540,15 +553,41 @@ impl SwarmDriver {
 Ok(())
 }
+
+ /// Return the RecordType
+ pub(crate) fn get_type_from_record(record: &Record) -> Result {
+ let key = record.key.clone();
+ let record_key = PrettyPrintRecordKey::from(&key);
+
+ match RecordHeader::from_record(record) {
+ Ok(record_header) => match record_header.kind {
+ RecordKind::Chunk => Ok(RecordType::Chunk),
+ RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
+ RecordKind::Spend | RecordKind::Register => {
+ let content_hash = XorName::from_content(&record.value);
+ Ok(RecordType::NonChunk(content_hash))
+ }
+ RecordKind::ChunkWithPayment
+ | RecordKind::RegisterWithPayment
+ | RecordKind::ScratchpadWithPayment => {
+ error!("Record {record_key:?} with payment shall not be stored locally.");
+ Err(NetworkError::InCorrectRecordHeader)
+ }
+ },
+ Err(err) => {
+ error!("For record {record_key:?}, failed to parse record_header {err:?}");
+ Err(NetworkError::InCorrectRecordHeader)
+ }
+ }
+ }
+
 pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> {
 let start = Instant::now();
 let mut cmd_string;
 match cmd {
 LocalSwarmCmd::TriggerIntervalReplication => {
 cmd_string = "TriggerIntervalReplication";
-
- let our_acceptable_range = self.get_peers_within_get_range();
- self.try_interval_replication(our_acceptable_range)?;
+ self.try_interval_replication()?;
 }
 LocalSwarmCmd::GetLocalStoreCost { key, sender } => {
 cmd_string = "GetLocalStoreCost";
@@ -625,28 +664,7 @@ impl SwarmDriver {
 let key = record.key.clone();
 let record_key = PrettyPrintRecordKey::from(&key);

- let record_type = match RecordHeader::from_record(&record) {
- Ok(record_header) => {
- match record_header.kind {
- RecordKind::Chunk => RecordType::Chunk,
- RecordKind::Scratchpad => RecordType::Scratchpad,
- RecordKind::Spend | RecordKind::Register => {
- let content_hash = XorName::from_content(&record.value);
- RecordType::NonChunk(content_hash)
- }
- RecordKind::ChunkWithPayment
- | RecordKind::RegisterWithPayment
- | RecordKind::ScratchpadWithPayment => {
- error!("Record {record_key:?} with payment shall not be stored locally.");
- return Err(NetworkError::InCorrectRecordHeader);
- }
- }
- }
- Err(err) => {
- error!("For record {record_key:?}, failed to parse record_header {err:?}");
- return Err(NetworkError::InCorrectRecordHeader);
- }
- };
+ let record_type = Self::get_type_from_record(&record)?;

 let result = self
 .swarm
@@ -695,16 +713,8 @@ impl SwarmDriver {

 // The record_store will prune far records and setup a `distance range`,
 // once reached the `max_records` cap.
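// A hedged sketch of the distance-range pruning referenced in the comment
// above: once the store hits its cap, records lying farther from our own
// address than the current range are the pruning candidates (`u64` stands in
// for the 256-bit XOR distance; names are illustrative only):
//
//     fn prune_candidates<'a>(
//         records: &'a [(u64, &'a str)], // (distance_from_self, record_key)
//         range: u64,
//     ) -> Vec<&'a str> {
//         records
//             .iter()
//             .filter(|(distance, _)| *distance > range)
//             .map(|(_, key)| *key)
//             .collect()
//     }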
- if let Some(distance) = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .get_farthest_replication_distance_bucket() - { - self.replication_fetcher - .set_replication_distance_range(distance); - } + self.replication_fetcher + .set_replication_distance_range(self.get_request_range()); if let Err(err) = result { error!("Can't store verified record {record_key:?} locally: {err:?}"); @@ -761,6 +771,10 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } + LocalSwarmCmd::GetCurrentRange { sender } => { + cmd_string = "GetCurrentRange"; + let _ = sender.send(self.get_request_range()); + } LocalSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); @@ -779,9 +793,13 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => { - cmd_string = "GetCloseGroupLocalPeers"; - let key = key.as_kbucket_key(); + LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender } => { + cmd_string = "GetAllLocalPeersExcludingSelf"; + let _ = sender.send(self.get_all_local_peers_excluding_self()); + } + LocalSwarmCmd::GetCloseRangeLocalPeers { address, sender } => { + cmd_string = "GetCloseRangeLocalPeers"; + let key = address.as_kbucket_key(); // calls `kbuckets.closest_keys(key)` internally, which orders the peers by // increasing distance // Note it will return all peers, heance a chop down is required. @@ -791,7 +809,6 @@ impl SwarmDriver { .kademlia .get_closest_local_peers(&key) .map(|peer| peer.into_preimage()) - .take(CLOSE_GROUP_SIZE) .collect(); let _ = sender.send(closest_peers); @@ -982,34 +999,68 @@ impl SwarmDriver { let _ = self.quotes_history.insert(peer_id, quote); } - fn try_interval_replication( + /// From all local peers, returns any within (and just exceeding) current get_range for a given key + pub(crate) fn get_filtered_peers_exceeding_range( &mut self, - acceptable_distance_range: Option, - ) -> Result<()> { - // get closest peers from buckets, sorted by increasing distance to us - let our_peer_id = self.self_peer_id.into(); - - let our_address = NetworkAddress::from_peer(self.self_peer_id); - let our_key = our_address.as_kbucket_key(); + target_address: &NetworkAddress, + ) -> Vec { + let acceptable_distance_range = self.get_request_range(); + let target_key = target_address.as_kbucket_key(); - let mut replicate_targets = self + let peers = self .swarm .behaviour_mut() .kademlia - .get_closest_local_peers(&our_peer_id) + .get_closest_local_peers(&target_key) .filter_map(|key| { - // Map KBucketKey to PeerId. - if let Some(distance) = acceptable_distance_range { - if distance < our_key.distance(&key) { - return None; - } + // here we compare _bucket_, not the exact distance. + // We want to include peers that are just outside the range + // Such that we can and will exceed the range in a search eventually + if acceptable_distance_range.ilog2() < target_key.distance(&key).ilog2() { + return None; } + // Map KBucketKey to PeerId. let peer_id = key.into_preimage(); Some(peer_id) }) .collect::>(); + peers + } + + /// From all local peers, returns any within current get_range for a given key + /// Excludes self + pub(crate) fn get_filtered_peers_exceeding_range_or_close_group( + &mut self, + target_address: &NetworkAddress, + ) -> Vec { + let filtered_peers = self.get_filtered_peers_exceeding_range(target_address); + + if filtered_peers.len() >= CLOSE_GROUP_SIZE { + filtered_peers + } else { + warn!("Insufficient peers within replication range. 
Falling back to use CLOSE_GROUP closest nodes"); + let all_peers = self.get_all_local_peers_excluding_self(); + match sort_peers_by_address_and_limit(&all_peers, target_address, CLOSE_GROUP_SIZE) { + Ok(peers) => peers.iter().map(|p| **p).collect(), + Err(err) => { + error!("sorting peers close to {target_address:?} failed, sort error: {err:?}"); + warn!( + "Using all peers within range even though it's less than CLOSE_GROUP_SIZE." + ); + filtered_peers + } + } + } + } + + fn try_interval_replication(&mut self) -> Result<()> { + let our_address = NetworkAddress::from_peer(self.self_peer_id); + + let mut replicate_targets = + self.get_filtered_peers_exceeding_range_or_close_group(&our_address); + let now = Instant::now(); self.replication_targets .retain(|_peer_id, timestamp| *timestamp > now); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index f92f2a5333..9bcd1a1ad9 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -20,6 +20,7 @@ use crate::{ record_store_api::UnifiedRecordStore, relay_manager::RelayManager, replication_fetcher::ReplicationFetcher, + sort_peers_by_distance_to, target_arch::{interval, spawn, Instant}, GetRecordError, Network, CLOSE_GROUP_SIZE, }; @@ -78,7 +79,7 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); // Number of range distances to keep in the circular buffer -pub const X_RANGE_STORAGE_LIMIT: usize = 100; +pub const GET_RANGE_STORAGE_LIMIT: usize = 100; const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); @@ -90,7 +91,9 @@ pub(crate) enum PendingGetClosestType { /// These are queries made by a function at the upper layers and contains a channel to send the result back. FunctionCall(oneshot::Sender>), } -type PendingGetClosest = HashMap)>; + +/// Maps a query to the address, the type of query and the peers that are being queried. +type PendingGetClosest = HashMap)>; /// Using XorName to differentiate different record content under the same key. type GetRecordResultMap = HashMap)>; @@ -701,7 +704,7 @@ impl NetworkBuilder { bad_nodes: Default::default(), quotes_history: Default::default(), replication_targets: Default::default(), - range_distances: VecDeque::with_capacity(X_RANGE_STORAGE_LIMIT), + range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT), }; let network = Network::new( @@ -737,7 +740,7 @@ pub struct SwarmDriver { pub(crate) local_cmd_sender: mpsc::Sender, local_cmd_receiver: mpsc::Receiver, network_cmd_receiver: mpsc::Receiver, - event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. + pub(crate) event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. 
/// Trackers for underlying behaviour related events pub(crate) pending_get_closest_peers: PendingGetClosest, @@ -815,28 +818,25 @@ impl SwarmDriver { // logging for handling events happens inside handle_swarm_events // otherwise we're rewriting match statements etc around this anwyay if let Err(err) = self.handle_swarm_events(swarm_event) { - warn!("Error while handling swarm event: {err}"); + trace!("Issue while handling swarm event: {err}"); } }, // thereafter we can check our intervals // runs every bootstrap_interval time _ = bootstrap_interval.tick() => { - if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await { - bootstrap_interval = new_interval; - } + self.run_bootstrap_continuously(); } _ = set_farthest_record_interval.tick() => { if !self.is_client { - let closest_k_peers = self.get_closest_k_value_local_peers(); - - if let Some(distance) = self.get_responsbile_range_estimate(&closest_k_peers) { - info!("Set responsible range to {distance}"); - // set any new distance to farthest record in the store - self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(distance); - // the distance range within the replication_fetcher shall be in sync as well - self.replication_fetcher.set_replication_distance_range(distance); - } + let get_range = self.get_request_range(); + // set any new distance to farthest record in the store + self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(get_range); + + // the distance range within the replication_fetcher shall be in sync as well + self.replication_fetcher.set_replication_distance_range(get_range); + + } } _ = relay_manager_reservation_interval.tick() => self.relay_manager.try_connecting_to_relay(&mut self.swarm, &self.bad_nodes), @@ -849,119 +849,92 @@ impl SwarmDriver { // -------------------------------------------- /// Defines a new X distance range to be used for GETs and data replication - pub(crate) fn add_distance_range_for_gets(&mut self) { - // TODO: define how/where this distance comes from + /// + /// Enumerates buckets and generates a random distance in the first bucket + /// that has at least `MIN_PEERS_IN_BUCKET` peers. + /// + pub(crate) fn set_request_range( + &mut self, + queried_address: NetworkAddress, + network_discovery_peers: &[PeerId], + ) { + info!( + "Adding a GetRange to our stash deriving from {:?} peers", + network_discovery_peers.len() + ); - const TARGET_PEER: usize = 42; + let sorted_distances = sort_peers_by_distance_to(network_discovery_peers, queried_address); - let our_address = NetworkAddress::from_peer(self.self_peer_id); - let our_key = our_address.as_kbucket_key(); - let mut sorted_peers_iter = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&our_key); - - let mut last_peers_distance = KBucketDistance::default(); - let mut prior_peer = sorted_peers_iter.next(); - - // get 42nd or farthest - for (i, peer) in sorted_peers_iter.enumerate() { - if let Some(prior_peer) = prior_peer { - let this_last_peers_distance = prior_peer.distance(&peer); - - // only override it if it's larger! - // - // how does this play with keeping 100? - // We only update with peers changes... Perhaps this negates the need for a buffer? 
- // - // - // if this_last_peers_distance > last_peers_distance { - last_peers_distance = this_last_peers_distance; - // } - } + let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect(); + info!("Sorted distances: {:?}", mapped); - // info!("Peeeeeer {i}: {peer:?} - distance: {last_peers_distance:?}"); - prior_peer = Some(peer); + // TODO: Test this calculation in larger networks + // We get around 5-7 peers returned here... We want to take further in larger networks + let farthest_peer_to_check = self + .get_all_local_peers_excluding_self() + .len() + .checked_div(3 * CLOSE_GROUP_SIZE) + .unwrap_or(1); - if i == TARGET_PEER { - break; - } - } + info!("Farthest peer we'll check: {:?}", farthest_peer_to_check); - // last_peers_distance = last_peers_distance * DISTANCE_MULTIPLIER; + let yardstick = if sorted_distances.len() >= farthest_peer_to_check { + sorted_distances.get(farthest_peer_to_check - 1) + } else { + sorted_distances.last() + }; + if let Some(distance) = yardstick { + if self.range_distances.len() >= GET_RANGE_STORAGE_LIMIT { + if let Some(distance) = self.range_distances.pop_front() { + trace!("Removed distance range: {:?}", distance.ilog2()); + } + } - if last_peers_distance == KBucketDistance::default() { - warn!("No peers found, no range distance can be set/added"); - return; - } + info!("Adding new distance range: {:?}", distance.ilog2()); - if self.range_distances.len() == X_RANGE_STORAGE_LIMIT { - self.range_distances.pop_front(); + self.range_distances.push_back(*distance); } - info!("Adding new distance range: {last_peers_distance:?}"); - - self.range_distances.push_back(last_peers_distance); + info!( + "Distance between peers in set_request_range call: {:?}", + yardstick + ); } - pub(crate) fn get_peers_within_get_range(&mut self) -> Option { - // TODO: Is this the correct keytype for comparisons? - let our_address = NetworkAddress::from_peer(self.self_peer_id); - let our_key = our_address.as_kbucket_key(); + /// Returns the KBucketDistance we are currently using as our X value + /// for range based search. + pub(crate) fn get_request_range(&self) -> KBucketDistance { + let mut sorted_distances = self.range_distances.iter().collect::>(); - let sorted_peers_iter = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&our_key); + sorted_distances.sort_unstable(); - let farthest_get_range_record_distance = self.range_distances.iter().max(); + let median_index = sorted_distances.len() / 2; - if let Some(farthest_range) = farthest_get_range_record_distance { - // lets print how many are within range - for (i, peer) in sorted_peers_iter.enumerate() { - let peer_distance_from_us = peer.distance(&our_key); + let default = KBucketDistance::default(); + let median = sorted_distances.get(median_index).cloned(); - if &peer_distance_from_us < farthest_range { - info!("Peer {peer:?} is {peer_distance_from_us:?} and would be within the range based search group!"); - info!("That's {i:?} peers within the range!"); - } - } + if let Some(dist) = median { + *dist } else { - warn!("No range distance has been set, no peers can be found within the range"); - return None; + default } - - farthest_get_range_record_distance.copied() } - /// Uses the closest k peers to estimate the farthest address as - /// `K_VALUE / 2`th peer's bucket. - fn get_responsbile_range_estimate( - &mut self, - // Sorted list of closest k peers to our peer id. - closest_k_peers: &[PeerId], - ) -> Option { - // if we don't have enough peers we don't set the distance range yet. 
- let mut farthest_distance = None; - - if closest_k_peers.is_empty() { - return farthest_distance; - } - - let our_address = NetworkAddress::from_peer(self.self_peer_id); + /// get all the peers from our local RoutingTable. Excluding self + pub(crate) fn get_all_local_peers_excluding_self(&mut self) -> Vec { + let our_peer_id = self.self_peer_id; + let mut all_peers: Vec = vec![]; + for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { + for entry in kbucket.iter() { + let id = entry.node.key.clone().into_preimage(); - // get `K_VALUE / 2`th peer's address distance - // This is a rough estimate of the farthest address we might be responsible for. - // We want this to be higher than actually necessary, so we retain more data - // and can be sure to pass bad node checks - let target_index = std::cmp::min(K_VALUE.get() / 2, closest_k_peers.len()) - 1; - - let address = NetworkAddress::from_peer(closest_k_peers[target_index]); - farthest_distance = our_address.distance(&address).ilog2(); - - farthest_distance + if id != our_peer_id { + all_peers.push(id); + } + } + } + all_peers.push(self.self_peer_id); + all_peers } /// Pushes NetworkSwarmCmd off thread so as to be non-blocking diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 6534c84017..09bf26de1f 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -30,10 +30,11 @@ pub(super) type Result = std::result::Result; #[derive(Error, Clone)] pub enum GetRecordError { #[error("Get Record completed with non enough copies")] - NotEnoughCopies { + NotEnoughCopiesInRange { record: Record, expected: usize, got: usize, + range: u32, }, #[error("Record not found in the network")] @@ -55,16 +56,18 @@ pub enum GetRecordError { impl Debug for GetRecordError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::NotEnoughCopies { + Self::NotEnoughCopiesInRange { record, expected, got, + range, } => { let pretty_key = PrettyPrintRecordKey::from(&record.key); - f.debug_struct("NotEnoughCopies") + f.debug_struct("NotEnoughCopiesInRange") .field("record_key", &pretty_key) .field("expected", &expected) .field("got", &got) + .field("range", &range) .finish() } Self::RecordNotFound => write!(f, "RecordNotFound"), @@ -138,7 +141,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double spend(s) attempt was detected. The signed spends are: {0:?}")] + #[error("Double SpendAttempt was detected. The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 6551f6e5f0..f772e35904 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -7,21 +7,22 @@ // permissions and limitations relating to use of the SAFE Network Software. 
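// The driver's `get_request_range` above picks the median of the recent range
// samples; a tiny standalone sketch of that selection (`u64` standing in for
// `KBucketDistance`):
//
//     fn median_range(samples: &[u64]) -> u64 {
//         if samples.is_empty() {
//             return 0; // stand-in for KBucketDistance::default()
//         }
//         let mut sorted = samples.to_vec();
//         sorted.sort_unstable();
//         sorted[sorted.len() / 2]
//     }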
use crate::{ - driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record, - target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, - CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, GetRecordCfg, + GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE, }; use itertools::Itertools; -use libp2p::kad::{ - self, GetClosestPeersError, InboundRequest, PeerRecord, ProgressStep, QueryId, QueryResult, - QueryStats, Record, K_VALUE, +use libp2p::{ + kad::{ + self, GetClosestPeersError, InboundRequest, KBucketDistance, PeerRecord, ProgressStep, + QueryId, QueryResult, QueryStats, Quorum, Record, K_VALUE, + }, + PeerId, }; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind}, - PrettyPrintRecordKey, +use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; +use std::{ + collections::{hash_map::Entry, HashSet}, + time::Instant, }; -use sn_transfers::SignedSpend; -use std::collections::{hash_map::Entry, BTreeSet, HashSet}; use tokio::sync::oneshot; use xor_name::XorName; @@ -31,6 +32,9 @@ impl SwarmDriver { let event_string; match kad_event { + // We use this query both to bootstrap and populate our routing table, + // but also to define our GetRange as defined by the largest distance between + // peers in any recent GetClosest call. kad::Event::OutboundQueryProgressed { id, result: QueryResult::GetClosestPeers(Ok(ref closest_peers)), @@ -45,7 +49,7 @@ impl SwarmDriver { ); if let Entry::Occupied(mut entry) = self.pending_get_closest_peers.entry(id) { - let (_, current_closest) = entry.get_mut(); + let (_, _, current_closest) = entry.get_mut(); // TODO: consider order the result and terminate when reach any of the // following criteria: @@ -53,16 +57,19 @@ impl SwarmDriver { // 2, `stats.duration()` is longer than a defined period current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id)); if current_closest.len() >= usize::from(K_VALUE) || step.last { - let (get_closest_type, current_closest) = entry.remove(); - match get_closest_type { - PendingGetClosestType::NetworkDiscovery => self - .network_discovery - .handle_get_closest_query(current_closest), - PendingGetClosestType::FunctionCall(sender) => { - sender - .send(current_closest) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } + let (address, get_closest_type, current_closest) = entry.remove(); + self.network_discovery + .handle_get_closest_query(¤t_closest); + + if let PendingGetClosestType::FunctionCall(sender) = get_closest_type { + sender + .send(current_closest) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } else { + // do not set this via function calls, as that could potentially + // skew the results in favour of heavily queried (and manipulated) + // areas of the network + self.set_request_range(address, ¤t_closest); } } } else { @@ -83,7 +90,7 @@ impl SwarmDriver { event_string = "kad_event::get_closest_peers_err"; error!("GetClosest Query task {id:?} errored with {err:?}, {stats:?} - {step:?}"); - let (get_closest_type, mut current_closest) = + let (_address, get_closest_type, mut current_closest) = self.pending_get_closest_peers.remove(&id).ok_or_else(|| { debug!( "Can't locate query task {id:?}, it has likely been completed already." 
@@ -106,7 +113,7 @@ impl SwarmDriver { match get_closest_type { PendingGetClosestType::NetworkDiscovery => self .network_discovery - .handle_get_closest_query(current_closest), + .handle_get_closest_query(¤t_closest), PendingGetClosestType::FunctionCall(sender) => { sender .send(current_closest) @@ -250,10 +257,8 @@ impl SwarmDriver { self.update_on_peer_addition(peer); // This should only happen once - if self.bootstrap.notify_new_peer() { - info!("Performing the first bootstrap"); - self.trigger_network_discovery(); - } + info!("Performing the first bootstrap"); + self.trigger_network_discovery(); } info!("kad_event::RoutingUpdated {:?}: {peer:?}, is_new_peer: {is_new_peer:?} old_peer: {old_peer:?}", self.peers_in_rt); @@ -320,6 +325,7 @@ impl SwarmDriver { // `QueryStats::requests` to be 20 (K-Value) // `QueryStats::success` to be over majority of the requests // `err::NotFound::closest_peers` contains a list of CLOSE_GROUP_SIZE peers + // // 2, targeting an existing entry // there will a sequence of (at least CLOSE_GROUP_SIZE) events of // `kad::Event::OutboundQueryProgressed` to be received @@ -333,26 +339,32 @@ impl SwarmDriver { // where: `cache_candidates`: being the peers supposed to hold the record but not // `ProgressStep::count`: to be `number of received copies plus one` // `ProgressStep::last` to be `true` + // + // /// Accumulates the GetRecord query results - /// If we get enough responses (quorum) for a record with the same content hash: + /// If we get enough responses (ie exceed GetRange) for a record with the same content hash: /// - we return the Record after comparing with the target record. This might return RecordDoesNotMatch if the /// check fails. /// - if multiple content hashes are found, we return a SplitRecord Error /// And then we stop the kad query as we are done here. + /// We do not need to wait for GetRange to be exceeded here and should return early. fn accumulate_get_record_found( &mut self, query_id: QueryId, peer_record: PeerRecord, _stats: QueryStats, - step: ProgressStep, + _step: ProgressStep, ) -> Result<()> { + let expected_get_range = self.get_request_range(); + let key = peer_record.record.key.clone(); + let peer_id = if let Some(peer_id) = peer_record.peer { peer_id } else { self.self_peer_id }; - let pretty_key = PrettyPrintRecordKey::from(&peer_record.record.key).into_owned(); + let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); if let Entry::Occupied(mut entry) = self.pending_get_record.entry(query_id) { let (_key, _senders, result_map, cfg) = entry.get_mut(); @@ -367,85 +379,28 @@ impl SwarmDriver { // Insert the record and the peer into the result_map. 
let record_content_hash = XorName::from_content(&peer_record.record.value); - let responded_peers = + + let peer_list = if let Entry::Occupied(mut entry) = result_map.entry(record_content_hash) { let (_, peer_list) = entry.get_mut(); + let _ = peer_list.insert(peer_id); - peer_list.len() + peer_list.clone() } else { let mut peer_list = HashSet::new(); let _ = peer_list.insert(peer_id); - result_map.insert(record_content_hash, (peer_record.record.clone(), peer_list)); - 1 - }; - - let expected_answers = get_quorum_value(&cfg.get_quorum); - - debug!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); - - if responded_peers >= expected_answers { - if !cfg.expected_holders.is_empty() { - debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with non-responded expected holders {:?}", cfg.expected_holders); - } - let cfg = cfg.clone(); - - // Remove the query task and consume the variables. - let (_key, senders, result_map, _) = entry.remove(); + result_map.insert( + record_content_hash, + (peer_record.record.clone(), peer_list.clone()), + ); - if result_map.len() == 1 { - Self::send_record_after_checking_target(senders, peer_record.record, &cfg)?; - } else { - debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with split record"); - let mut accumulated_spends = BTreeSet::new(); - for (record, _) in result_map.values() { - match get_raw_signed_spends_from_record(record) { - Ok(spends) => { - accumulated_spends.extend(spends); - } - Err(_) => { - continue; - } - } - } - if !accumulated_spends.is_empty() { - info!("For record {pretty_key:?} task {query_id:?}, found split record for a spend, accumulated and sending them as a single record"); - let accumulated_spends = - accumulated_spends.into_iter().collect::>(); - - let bytes = try_serialize_record(&accumulated_spends, RecordKind::Spend)?; - - let new_accumulated_record = Record { - key: peer_record.record.key, - value: bytes.to_vec(), - publisher: None, - expires: None, - }; - for sender in senders { - let new_accumulated_record = new_accumulated_record.clone(); + peer_list + }; - sender - .send(Ok(new_accumulated_record)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } - } else { - for sender in senders { - let result_map = result_map.clone(); - sender - .send(Err(GetRecordError::SplitRecord { result_map })) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } - } - } + let responded_peers = peer_list.len(); - // Stop the query; possibly stops more nodes from being queried. 
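// The rewritten accumulation above groups responders per content hash so that
// conflicting versions (split records) stay visible; a minimal standalone
// sketch of that bookkeeping (types simplified, names illustrative):
//
//     use std::collections::{HashMap, HashSet};
//
//     fn accumulate(
//         result_map: &mut HashMap<[u8; 32], HashSet<u64>>,
//         content_hash: [u8; 32],
//         responder: u64,
//     ) -> usize {
//         let peers = result_map.entry(content_hash).or_default();
//         peers.insert(responder);
//         peers.len() // responders seen for this exact content so far
//     }
//
// More than one key in `result_map` means the network returned conflicting
// versions, i.e. a split record.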
- if let Some(mut query) = self.swarm.behaviour_mut().kademlia.query_mut(&query_id) {
- query.finish();
- }
- } else if usize::from(step.count) >= CLOSE_GROUP_SIZE {
- debug!("For record {pretty_key:?} task {query_id:?}, got {:?} with {} versions so far.",
- step.count, result_map.len());
- }
- } else {
+ let expected_answers = get_quorum_value(&cfg.get_quorum);
+ trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far");
 // return error if the entry cannot be found
 return Err(NetworkError::ReceivedKademliaEventDropped {
 query_id,
@@ -455,6 +410,68 @@ impl SwarmDriver {
 Ok(())
 }

+ /// Checks that the peers returned for a request are sufficiently spaced, to
+ /// ensure we have searched enough of the network range as determined by our `get_range`
+ ///
+ /// We expect any conflicting records to have been reported prior to this check,
+ /// so we assume we're returning unique records only.
+ fn have_we_have_searched_thoroughly_for_quorum(
+ expected_get_range: KBucketDistance,
+ searched_peers_list: &HashSet,
+ data_key_address: &NetworkAddress,
+ quorum: &Quorum,
+ ) -> bool {
+ warn!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len());
+
+ let required_quorum = get_quorum_value(quorum);
+
+ // get the farthest distance between a responding peer and the data
+ let mut current_distance_searched = KBucketDistance::default();
+
+ // iterate over peers and see if the distance to the data is greater than the get_range
+ for peer_id in searched_peers_list.iter() {
+ let peer_address = NetworkAddress::from_peer(*peer_id);
+ let distance_to_data = peer_address.distance(data_key_address);
+ if current_distance_searched < distance_to_data {
+ current_distance_searched = distance_to_data;
+ }
+ }
+
+ // use ilog2 as simplified distance check
+ // It allows us to say "we've searched up to and including this bucket"
+ // as opposed to the concrete distance itself (which statistically seems like we can fall outwith a range
+ // quite easily with a small number of peers)
+ let exceeded_request_range = if current_distance_searched.ilog2()
+ < expected_get_range.ilog2()
+ {
+ let dist = current_distance_searched.ilog2();
+ let expected_dist = expected_get_range.ilog2();
+
+ warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {current_distance_searched:?} is less than expected GetRange of {expected_get_range:?}");
+
+ false
+ } else {
+ true
+ };
+
+ let is_sensitive_data = matches!(quorum, Quorum::All);
+ // We assume a finalised query has searched as far as it can in libp2p
+ // TODO: Do we only allow this if quorum is from known peers?
+ // TODO: Do we only bail early if NOT Quorum::All? (And so we need to search the full range?)
+ //
+ // we only enforce the range when we have sensitive data... for spends, Quorum::All applies
+ if searched_peers_list.len() >= required_quorum && !is_sensitive_data {
+ return true;
+ }
+
+ if exceeded_request_range {
+ warn!("RANGE: {data_key_address:?} Request satisfied as exceeded request range : {exceeded_request_range:?} and Quorum satisfied with {:?} peers exceeding quorum {required_quorum:?}", searched_peers_list.len());
+ return true;
+ }
+
+ false
+ }
+
 /// Handles the possible cases when a GetRecord Query completes.
/// The accumulate_get_record_found returns the record if the quorum is satisfied, but, if we have reached this point /// then we did not get enough records or we got split records (which prevented the quorum to pass). @@ -469,16 +486,59 @@ impl SwarmDriver { let (result, log_string) = if let Some((record, from_peers)) = result_map.values().next() { - let result = if num_of_versions == 1 { - Err(GetRecordError::NotEnoughCopies { - record: record.clone(), - expected: get_quorum_value(&cfg.get_quorum), - got: from_peers.len(), - }) - } else { + let data_key_address = NetworkAddress::from_record_key(&record.key); + let expected_get_range = self.get_request_range(); + + let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum( + expected_get_range, + from_peers, + &data_key_address, + &cfg.get_quorum, + ); + + let pretty_key = PrettyPrintRecordKey::from(&record.key); + info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}"); + + let result = if num_of_versions > 1 { + warn!("RANGE: more than one version found!"); Err(GetRecordError::SplitRecord { result_map: result_map.clone(), }) + } else if we_have_searched_thoroughly { + warn!("RANGE: Get record finished: {pretty_key:?} Enough of the network has responded or it's not sensitive data... and we only have one copy..."); + + Ok(record.clone()) + } else { + // + // We have not searched enough of the network range. + let result = Err(GetRecordError::NotEnoughCopiesInRange { + record: record.clone(), + expected: get_quorum_value(&cfg.get_quorum), + got: from_peers.len(), + range: expected_get_range.ilog2().unwrap_or(0), + }); + + // This should be a backstop... Quorum::All is the only one that enforces + // a full search of the network range. + if matches!(cfg.get_quorum, Quorum::All) { + warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to extend the range and PUT the data. {result:?}"); + + warn!("Reputting data to network {pretty_key:?}..."); + + // let's ensure we have an updated network view + self.trigger_network_discovery(); + + let (sender, _receiver) = oneshot::channel(); + + // nodes will try/fail to replicate it from us, but grab from the network thereafter + self.queue_network_swarm_cmd(NetworkSwarmCmd::PutRecord { + record: record.clone(), + sender, + quorum: cfg.get_quorum, + }); + } + + result }; ( @@ -508,8 +568,6 @@ impl SwarmDriver { .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } } else { - // We manually perform `query.finish()` if we return early from accumulate fn. - // Thus we will still get FinishedWithNoAdditionalRecord. debug!("Can't locate query task {query_id:?} during GetRecord finished. 
We might have already returned the result to the sender."); } Ok(()) diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs index 2396babf64..4fa0f51b86 100644 --- a/sn_networking/src/event/mod.rs +++ b/sn_networking/src/event/mod.rs @@ -254,8 +254,6 @@ impl SwarmDriver { self.log_kbuckets(&added_peer); self.send_event(NetworkEvent::PeerAdded(added_peer, self.peers_in_rt)); - self.add_distance_range_for_gets(); - self.get_peers_within_get_range(); #[cfg(feature = "open-metrics")] if self.metrics_recorder.is_some() { self.check_for_change_in_our_close_group(); diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 75afbfdfb5..9675bffe1c 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -10,14 +10,18 @@ use crate::{ cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address_and_limit, MsgResponder, NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, }; -use itertools::Itertools; -use libp2p::request_response::{self, Message}; -use rand::{rngs::OsRng, thread_rng, Rng}; +use libp2p::{ + kad::RecordKey, + request_response::{self, Message}, + PeerId, +}; +use rand::{rngs::OsRng, Rng}; use sn_protocol::{ messages::{CmdResponse, Request, Response}, storage::RecordType, NetworkAddress, }; +use std::collections::HashMap; impl SwarmDriver { /// Forwards `Request` to the upper layers using `Sender`. Sends `Response` to the peers @@ -190,6 +194,9 @@ impl SwarmDriver { sender: NetworkAddress, incoming_keys: Vec<(NetworkAddress, RecordType)>, ) { + let peers = self.get_all_local_peers_excluding_self(); + let our_peer_id = self.self_peer_id; + let holder = if let Some(peer_id) = sender.as_peer_id() { peer_id } else { @@ -202,16 +209,13 @@ impl SwarmDriver { incoming_keys.len() ); - // accept replication requests from the K_VALUE peers away, - // giving us some margin for replication - let closest_k_peers = self.get_closest_k_value_local_peers(); - if !closest_k_peers.contains(&holder) || holder == self.self_peer_id { - debug!("Holder {holder:?} is self or not in replication range."); + let more_than_one_key = incoming_keys.len() > 1; + // accept replication requests from all peers known peers within our GetRange + if !peers.contains(&holder) || holder == our_peer_id { + trace!("Holder {holder:?} is self or not in replication range."); return; } - let more_than_one_key = incoming_keys.len() > 1; - // On receive a replication_list from a close_group peer, we undertake two tasks: // 1, For those keys that we don't have: // fetch them if close enough to us @@ -224,10 +228,13 @@ impl SwarmDriver { .behaviour_mut() .kademlia .store_mut() - .record_addresses_ref(); - let keys_to_fetch = self - .replication_fetcher - .add_keys(holder, incoming_keys, all_keys); + .record_addresses_ref() + .clone(); + + let keys_to_fetch = + self.replication_fetcher + .add_keys(holder, incoming_keys, &all_keys, &peers); + if keys_to_fetch.is_empty() { debug!("no waiting keys to fetch from the network"); } else { @@ -235,70 +242,94 @@ impl SwarmDriver { } // Only trigger chunk_proof check based every X% of the time - let mut rng = thread_rng(); + let mut rng = OsRng; // 5% probability if more_than_one_key && rng.gen_bool(0.05) { - self.verify_peer_storage(sender.clone()); + let event_sender = self.event_sender.clone(); + let _handle = tokio::spawn(async move { + let keys_to_verify = + Self::select_verification_data_candidates(&peers, &all_keys, &sender); + + if keys_to_verify.is_empty() 
{ + debug!("No valid candidate to be checked against peer {holder:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } + + // In addition to verifying the sender, we also verify a random close node. + // This is to avoid a malicious node escaping the check by never sending a replication_list. + // With further reduced probability of 1% (5% * 20%) + if rng.gen_bool(0.2) { + let close_group_peers = if let Ok(peers) = sort_peers_by_address_and_limit( + &peers, + &NetworkAddress::from_peer(our_peer_id), + CLOSE_GROUP_SIZE, + ) { + peers + } else { + vec![] + }; + + if close_group_peers.len() == CLOSE_GROUP_SIZE { + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate_peer_id = *close_group_peers[index]; + let candidate = NetworkAddress::from_peer(candidate_peer_id); + if sender != candidate { + let keys_to_verify = Self::select_verification_data_candidates( + &peers, &all_keys, &candidate, + ); - // In additon to verify the sender, we also verify a random close node. - // This is to avoid malicious node escaping the check by never send a replication_list. - // With further reduced probability of 1% (5% * 20%) - if rng.gen_bool(0.2) { - let close_group_peers = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&self.self_peer_id.into()) - .map(|peer| peer.into_preimage()) - .take(CLOSE_GROUP_SIZE) - .collect_vec(); - if close_group_peers.len() == CLOSE_GROUP_SIZE { - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate = NetworkAddress::from_peer(close_group_peers[index]); - if sender != candidate { - self.verify_peer_storage(candidate); - break; + if keys_to_verify.is_empty() { + debug!( + "No valid candidate to be checked against peer {candidate:?}" + ); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + // check the chosen candidate, not the holder that sent the list + peer_id: candidate_peer_id, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } + + break; + } } } } - } + }); } } /// Check among all chunk type records that we have, select those close to the peer, /// and randomly pick one as the verification candidate. - fn verify_peer_storage(&mut self, peer: NetworkAddress) { - let mut closest_peers = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_local_peers(&self.self_peer_id.into()) - .map(|peer| peer.into_preimage()) - .take(20) - .collect_vec(); - closest_peers.push(self.self_peer_id); - + fn select_verification_data_candidates( + all_peers: &Vec<PeerId>, + all_keys: &HashMap<RecordKey, (NetworkAddress, RecordType)>, + peer: &NetworkAddress, + ) -> Vec<NetworkAddress> { let target_peer = if let Some(peer_id) = peer.as_peer_id() { peer_id } else { error!("Target {peer:?} is not a valid PeerId"); - return; + return vec![]; }; - let all_keys = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .record_addresses_ref(); - // Targeted chunk type record shall be expected within the close range from our perspective. let mut verify_candidates: Vec<NetworkAddress> = all_keys .values() .filter_map(|(addr, record_type)| { if RecordType::Chunk == *record_type { + // Here we take the actual closest, as this is where we want to be + // strict about who does have the data...
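// A minimal sketch of the strict membership test applied below, assuming plain
// u32 keys with XOR as the distance metric in place of NetworkAddress/KBucketKey
// (hypothetical helper, not the crate's API):
//
//     fn close_group_contains(peers: &[u32], data: u32, candidate: u32, group_size: usize) -> bool {
//         let mut sorted = peers.to_vec();
//         // Kademlia closeness: a smaller XOR with the data address means closer
//         sorted.sort_by_key(|p| p ^ data);
//         // the candidate must be among the `group_size` closest peers to the data
//         sorted.into_iter().take(group_size).any(|p| p == candidate)
//     }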
+ match sort_peers_by_address_and_limit(all_peers, addr, CLOSE_GROUP_SIZE) { Ok(close_group) => { if close_group.contains(&&target_peer) { Some(addr.clone()) @@ -319,17 +350,6 @@ impl SwarmDriver { verify_candidates.sort_by_key(|a| peer.distance(a)); - // To ensure the candidate mush have to be held by the peer, - // we only carry out check when there are already certain amount of chunks uploaded - // AND choose candidate from certain reduced range. - if verify_candidates.len() > 50 { - let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2)); - self.send_event(NetworkEvent::ChunkProofVerification { - peer_id: target_peer, - keys_to_verify: vec![verify_candidates[index].clone()], - }); - } else { - debug!("No valid candidate to be checked against peer {peer:?}"); - } + verify_candidates } } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 982088f102..3d5dd62ab6 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -393,7 +393,7 @@ impl SwarmDriver { self.record_connection_metrics(); // we need to decide if this was a critical error and the peer should be removed from the routing table - let should_clean_peer = match error { + let (should_clean_peer, should_track_issue) = match error { DialError::Transport(errors) => { // as it's an outgoing error, if it's transport based we can assume it is _our_ fault // @@ -401,7 +401,7 @@ impl SwarmDriver { // so we default to it not being a real issue // unless there are _specific_ errors (connection refused eg) error!("Dial errors len : {:?}", errors.len()); - let mut there_is_a_serious_issue = false; + let mut remove_peer_track_peer_issue = (false, false); for (_addr, err) in errors { error!("OutgoingTransport error : {err:?}"); @@ -414,14 +414,13 @@ impl SwarmDriver { println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); } // if we can't dial a peer on a given address, we should remove it from the routing table - there_is_a_serious_issue = true + remove_peer_track_peer_issue = (false, true) } TransportError::Other(err) => { - let problematic_errors = [ - "ConnectionRefused", - "HostUnreachable", - "HandshakeTimedOut", - ]; + let problematic_errors = + ["ConnectionRefused", "HostUnreachable"]; + + let intermittent_errors = ["HandshakeTimedOut"]; let is_bootstrap_peer = self .bootstrap_peers @@ -432,7 +431,7 @@ impl SwarmDriver { && self.peers_in_rt < self.bootstrap_peers.len() { warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring"); - there_is_a_serious_issue = false; + remove_peer_track_peer_issue = (false, false); } else { // It is really difficult to match this error, due to being eg: // Custom { kind: Other, error: Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) } @@ -443,29 +442,35 @@ impl SwarmDriver { .any(|err| error_msg.contains(err)) { warn!("Problematic error encountered: {error_msg}"); - there_is_a_serious_issue = true; + remove_peer_track_peer_issue = (true, true); + } else if intermittent_errors + .iter() + .any(|err| error_msg.contains(err)) + { + warn!("Intermittent error encountered: {error_msg}"); + remove_peer_track_peer_issue = (false, true); } } } } } - there_is_a_serious_issue + remove_peer_track_peer_issue } DialError::NoAddresses => { // We provided no address, and while we can't really blame the peer // we also can't connect, so we opt to cleanup... 
warn!("OutgoingConnectionError: No address provided"); - true + (true, false) } DialError::Aborted => { // not their fault warn!("OutgoingConnectionError: Aborted"); - false + (false, false) } DialError::DialPeerConditionFalse(_) => { // we could not dial due to an internal condition, so not their issue warn!("OutgoingConnectionError: DialPeerConditionFalse"); - false + (false, false) } DialError::LocalPeerId { endpoint, .. } => { // This is actually _us_ So we should remove this from the RT @@ -473,24 +478,24 @@ impl SwarmDriver { "OutgoingConnectionError: LocalPeerId: {}", endpoint_str(&endpoint) ); - true + (true, true) } DialError::WrongPeerId { obtained, endpoint } => { // The peer id we attempted to dial was not the one we expected // cleanup error!("OutgoingConnectionError: WrongPeerId: obtained: {obtained:?}, endpoint: {endpoint:?}"); - true + (true, true) } DialError::Denied { cause } => { // The peer denied our connection // cleanup error!("OutgoingConnectionError: Denied: {cause:?}"); - true + (true, true) } }; if should_clean_peer { - warn!("Tracking issue of {failed_peer_id:?}. Clearing it out for now"); + warn!("Serious issue with {failed_peer_id:?}. Clearing it out for now"); if let Some(dead_peer) = self .swarm @@ -501,6 +506,15 @@ impl SwarmDriver { self.update_on_peer_removal(*dead_peer.node.key.preimage()); } } + + if should_track_issue { + warn!("Tracking issue of {failed_peer_id:?}."); + + self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { + peer_id: failed_peer_id, + issue: crate::NodeIssue::ConnectionIssue, + })?; + } } SwarmEvent::IncomingConnectionError { connection_id, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index eeb76cf04d..c9244dbc46 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -78,10 +78,6 @@ use tokio::time::Duration; /// The type of quote for a selected payee. pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote); -/// The count of peers that will be considered as close to a record target, -/// that a replication of the record shall be sent/accepted to/by the peer. -pub const REPLICATION_PEERS_COUNT: usize = CLOSE_GROUP_SIZE + 2; - /// Majority of a given group (i.e. > 1/2). #[inline] pub const fn close_group_majority() -> usize { @@ -105,6 +101,36 @@ pub fn sort_peers_by_address_and_limit<'a>( sort_peers_by_key_and_limit(peers, &address.as_kbucket_key(), expected_entries) } +/// Sort the provided peers by their distance to the given `NetworkAddress`. +/// Return with the closest expected number of entries if has. +pub fn sort_peers_by_distance_to( + peers: &[PeerId], + queried_address: NetworkAddress, +) -> Vec { + let mut sorted_distances: Vec<_> = peers + .iter() + .map(|peer| { + let addr = NetworkAddress::from_peer(*peer); + queried_address.distance(&addr) + }) + .collect(); + + sorted_distances.sort(); + + sorted_distances +} + +/// Sort the provided peers by their distance to the given `NetworkAddress`. +/// Return with the closest expected number of entries if has. +#[allow(clippy::result_large_err)] +pub fn sort_peers_by_address_and_limit_by_distance<'a>( + peers: &'a Vec, + address: &NetworkAddress, + distance: KBucketDistance, +) -> Result> { + limit_peers_by_distance(peers, &address.as_kbucket_key(), distance) +} + /// Sort the provided peers by their distance to the given `KBucketKey`. /// Return with the closest expected number of entries if has. 
pub fn sort_peers_by_key_and_limit<'a, T>( @@ -144,6 +170,40 @@ pub fn sort_peers_by_key_and_limit<'a, T>( Ok(sorted_peers) } +/// Only return peers closer to key than the provided distance +/// Their distance is measured by closeness to the given `KBucketKey`. +/// Return with the closest expected number of entries if has. +#[allow(clippy::result_large_err)] +pub fn limit_peers_by_distance<'a, T>( + peers: &'a Vec, + key: &KBucketKey, + distance: KBucketDistance, +) -> Result> { + // Check if there are enough peers to satisfy the request. + // bail early if that's not the case + if CLOSE_GROUP_SIZE > peers.len() { + warn!("Not enough peers in the k-bucket to satisfy the request"); + return Err(NetworkError::NotEnoughPeers { + found: peers.len(), + required: CLOSE_GROUP_SIZE, + }); + } + + // Create a vector of tuples where each tuple is a reference to a peer and its distance to the key. + // This avoids multiple computations of the same distance in the sorting process. + let mut peers_within_distance: Vec<&PeerId> = Vec::with_capacity(peers.len()); + + for peer_id in peers { + let addr = NetworkAddress::from_peer(*peer_id); + let peer_distance = key.distance(&addr.as_kbucket_key()); + + if peer_distance < distance { + peers_within_distance.push(peer_id); + } + } + + Ok(peers_within_distance) +} #[derive(Clone, Debug)] /// API to interact with the underlying Swarm @@ -197,6 +257,13 @@ impl Network { &self.inner.local_swarm_cmd_sender } + /// Return the GetRange as determined by the internal SwarmDriver + pub async fn get_range(&self) -> Result { + let (sender, receiver) = oneshot::channel(); + self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRange { sender }); + receiver.await.map_err(NetworkError::from) + } + /// Signs the given data with the node's keypair. pub fn sign(&self, msg: &[u8]) -> Result> { self.keypair().sign(msg).map_err(NetworkError::from) @@ -220,19 +287,121 @@ impl Network { receiver.await? } + /// Replicate a fresh record to its close group peers. 
+ /// This should not be triggered by a record we receive via replicaiton fetch + pub async fn replicate_valid_fresh_record(&self, paid_key: RecordKey, record_type: RecordType) { + let network = self; + + let start = std::time::Instant::now(); + let pretty_key = PrettyPrintRecordKey::from(&paid_key); + + // first we wait until our own network store can return the record + // otherwise it may not be fully written yet + let mut retry_count = 0; + trace!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); + loop { + let record = match network.get_local_record(&paid_key).await { + Ok(record) => record, + Err(err) => { + error!( + "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" + ); + None + } + }; + + if record.is_some() { + break; + } + + if retry_count > 10 { + error!( + "Could not get record from store for replication: {pretty_key:?} after 10 retries" + ); + return; + } + + retry_count += 1; + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + trace!("Start replication of fresh record {pretty_key:?} from store"); + + let all_peers = match network.get_all_local_peers_excluding_self().await { + Ok(peers) => peers, + Err(err) => { + error!( + "Replicating fresh record {pretty_key:?} get_all_local_peers errored: {err:?}" + ); + return; + } + }; + + let data_addr = NetworkAddress::from_record_key(&paid_key); + let mut peers_to_replicate_to = match network.get_range().await { + Err(error) => { + error!("Replicating fresh record {pretty_key:?} get_range errored: {error:?}"); + + return; + } + + Ok(our_get_range) => { + match sort_peers_by_address_and_limit_by_distance( + &all_peers, + &data_addr, + our_get_range, + ) { + Ok(result) => result, + Err(err) => { + error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); + return; + } + } + } + }; + + if peers_to_replicate_to.len() < CLOSE_GROUP_SIZE { + warn!( + "Replicating fresh record {pretty_key:?} current GetRange insufficient for secure replication. Falling back to CLOSE_GROUP_SIZE" + ); + + peers_to_replicate_to = + match sort_peers_by_address_and_limit(&all_peers, &data_addr, CLOSE_GROUP_SIZE) { + Ok(result) => result, + Err(err) => { + error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); + return; + } + }; + } + + let our_peer_id = network.peer_id(); + let our_address = NetworkAddress::from_peer(our_peer_id); + #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress + let keys = vec![(data_addr.clone(), record_type.clone())]; + + for peer_id in &peers_to_replicate_to { + trace!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); + let request = Request::Cmd(Cmd::Replicate { + holder: our_address.clone(), + keys: keys.clone(), + }); + + network.send_req_ignore_reply(request, **peer_id); + } + trace!( + "Completed replicate fresh record {pretty_key:?} to {:?} peers on store, in {:?}", + peers_to_replicate_to.len(), + start.elapsed() + ); + } + /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. /// Excludes the client's `PeerId` while calculating the closest peers. pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result> { self.get_closest_peers(key, true).await } - /// Returns the closest peers to the given `NetworkAddress`, sorted by their distance to the key. - /// - /// Includes our node's `PeerId` while calculating the closest peers. 
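// The fresh-record fan-out above reduces to: take every known peer within our
// GetRange of the data, and fall back to the plain close group when that set is
// too small. A self-contained sketch with u32 XOR keys standing in for
// NetworkAddress (hypothetical helper, not the crate's API):
//
//     fn replication_targets(peers: &[u32], data: u32, range: u32, close_group: usize) -> Vec<u32> {
//         // prefer all peers whose XOR distance to the data lies within our GetRange
//         let mut targets: Vec<u32> = peers.iter().copied().filter(|p| (p ^ data) <= range).collect();
//         if targets.len() < close_group {
//             // insufficient range coverage: fall back to the close_group closest peers
//             targets = peers.to_vec();
//             targets.sort_by_key(|p| p ^ data);
//             targets.truncate(close_group);
//         }
//         targets
//     }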
- pub async fn node_get_closest_peers(&self, key: &NetworkAddress) -> Result> { - self.get_closest_peers(key, false).await - } - /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. /// Does not include self @@ -245,10 +414,10 @@ impl Network { } /// Returns all the PeerId from all the KBuckets from our local Routing Table - /// Also contains our own PeerId. - pub async fn get_closest_k_value_local_peers(&self) -> Result> { + /// Excludes our own PeerId. + pub async fn get_all_local_peers_excluding_self(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender }); receiver .await @@ -498,6 +667,10 @@ impl Network { key: RecordKey, cfg: &GetRecordCfg, ) -> Result { + use std::collections::BTreeSet; + + use sn_transfers::SignedSpend; + let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( backoff::ExponentialBackoff { @@ -528,7 +701,7 @@ impl Network { Err(GetRecordError::RecordDoesNotMatch(_)) => { warn!("The returned record does not match target {pretty_key:?}."); } - Err(GetRecordError::NotEnoughCopies { expected, got, .. }) => { + Err(GetRecordError::NotEnoughCopiesInRange { expected, got, .. }) => { warn!("Not enough copies ({got}/{expected}) found yet for {pretty_key:?}."); } // libp2p RecordNotFound does mean no holders answered. @@ -537,8 +710,39 @@ impl Network { Err(GetRecordError::RecordNotFound) => { warn!("No holder of record '{pretty_key:?}' found."); } - Err(GetRecordError::SplitRecord { .. }) => { + Err(GetRecordError::SplitRecord { result_map }) => { error!("Encountered a split record for {pretty_key:?}."); + + // attempt to deserialise and accumulate any spends + let mut accumulated_spends = BTreeSet::new(); + let results_count = result_map.len(); + // try and accumulate any SpendAttempts + if results_count > 1 { + info!("For record {pretty_key:?}, we have more than one result returned."); + // Allow for early bail if we've already seen a split SpendAttempt + for (record, _) in result_map.values() { + match get_raw_signed_spends_from_record(record) { + Ok(spends) => { + accumulated_spends.extend(spends); + } + Err(_) => { + continue; + } + } + } + } + + // we have a Double SpendAttempt and will exit + if accumulated_spends.len() > 1 { + info!("For record {pretty_key:?} task found split record for a spend, accumulated and sending them as a single record"); + let accumulated_spends = + accumulated_spends.into_iter().collect::>(); + + return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt( + accumulated_spends, + ))); + } + } Err(GetRecordError::QueryTimeout) => { error!("Encountered query timeout for {pretty_key:?}."); diff --git a/sn_networking/src/network_discovery.rs b/sn_networking/src/network_discovery.rs index f3f4986134..3d82c944fb 100644 --- a/sn_networking/src/network_discovery.rs +++ b/sn_networking/src/network_discovery.rs @@ -8,7 +8,6 @@ use crate::target_arch::Instant; use libp2p::{kad::KBucketKey, PeerId}; -use rand::{thread_rng, Rng}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use sn_protocol::NetworkAddress; use std::collections::{btree_map::Entry, BTreeMap}; @@ -52,13 +51,13 @@ impl NetworkDiscovery { } /// The result from the kad::GetClosestPeers are again used to update our kbucket. 
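The split-record branch above reduces to set accumulation: deserialise whatever spends each conflicting copy holds, merge them, and treat more than one distinct spend as a double-spend attempt. A minimal sketch with strings standing in for deserialised spends (hypothetical types, not sn_transfers):

    use std::collections::BTreeSet;

    enum SpendCheck {
        Unique(Option<String>),
        DoubleSpend(Vec<String>),
    }

    fn accumulate_spends(copies: Vec<Vec<String>>) -> SpendCheck {
        // the set deduplicates identical spends returned by different holders
        let merged: BTreeSet<String> = copies.into_iter().flatten().collect();
        if merged.len() > 1 {
            // two or more distinct spends of one key: report them together
            SpendCheck::DoubleSpend(merged.into_iter().collect())
        } else {
            SpendCheck::Unique(merged.into_iter().next())
        }
    }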
- pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { + pub(crate) fn handle_get_closest_query(&mut self, closest_peers: &[PeerId]) { let now = Instant::now(); let candidates_map: BTreeMap> = closest_peers - .into_iter() + .iter() .filter_map(|peer| { - let peer = NetworkAddress::from_peer(peer); + let peer = NetworkAddress::from_peer(*peer); let peer_key = peer.as_kbucket_key(); peer_key .distance(&self.self_key) @@ -83,18 +82,28 @@ impl NetworkDiscovery { /// Returns one random candidate per bucket. Also tries to refresh the candidate list. /// Todo: Limit the candidates to return. Favor the closest buckets. - pub(crate) fn candidates(&mut self) -> Vec<&NetworkAddress> { - self.try_refresh_candidates(); - - let mut rng = thread_rng(); + pub(crate) fn candidates(&mut self) -> Vec { let mut op = Vec::with_capacity(self.candidates.len()); - let candidates = self.candidates.values().filter_map(|candidates| { - // get a random index each time - let random_index = rng.gen::() % candidates.len(); - candidates.get(random_index) - }); - op.extend(candidates); + let mut generate_fresh_candidates = false; + for addresses in self.candidates.values_mut() { + // get a random candidate from each bucket each time + if addresses.is_empty() { + generate_fresh_candidates = true; + continue; + } + + // remove the first each time + let address = addresses.remove(0); + op.push(address); + } + + if generate_fresh_candidates { + // we only refresh when we are running low on candidates + self.try_refresh_candidates(); + } + + debug!("Candidates returned: {}", op.len()); op } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 599dee835b..35b1cdec59 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -90,7 +90,7 @@ pub struct NodeRecordStore { /// ilog2 distance range of responsible records /// AKA: how many buckets of data do we consider "close" /// None means accept all records. - responsible_distance_range: Option, + responsible_distance_range: Option, #[cfg(feature = "open-metrics")] /// Used to report the number of records held by the store to the metrics server. record_count_metric: Option, @@ -315,11 +315,6 @@ impl NodeRecordStore { self } - /// Returns the current distance ilog2 (aka bucket) range of CLOSE_GROUP nodes. - pub fn get_responsible_distance_range(&self) -> Option { - self.responsible_distance_range - } - // Converts a Key into a Hex string. fn generate_filename(key: &Key) -> String { hex::encode(key.as_ref()) @@ -474,8 +469,7 @@ impl NodeRecordStore { let mut removed_keys = Vec::new(); self.records.retain(|key, _val| { let kbucket_key = KBucketKey::new(key.to_vec()); - let is_in_range = - responsible_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0); + let is_in_range = responsible_range >= self.local_key.distance(&kbucket_key); if !is_in_range { removed_keys.push(key.clone()); } @@ -699,7 +693,7 @@ impl NodeRecordStore { pub fn get_records_within_distance_range( &self, records: HashSet<&Key>, - distance_range: u32, + distance_range: Distance, ) -> usize { debug!( "Total record count is {:?}. Distance is: {distance_range:?}", @@ -710,7 +704,7 @@ impl NodeRecordStore { .iter() .filter(|key| { let kbucket_key = KBucketKey::new(key.to_vec()); - distance_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0) + distance_range >= self.local_key.distance(&kbucket_key) }) .count(); @@ -719,8 +713,8 @@ impl NodeRecordStore { } /// Setup the distance range. 
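With the responsible range now a full distance rather than an ilog2 bucket, the pruning above becomes a plain comparison. A self-contained sketch, with u32 XOR keys as hypothetical stand-ins for record keys and KBucketDistance:

    fn prune_out_of_range(records: &mut Vec<u32>, local_key: u32, responsible_range: u32) -> Vec<u32> {
        let mut removed = Vec::new();
        // keep only records whose XOR distance from our key is within the responsible range
        records.retain(|key| {
            let in_range = (key ^ local_key) <= responsible_range;
            if !in_range {
                removed.push(*key);
            }
            in_range
        });
        removed
    }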
- pub(crate) fn set_responsible_distance_range(&mut self, farthest_responsible_bucket: u32) { - self.responsible_distance_range = Some(farthest_responsible_bucket); + pub(crate) fn set_responsible_distance_range(&mut self, farthest_distance: Distance) { + self.responsible_distance_range = Some(farthest_distance); } } @@ -1500,10 +1494,7 @@ mod tests { .wrap_err("Could not parse record store key")?, ); // get the distance to this record from our local key - let distance = self_address - .distance(&halfway_record_address) - .ilog2() - .unwrap_or(0); + let distance = self_address.distance(&halfway_record_address); // must be plus one bucket from the halfway record store.set_responsible_distance_range(distance); diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 8e3bc67364..64fd790ccd 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -10,7 +10,7 @@ use crate::record_store::{ClientRecordStore, NodeRecordStore}; use libp2p::kad::{ store::{RecordStore, Result}, - ProviderRecord, Record, RecordKey, + KBucketDistance, ProviderRecord, Record, RecordKey, }; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; @@ -130,17 +130,7 @@ impl UnifiedRecordStore { } } - pub(crate) fn get_farthest_replication_distance_bucket(&self) -> Option { - match self { - Self::Client(_store) => { - warn!("Calling get_distance_range at Client. This should not happen"); - None - } - Self::Node(store) => store.get_responsible_distance_range(), - } - } - - pub(crate) fn set_distance_range(&mut self, distance: u32) { + pub(crate) fn set_distance_range(&mut self, distance: KBucketDistance) { match self { Self::Client(_store) => { warn!("Calling set_distance_range at Client. This should not happen"); diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 1b90ac9a53..0df4f1c29b 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -8,7 +8,9 @@ #![allow(clippy::mutable_key_type)] use crate::target_arch::spawn; +use crate::CLOSE_GROUP_SIZE; use crate::{event::NetworkEvent, target_arch::Instant}; +use itertools::Itertools; use libp2p::{ kad::{KBucketDistance as Distance, RecordKey, K_VALUE}, PeerId, @@ -41,8 +43,8 @@ pub(crate) struct ReplicationFetcher { // Avoid fetching same chunk from different nodes AND carry out too many parallel tasks. on_going_fetches: HashMap<(RecordKey, RecordType), (PeerId, ReplicationTimeout)>, event_sender: mpsc::Sender, - /// ilog2 bucket distance range that the incoming key shall be fetched - distance_range: Option, + /// KBucketDistance range that the incoming key shall be fetched + distance_range: Option, /// Restrict fetch range to closer than this value /// used when the node is full, but we still have "close" data coming in /// that is _not_ closer than our farthest max record @@ -63,7 +65,7 @@ impl ReplicationFetcher { } /// Set the distance range. 
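The retain filter in `add_keys` below accepts an incoming key if either we know of no more than CLOSE_GROUP_SIZE peers sitting within range of that key, or the key falls within our own GetRange (compared via ilog2 in the real code). A minimal sketch of that predicate, again with u32 XOR keys as hypothetical stand-ins:

    fn accept_for_fetch(key: u32, our_key: u32, peers: &[u32], range: u32, close_group: usize) -> bool {
        // count known peers whose distance to the data is within the range
        let peers_in_range = peers.iter().filter(|p| (*p ^ key) <= range).count();
        // A) not enough peers sit close to the data, or B) the data is within our GetRange
        peers_in_range <= close_group || (our_key ^ key) <= range
    }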
- pub(crate) fn set_replication_distance_range(&mut self, distance_range: u32) { + pub(crate) fn set_replication_distance_range(&mut self, distance_range: Distance) { self.distance_range = Some(distance_range); } @@ -76,6 +78,7 @@ impl ReplicationFetcher { holder: PeerId, incoming_keys: Vec<(NetworkAddress, RecordType)>, locally_stored_keys: &HashMap, + all_local_peers: &[PeerId], ) -> Vec<(PeerId, RecordKey)> { // remove locally stored from incoming_keys let mut new_incoming_keys: Vec<_> = incoming_keys @@ -133,12 +136,30 @@ impl ReplicationFetcher { .retain(|_, time_out| *time_out > Instant::now()); let mut out_of_range_keys = vec![]; + // Filter out those out_of_range ones among the incoming_keys. if let Some(ref distance_range) = self.distance_range { new_incoming_keys.retain(|(addr, _record_type)| { - let is_in_range = - self_address.distance(addr).ilog2().unwrap_or(0) <= *distance_range; + // find all closer peers to the data + let closer_peers_len = all_local_peers + .iter() + .filter(|peer_id| { + let peer_address = NetworkAddress::from_peer(**peer_id); + addr.distance(&peer_address) <= *distance_range + }) + .collect_vec() + .len(); + + // we consider ourselves in range if + // A) We don't know enough closer peers than ourselves + // or B) The distance to the data is within our GetRange + let is_in_range = closer_peers_len <= CLOSE_GROUP_SIZE + || self_address.distance(addr).ilog2() <= distance_range.ilog2(); if !is_in_range { + warn!( + "Rejecting incoming key: {addr:?} as out of range. {:?} is larger than {:?} ", + self_address.distance(addr).ilog2(), + distance_range.ilog2()); out_of_range_keys.push(addr.clone()); } is_in_range @@ -428,8 +449,12 @@ mod tests { incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &locally_stored_keys); + let keys_to_fetch = replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &locally_stored_keys, + &[], + ); assert_eq!(keys_to_fetch.len(), MAX_PARALLEL_FETCH); // we should not fetch anymore keys @@ -441,6 +466,7 @@ mod tests { PeerId::random(), vec![(key_1, RecordType::Chunk), (key_2, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(keys_to_fetch.is_empty()); @@ -451,6 +477,7 @@ mod tests { PeerId::random(), vec![(key, RecordType::Chunk)], &locally_stored_keys, + &[], ); assert!(!keys_to_fetch.is_empty()); @@ -477,24 +504,31 @@ mod tests { // Set distance range let distance_target = NetworkAddress::from_peer(PeerId::random()); - let distance_range = self_address.distance(&distance_target).ilog2().unwrap_or(1); + let distance_range = self_address.distance(&distance_target); replication_fetcher.set_replication_distance_range(distance_range); + // generate a list of close peers + let close_peers = (0..100).map(|_| PeerId::random()).collect::>(); + let mut incoming_keys = Vec::new(); let mut in_range_keys = 0; (0..100).for_each(|_| { let random_data: Vec = (0..50).map(|_| rand::random::()).collect(); let key = NetworkAddress::from_record_key(&RecordKey::from(random_data)); - if key.distance(&self_address).ilog2().unwrap_or(0) <= distance_range { + if key.distance(&self_address).ilog2() <= distance_range.ilog2() { in_range_keys += 1; } incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = - replication_fetcher.add_keys(PeerId::random(), incoming_keys, &Default::default()); + let keys_to_fetch = replication_fetcher.add_keys( + PeerId::random(), + incoming_keys, + &Default::default(), + &close_peers, + ); assert_eq!( 
keys_to_fetch.len(), replication_fetcher.on_going_fetches.len(), diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 76b6349ce1..4fcaf7b1b4 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -6,9 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - close_group_majority, driver::GetRecordCfg, GetRecordError, Network, NetworkError, Result, -}; +use crate::{driver::GetRecordCfg, Network, NetworkError, Result}; use libp2p::kad::{Quorum, Record}; use sn_protocol::{ storage::{try_deserialize_record, RecordHeader, RecordKind, RetryStrategy, SpendAddress}, @@ -27,8 +25,10 @@ impl Network { /// If we get a quorum error, we enable re-try pub async fn get_raw_spends(&self, address: SpendAddress) -> Result> { let key = NetworkAddress::from_spend_address(address).to_record_key(); + // use Quorum::One not All here, as we want to collect and return all the spends we can find + // Quorum all may be prohibitive to this end let get_cfg = GetRecordCfg { - get_quorum: Quorum::Majority, + get_quorum: Quorum::One, retry_strategy: None, // This should not be set here. This function is used as a quick check to find the spends around the key during // validation. The returned records might possibly be double spend attempt and the record will not match @@ -39,7 +39,7 @@ impl Network { }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( - "Got record from the network, {:?}", + "Got raw spends from the network, {:?}", PrettyPrintRecordKey::from(&record.key) ); get_raw_signed_spends_from_record(&record) @@ -51,38 +51,14 @@ impl Network { /// If we get a quorum error, we increase the RetryStrategy pub async fn get_spend(&self, address: SpendAddress) -> Result { let key = NetworkAddress::from_spend_address(address).to_record_key(); - let mut get_cfg = GetRecordCfg { + let get_cfg = GetRecordCfg { get_quorum: Quorum::All, retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), is_register: false, }; - let record = match self.get_record_from_network(key.clone(), &get_cfg).await { - Ok(record) => record, - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { - record, - expected, - got, - })) => { - // if majority holds the spend, it might be worth to be trusted. - if got >= close_group_majority() { - debug!("At least a majority nodes hold the spend {address:?}, going to trust it if can fetch with majority again."); - get_cfg.get_quorum = Quorum::Majority; - get_cfg.retry_strategy = Some(RetryStrategy::Balanced); - self.get_record_from_network(key, &get_cfg).await? 
- } else { - return Err(NetworkError::GetRecordError( - GetRecordError::NotEnoughCopies { - record, - expected, - got, - }, - )); - } - } - Err(err) => return Err(err), - }; + let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( "Got record from the network, {:?}", PrettyPrintRecordKey::from(&record.key) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 3f3343f403..b0dd3f6857 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -552,7 +552,7 @@ impl Node { }; debug!( - "Got {} validated spends with key: {unique_pubkey:?} at {pretty_key:?}", + "Found {} spends with key: {unique_pubkey:?} at {pretty_key:?}", validated_spends.len() ); @@ -564,14 +564,12 @@ impl Node { expires: None, }; self.network().put_local_record(record); - debug!( - "Successfully stored validated spends with key: {unique_pubkey:?} at {pretty_key:?}" - ); + debug!("Successfully stored spends with key: {unique_pubkey:?} at {pretty_key:?}"); // Just log the double spend attempt. DoubleSpend error during PUT is not used and would just lead to // RecordRejected marker (which is incorrect, since we store double spends). if validated_spends.len() > 1 { - warn!("Got double spend(s) of len {} for the Spend PUT with unique_pubkey {unique_pubkey}", validated_spends.len()); + warn!("Got Burnt SpendAttempts of len {} for the Spend PUT with unique_pubkey {unique_pubkey} at {pretty_key:?}", validated_spends.len()); } self.record_metrics(Marker::ValidSpendRecordPutFromNetwork(&pretty_key)); @@ -756,13 +754,14 @@ impl Node { } spends } - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { + Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopiesInRange { record, got, + range, .. })) => { info!( - "Retrieved {got} copies of the record for {unique_pubkey:?} from the network" + "Retrieved {got} copies of the record for {unique_pubkey:?} from the network in range {range}" ); match get_raw_signed_spends_from_record(&record) { Ok(spends) => spends, diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 37819df38d..a2e4a079cf 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -6,17 +6,18 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{error::Result, node::Node}; +use crate::{ + error::{Error, Result}, + node::Node, +}; use libp2p::{ kad::{Quorum, Record, RecordKey}, PeerId, }; -use sn_networking::{ - sort_peers_by_address_and_limit, GetRecordCfg, Network, REPLICATION_PEERS_COUNT, -}; +use sn_networking::{GetRecordCfg, Network}; use sn_protocol::{ - messages::{Cmd, Query, QueryResponse, Request, Response}, - storage::RecordType, + messages::{Query, QueryResponse, Request, Response}, + storage::{try_serialize_record, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; use tokio::task::spawn; @@ -81,12 +82,29 @@ impl Node { // Hence value of the flag actually doesn't matter. is_register: false, }; - match node.network().get_record_from_network(key, &get_cfg).await { + match node + .network() + .get_record_from_network(key.clone(), &get_cfg) + .await + { Ok(record) => record, - Err(err) => { - error!("During replication fetch of {pretty_key:?}, failed in re-attempt of get from network {err:?}"); - return; - } + // TODO: do we need to handle SplitRecord anywhere else? 
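// The error split drawn here mirrors backoff's two classes: a detected double
// spend is permanent and short-circuits, anything else may be retried. A reduced
// sketch under that assumption (hypothetical types, not the backoff crate's API):
//
//     enum FetchErr { Permanent(String), Transient(String) }
//
//     fn fetch_with_retry(mut attempt: impl FnMut() -> Result<Vec<u8>, FetchErr>, tries: u32) -> Result<Vec<u8>, String> {
//         for _ in 0..tries {
//             match attempt() {
//                 Ok(record) => return Ok(record),
//                 Err(FetchErr::Permanent(e)) => return Err(e), // e.g. a double spend attempt
//                 Err(FetchErr::Transient(_)) => continue,      // e.g. quorum not yet reached
//             }
//         }
//         Err("retries exhausted".into())
//     }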
+ Err(error) => match error { + sn_networking::NetworkError::DoubleSpendAttempt(spends) => { + debug!("Failed to fetch record {pretty_key:?} from the network, double spend attempt {spends:?}"); + + let bytes = try_serialize_record(&spends, RecordKind::Spend)?; + + // TODO: does this need merged with any local copy? + Record { + key, + value: bytes.to_vec(), + publisher: None, + expires: None, + } + } + other_error => return Err(other_error.into()), + }, } }; @@ -98,6 +116,7 @@ impl Node { } else { debug!("Completed storing Replication Record {pretty_key:?} from network."); } + Ok::<(), Error>(()) }); } Ok(()) @@ -113,86 +132,9 @@ impl Node { let network = self.network().clone(); let _handle = spawn(async move { - let start = std::time::Instant::now(); - let pretty_key = PrettyPrintRecordKey::from(&paid_key); - - // first we wait until our own network store can return the record - // otherwise it may not be fully written yet - let mut retry_count = 0; - debug!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); - loop { - let record = match network.get_local_record(&paid_key).await { - Ok(record) => record, - Err(err) => { - error!( - "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" - ); - None - } - }; - - if record.is_some() { - break; - } - - if retry_count > 10 { - error!( - "Could not get record from store for replication: {pretty_key:?} after 10 retries" - ); - return; - } - - retry_count += 1; - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - - debug!("Start replication of fresh record {pretty_key:?} from store"); - - // Already contains self_peer_id - let mut closest_k_peers = match network.get_closest_k_value_local_peers().await { - Ok(peers) => peers, - Err(err) => { - error!("Replicating fresh record {pretty_key:?} get_closest_local_peers errored: {err:?}"); - return; - } - }; - - // remove ourself from these calculations - closest_k_peers.retain(|peer_id| peer_id != &network.peer_id()); - - let data_addr = NetworkAddress::from_record_key(&paid_key); - - let sorted_based_on_addr = match sort_peers_by_address_and_limit( - &closest_k_peers, - &data_addr, - REPLICATION_PEERS_COUNT, - ) { - Ok(result) => result, - Err(err) => { - error!( - "When replicating fresh record {pretty_key:?}, having error when sort {err:?}" - ); - return; - } - }; - - let our_peer_id = network.peer_id(); - let our_address = NetworkAddress::from_peer(our_peer_id); - let keys = vec![(data_addr.clone(), record_type.clone())]; - - for peer_id in sorted_based_on_addr { - debug!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); - let request = Request::Cmd(Cmd::Replicate { - holder: our_address.clone(), - keys: keys.clone(), - }); - - network.send_req_ignore_reply(request, *peer_id); - } - debug!( - "Completed replicate fresh record {pretty_key:?} on store, in {:?}", - start.elapsed() - ); + network + .replicate_valid_fresh_record(paid_key, record_type) + .await; }); } } diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 8d06a87187..21ba72d619 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -13,18 +13,19 @@ // use common::client::{get_client_and_funded_wallet, get_wallet}; // use eyre::{bail, Result}; // use itertools::Itertools; -// use sn_transfers::{ -// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, -// SpendReason, WalletError, GENESIS_CASHNOTE, -// }; // use sn_logging::LogBuilder; // use 
sn_networking::NetworkError; +// use sn_transfers::{ +// get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, +// WalletError, GENESIS_CASHNOTE, +// }; // use std::time::Duration; // use tracing::*; // #[tokio::test] // async fn cash_note_transfer_double_spend_fail() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); +// let _log_guards = +// LogBuilder::init_single_threaded_tokio_test("cash_note_transfer_double_spend_fail", true); // // create 1 wallet add money from faucet // let first_wallet_dir = TempDir::new()?; @@ -40,7 +41,7 @@ // assert_eq!(third_wallet.balance(), NanoTokens::zero()); // // manually forge two transfers of the same source -// let amount = first_wallet_balance / 3; +// let amount = NanoTokens::from(first_wallet_balance / 3); // let to1 = first_wallet.address(); // let to2 = second_wallet.address(); // let to3 = third_wallet.address(); @@ -70,31 +71,50 @@ // )?; // // send both transfers to the network -// // upload won't error out, only error out during verification. + // info!("Sending both transfers to the network..."); -// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; -// assert!(res.is_ok()); -// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; -// assert!(res.is_ok()); +// // These may error (but may not depending on network speed) +// // so we're not going to rely on it here. +// let _ = client.send_spends(transfer_to_2.spends.iter(), true).await; -// // we wait 5s to ensure that the double spend attempt is detected and accumulated -// info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); -// tokio::time::sleep(Duration::from_secs(10)).await; +// let _ = client.send_spends(transfer_to_3.spends.iter(), true).await; + +// // check the CashNotes, it should fail +// info!("Verifying the transfers from first wallet..."); // let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// // check the CashNotes, it should fail -// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; -// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; -// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); +// let mut should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let mut should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; + +// for i in 0..5 { +// if should_err1.is_err() && should_err2.is_err() { +// break; +// } + +// tokio::time::sleep(Duration::from_secs(1)).await; +// info!("Retrying verification.{i}... for should_err1+2"); +// println!("Retrying verification{i} ... 
for should_err1+2"); +// should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// } + +// info!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); +// println!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); // assert!(should_err1.is_err() && should_err2.is_err()); -// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); -// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); + +// assert_eq!( +// format!("{should_err1:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err1:?}" +// ); + +// assert_eq!( +// format!("{should_err2:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpend error, was: {should_err2:?}" +// ); // Ok(()) // } @@ -168,7 +188,7 @@ // )?; // // send the transfer to the network which should reject it -// let res = client.send_spends(transfer2.spends.iter(), false).await; +// let res = client.send_spends(transfer2.spends.iter(), true).await; // std::mem::drop(exclusive_access); // assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); @@ -184,8 +204,8 @@ // let wallet_dir_1 = TempDir::new()?; // let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; -// let balance_1 = wallet_1.balance(); -// let amount = balance_1 / 2; +// let balance_1 = wallet_1.balance().as_nano(); +// let amount = NanoTokens::from(balance_1 / 2); // let to1 = wallet_1.address(); // // Send from 1 -> 2 @@ -262,14 +282,18 @@ // reason.clone(), // wallet_1.key(), // )?; // reuse the old cash notes -// client -// .send_spends(transfer_to_3.spends.iter(), false) -// .await?; +// // ignore response in case it errors out early, we verify below +// let _res = client.send_spends(transfer_to_3.spends.iter(), true).await; // info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_3[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned + // info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); -// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned + +// let res = client.verify_cashnote(&cash_notes_for_2[0]).await; +// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned // // The old spend has been poisoned, but spends from 22 -> 222 should still work // let wallet_dir_222 = TempDir::new()?; @@ -300,16 +324,16 @@ // client.verify_cashnote(&cash_notes_for_222[0]).await?; // // finally assert that we have a double spend attempt error here -// // we wait 1s to ensure that the double spend attempt is detected and accumulated +// // we wait to ensure that the double spend attempt is detected and accumulated // tokio::time::sleep(Duration::from_secs(5)).await; // match client.verify_cashnote(&cash_notes_for_2[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt was: {e:?}", // ); // } // } @@ -317,10 +341,10 @@ // match client.verify_cashnote(&cash_notes_for_3[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert!( -// e.to_string() -// .contains("Network Error Double spend(s) attempt was detected"), -// "error should reflect double spend attempt", +// assert_eq!( +// e.to_string(), +// format!("{}", WalletError::BurntSpend), +// "error should reflect double spend attempt was: {e:?}", // ); // } // } @@ -339,7 +363,7 @@ // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; // let balance_a = wallet_a.balance().as_nano(); -// let amount = balance_a / 2; +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -428,12 +452,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_secs(10)).await; - -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); // poisoned +// assert!( +// format!("{result:?}").starts_with("Err(UnexpectedParentSpends"), +// "Should have been UnexpectedParentSpends error, was: {result:?}" +// ); // // Try to double spend from B -> Y // let wallet_dir_y = TempDir::new()?; @@ -470,32 +492,48 @@ // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from B -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // info!("Verifying the original cashnote of A -> B"); + +// // arbitrary time sleep to allow for network accumulation of double spend. 
+// tokio::time::sleep(Duration::from_secs(1)).await; + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; // info!("Got result while verifying the original spend from A -> B: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); + +// println!("Verifying the original cashnote of B -> C"); -// info!("Verifying the original cashnote of B -> C"); // let result = client.verify_cashnote(&cash_notes_for_c[0]).await; // info!("Got result while verifying the original spend from B -> C: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); + // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }, "result should be verify error, it was {result:?}"); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend), +// "Should have been BurntSpent error, was: {result:?}" +// ); // Ok(()) // } @@ -511,8 +549,8 @@ // let wallet_dir_a = TempDir::new()?; // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; -// let balance_a = wallet_a.balance(); -// let amount = balance_a / 2; +// let balance_a = wallet_a.balance().as_nano(); +// let amount = NanoTokens::from(balance_a / 2); // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -574,7 +612,7 @@ // )?; // client -// .send_spends(transfer_to_c.spends.iter(), false) +// .send_spends(transfer_to_c.spends.iter(), true) // .await?; // info!("Verifying the transfers from B -> C wallet..."); @@ -611,9 +649,10 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client @@ -649,20 +688,23 @@ // reason.clone(), // wallet_a.key(), // )?; // reuse the old cash notes -// client -// 
.send_spends(transfer_to_y.spends.iter(), false) -// .await?; + +// // we actually don't care about the result here, we just want to spam the network with double spends +// let _ = client.send_spends(transfer_to_y.spends.iter(), false).await; + +// // and then we verify the double spend attempt // info!("Verifying the transfers from A -> Y wallet... It should error out."); // let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); // // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_millis(500)).await; +// tokio::time::sleep(Duration::from_millis(1500)).await; // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from A -> Y: {result:?}"); -// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { -// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); -// }); +// assert_eq!( +// format!("{result:?}"), +// format!("Err({:?})", WalletError::BurntSpend) +// ); // // the original A should still be present as one of the double spends // let res = client diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 130254d6e5..419f49aa64 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -16,13 +16,10 @@ use common::{ get_all_peer_ids, get_safenode_rpc_client, NodeRestart, }; use eyre::{eyre, Result}; -use libp2p::{ - kad::{KBucketKey, RecordKey}, - PeerId, -}; +use libp2p::{kad::RecordKey, PeerId}; use rand::{rngs::OsRng, Rng}; use sn_logging::LogBuilder; -use sn_networking::{sleep, sort_peers_by_key_and_limit}; +use sn_networking::{sleep, sort_peers_by_address_and_limit, sort_peers_by_key_and_limit}; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, @@ -99,8 +96,17 @@ async fn verify_data_location() -> Result<()> { let (client, wallet) = get_client_and_funded_wallet().await; - store_chunks(&client, chunk_count, &wallet).await?; - store_registers(&client, register_count, &wallet).await?; + let paying_wallet_dir = TempDir::new()?; + + let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + + store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; + store_registers( + client.clone(), + register_count, + paying_wallet_dir.to_path_buf(), + ) + .await?; // Verify data location initially verify_location(&all_peers, &node_rpc_address).await?; @@ -212,9 +218,9 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd for (key, actual_holders_idx) in record_holders.iter() { println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - let record_key = KBucketKey::from(key.to_vec()); + let record_address = NetworkAddress::from_record_key(key); let expected_holders = - sort_peers_by_key_and_limit(all_peers, &record_key, CLOSE_GROUP_SIZE)? + sort_peers_by_address_and_limit(all_peers, &record_address, CLOSE_GROUP_SIZE)? 
.into_iter() .cloned() .collect::>(); diff --git a/sn_transfers/src/wallet/error.rs b/sn_transfers/src/wallet/error.rs index 5a57b7434a..f60b718f42 100644 --- a/sn_transfers/src/wallet/error.rs +++ b/sn_transfers/src/wallet/error.rs @@ -40,9 +40,19 @@ pub enum Error { /// A general error when receiving a transfer fails #[error("Failed to receive transfer due to {0}")] CouldNotReceiveMoney(String), + /// A spend has been burnt (ie there was a DoubleSpendAttempt) + #[error("Failed to verify transfer validity in the network, a burnt SpendAttempt was found")] + BurntSpend, + /// Parents of a spend were not as expected in a provided cash note + #[error("Failed to verify transfer's parents in the network, transfer could be invalid or a parent double spent")] + UnexpectedParentSpends(crate::SpendAddress), + ///No valid unspent cashnotes found + #[error("All the redeemed CashNotes are already spent")] + AllRedeemedCashnotesSpent, /// A general error when verifying a transfer validity in the network #[error("Failed to verify transfer validity in the network {0}")] CouldNotVerifyTransfer(String), + /// Failed to fetch spend from network #[error("Failed to fetch spend from network: {0}")] FailedToGetSpend(String), From a615f8ae8ada2d3e0c05e27f8dd011d820faa5df Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 9 Aug 2024 14:42:54 +0900 Subject: [PATCH 195/255] fix(networking): reset raw spend get to be Majority --- sn_networking/src/transfers.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 4fcaf7b1b4..40c6182f94 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -25,10 +25,8 @@ impl Network { /// If we get a quorum error, we enable re-try pub async fn get_raw_spends(&self, address: SpendAddress) -> Result> { let key = NetworkAddress::from_spend_address(address).to_record_key(); - // use Quorum::One not All here, as we want to collect and return all the spends we can find - // Quorum all may be prohibitive to this end let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, + get_quorum: Quorum::Majority, retry_strategy: None, // This should not be set here. This function is used as a quick check to find the spends around the key during // validation. The returned records might possibly be double spend attempt and the record will not match From e9d77ee652c2ef8aa65cfd103fddfce6a8d08024 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Fri, 9 Aug 2024 14:46:23 +0900 Subject: [PATCH 196/255] fix(networking): return true for exceeded quorum for non sensitive data --- sn_networking/src/driver.rs | 3 + sn_networking/src/event/kad.rs | 88 +++++--- sn_networking/src/replication_fetcher.rs | 2 + sn_node/tests/storage_payments.rs | 257 ++++++++++++----------- 4 files changed, 199 insertions(+), 151 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 9bcd1a1ad9..d90c838778 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -437,6 +437,7 @@ impl NetworkBuilder { .set_max_packet_size(MAX_PACKET_SIZE) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. .disjoint_query_paths(true) + // TODO: Do we want to reduce this from default still? // How many nodes _should_ store data. .set_replication_factor(REPLICATION_FACTOR); @@ -870,6 +871,8 @@ impl SwarmDriver { // TODO: Test this calculation in larger networks // We get around 5-7 peers returned here... 
We want to take further in larger networks + // + // This value let farthest_peer_to_check = self .get_all_local_peers_excluding_self() .len() diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index f772e35904..5707f812b7 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -18,7 +18,10 @@ use libp2p::{ }, PeerId, }; -use sn_protocol::{NetworkAddress, PrettyPrintRecordKey}; +use sn_protocol::{ + messages::{Cmd, Request}, + NetworkAddress, PrettyPrintRecordKey, +}; use std::{ collections::{hash_map::Entry, HashSet}, time::Instant, @@ -422,18 +425,27 @@ impl SwarmDriver { quorum: &Quorum, ) -> bool { warn!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len()); + let is_sensitive_data = matches!(quorum, Quorum::All); let required_quorum = get_quorum_value(quorum); + let met_quorum = searched_peers_list.len() >= required_quorum; + + // we only enforce range if we have sensitive data...for data spends quorum::all + if met_quorum && !is_sensitive_data { + return true; + } + // get the farthest distance between peers in the response - let mut current_distance_searched = KBucketDistance::default(); + let mut max_distance_to_data_from_responded_nodes = KBucketDistance::default(); // iterate over peers and see if the distance to the data is greater than the get_range + // Fathest peer from the data that has returned it for peer_id in searched_peers_list.iter() { let peer_address = NetworkAddress::from_peer(*peer_id); let distance_to_data = peer_address.distance(data_key_address); - if current_distance_searched < distance_to_data { - current_distance_searched = distance_to_data; + if max_distance_to_data_from_responded_nodes < distance_to_data { + max_distance_to_data_from_responded_nodes = distance_to_data; } } @@ -441,30 +453,22 @@ impl SwarmDriver { // It allows us to say "we've searched up to and including this bucket" // as opposed to the concrete distance itself (which statistically seems like we can fall outwith a range // quite easily with a small number of peers) - let exceeded_request_range = if current_distance_searched.ilog2() + let exceeded_request_range = if max_distance_to_data_from_responded_nodes.ilog2() < expected_get_range.ilog2() { - let dist = current_distance_searched.ilog2(); - let expected_dist = expected_get_range.ilog2(); + let dist = max_distance_to_data_from_responded_nodes.ilog2(); + let expected_dist = expected_get_range.ilog2(); // 253 // 122 - warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {current_distance_searched:?} is less than expcted GetRange of {expected_get_range:?}"); + warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {max_distance_to_data_from_responded_nodes:?} is less than expcted GetRange of {expected_get_range:?}"); false } else { true }; - let is_sensitive_data = matches!(quorum, Quorum::All); // We assume a finalised query has searched as far as it can in libp2p - // TODO: Do we only allow this if quorum is from known peers? - // TODO: Do we only bail early if NOT Quorum::All? (And so we need to search the full range?) 
- // - // we only enforce range if we have sensitive data...for data spends quorum::all - if searched_peers_list.len() >= required_quorum && !is_sensitive_data { - return true; - } - if exceeded_request_range { + if exceeded_request_range && met_quorum { warn!("RANGE: {data_key_address:?} Request satisfied as exceeded request range : {exceeded_request_range:?} and Quorum satisfied with {:?} peers exceeding quorum {required_quorum:?}", searched_peers_list.len()); return true; } @@ -500,6 +504,7 @@ impl SwarmDriver { info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}"); let result = if num_of_versions > 1 { + // TODO: Do we want to repopulate a split record.and under what conditions? warn!("RANGE: more than one version found!"); Err(GetRecordError::SplitRecord { result_map: result_map.clone(), @@ -509,7 +514,6 @@ impl SwarmDriver { Ok(record.clone()) } else { - // // We have not searched enough of the network range. let result = Err(GetRecordError::NotEnoughCopiesInRange { record: record.clone(), @@ -528,14 +532,48 @@ impl SwarmDriver { // let's ensure we have an updated network view self.trigger_network_discovery(); - let (sender, _receiver) = oneshot::channel(); + warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); + + let record_type = Self::get_type_from_record(record)?; - // nodes will try/fail to replicate it from us, but grab from the network thereafter - self.queue_network_swarm_cmd(NetworkSwarmCmd::PutRecord { - record: record.clone(), - sender, - quorum: cfg.get_quorum, - }); + let replicate_targets: HashSet<_> = self + .get_filtered_peers_exceeding_range_or_close_group(&data_key_address) + .iter() + .cloned() + .collect(); + + if from_peers == &replicate_targets { + warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!"); + } + + // set holder to someone that has the data + let holder = NetworkAddress::from_peer( + from_peers + .iter() + .next() + .cloned() + .unwrap_or(self.self_peer_id), + ); + + for peer in replicate_targets { + warn!("Reputting data to {peer:?} for {pretty_key:?} if needed..."); + // Do not send to any peer that has already informed us + if from_peers.contains(&peer) { + continue; + } + + debug!("RANGE: (insufficient, so ) Sending data to unresponded peer: {peer:?} for {pretty_key:?}"); + + // nodes will try/fail to trplicate it from us, but grab from the network thereafter + self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { + req: Request::Cmd(Cmd::Replicate { + holder: holder.clone(), + keys: vec![(data_key_address.clone(), record_type.clone())], + }), + peer, + sender: None, + }); + } } result diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 0df4f1c29b..2d675cfdcc 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -503,6 +503,8 @@ mod tests { let mut replication_fetcher = ReplicationFetcher::new(peer_id, event_sender); // Set distance range + // TODO: close peers can break the distance range check here... 
we need a proper + // way to update this test let distance_target = NetworkAddress::from_peer(PeerId::random()); let distance_range = self_address.distance(&distance_target); replication_fetcher.set_replication_distance_range(distance_range); diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index 23fe9c53b0..d36f680ca2 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -14,7 +14,6 @@ // use libp2p::PeerId; // use rand::Rng; // use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; -// use sn_evm::{Amount, AttoTokens, PaymentQuote}; // use sn_logging::LogBuilder; // use sn_networking::{GetRecordError, NetworkError}; // use sn_protocol::{ @@ -23,6 +22,7 @@ // NetworkAddress, // }; // use sn_registers::Permissions; +// use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; // use std::collections::BTreeMap; // use tokio::time::{sleep, Duration}; // use tracing::info; @@ -80,7 +80,7 @@ // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // let subset_len = chunks.len() / 3; -// let _storage_cost = wallet_client +// let res = wallet_client // .pay_for_storage( // chunks // .clone() @@ -88,7 +88,15 @@ // .take(subset_len) // .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), // ) -// .await?; +// .await; + +// // if the payment failed, we can log that +// if let Err(error) = res { +// tracing::warn!( +// "Payment failed, (though that doesn't really break this test): {:?}", +// error +// ); +// } // // now let's request to upload all addresses, even that we've already paid for a subset of them // let verify_store = false; @@ -111,7 +119,7 @@ // let paying_wallet_dir: TempDir = TempDir::new()?; // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let wallet_original_balance = paying_wallet.balance().as_atto(); +// let wallet_original_balance = paying_wallet.balance().as_nano(); // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // // generate a random number (between 50 and 100) of random addresses @@ -135,10 +143,10 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for the subset of addresses, 1 nano per addr -// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); +// let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); // info!("Verifying new balance on paying wallet is {new_balance} ..."); // let paying_wallet = wallet_client.into_wallet(); -// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm +// assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs for the subset have been cached in the wallet // assert!(random_content_addrs @@ -160,13 +168,12 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for addresses we haven't previously paid for, 1 nano per addr -// let new_balance = AttoTokens::from_atto( -// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), +// let new_balance = NanoTokens::from( +// wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), // ); // println!("Verifying new balance on paying wallet is now {new_balance} ..."); // let paying_wallet = wallet_client.into_wallet(); -// // TODO adapt to evm -// // assert_eq!(paying_wallet.balance(), new_balance); +// 
assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs now for all addresses have been cached in the wallet // // assert!(random_content_addrs @@ -229,18 +236,16 @@ // no_data_payments.insert( // *chunk_name, // ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), +// MainPubkey::new(bls::SecretKey::random().public_key()), +// PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), // PeerId::random().to_bytes(), // ), // ); // } -// // TODO adapt to evm -// // let _ = wallet_client -// // .mut_wallet() -// // .send_storage_payment(&no_data_payments) -// // .await?; +// let _ = wallet_client +// .mut_wallet() +// .local_send_storage_payment(&no_data_payments)?; // sleep(Duration::from_secs(5)).await; @@ -248,131 +253,131 @@ // .upload_test_bytes(content_bytes.clone(), false) // .await?; -// info!("Reading {content_addr:?} expected to fail"); -// let mut files_download = FilesDownload::new(files_api); -// assert!( -// matches!( -// files_download.download_file(content_addr, None).await, -// Err(ClientError::Network(NetworkError::GetRecordError( -// GetRecordError::RecordNotFound -// ))) -// ), -// "read bytes should fail as we didn't store them" -// ); +// // info!("Reading {content_addr:?} expected to fail"); +// // let mut files_download = FilesDownload::new(files_api); +// // assert!( +// // matches!( +// // files_download.download_file(content_addr, None).await, +// // Err(ClientError::Network(NetworkError::GetRecordError( +// // GetRecordError::RecordNotFound +// // ))) +// // ), +// // "read bytes should fail as we didn't store them" +// // ); -// Ok(()) -// } +// // Ok(()) +// // } -// #[tokio::test] -// async fn storage_payment_register_creation_succeeds() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // async fn storage_payment_register_creation_succeeds() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_addr = NetworkAddress::from_register_address(address); -// info!("Paying for random Register address {net_addr:?} ..."); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_addr = NetworkAddress::from_register_address(address); +// // info!("Paying for random Register address {net_addr:?} ..."); -// let _cost = wallet_client -// .pay_for_storage(std::iter::once(net_addr)) -// .await?; +// // let _cost = wallet_client +// // .pay_for_storage(std::iter::once(net_addr)) +// // .await?; -// let (mut register, _cost, _royalties_fees) = client -// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) -// .await?; +// // let (mut register, _cost, _royalties_fees) = client +// // 
.create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// // .await?; -// println!("Newly created register has {} ops", register.read().len()); +// // println!("Newly created register has {} ops", register.read().len()); -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(register.read(), retrieved_reg.read()); +// // assert_eq!(register.read(), retrieved_reg.read()); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; +// // register.write(&random_entry)?; -// println!( -// "Register has {} ops after first write", -// register.read().len() -// ); +// // println!( +// // "Register has {} ops after first write", +// // register.read().len() +// // ); -// register.sync(&mut wallet_client, true, None).await?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// assert_eq!(retrieved_reg.read().len(), 1); +// // assert_eq!(retrieved_reg.read().len(), 1); -// for index in 1..10 { -// println!("current index is {index}"); -// let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// // for index in 1..10 { +// // println!("current index is {index}"); +// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// register.write(&random_entry)?; -// register.sync(&mut wallet_client, true, None).await?; +// // register.write(&random_entry)?; +// // register.sync(&mut wallet_client, true, None).await?; -// let retrieved_reg = client.get_register(address).await?; +// // let retrieved_reg = client.get_register(address).await?; -// println!( -// "current retrieved register entry length is {}", -// retrieved_reg.read().len() -// ); -// println!("current expected entry length is {}", register.read().len()); +// // println!( +// // "current retrieved register entry length is {}", +// // retrieved_reg.read().len() +// // ); +// // println!("current expected entry length is {}", register.read().len()); -// println!( -// "current retrieved register ops length is {}", -// retrieved_reg.ops.len() -// ); -// println!("current local cached ops length is {}", register.ops.len()); +// // println!( +// // "current retrieved register ops length is {}", +// // retrieved_reg.ops.len() +// // ); +// // println!("current local cached ops length is {}", register.ops.len()); -// assert_eq!(retrieved_reg.read().len(), register.read().len()); +// // assert_eq!(retrieved_reg.read().len(), register.read().len()); -// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// println!("Current fetched register is {:?}", retrieved_reg.register); -// println!( -// "Fetched register has update history of {}", -// retrieved_reg.register.log_update_history() -// ); +// // println!("Current fetched register is {:?}", retrieved_reg.register); +// // println!( +// // "Fetched register has update history of {}", +// // retrieved_reg.register.log_update_history() +// // ); -// std::thread::sleep(std::time::Duration::from_millis(1000)); -// } +// // std::thread::sleep(std::time::Duration::from_millis(1000)); +// // } -// Ok(()) -// } +// // 
Ok(()) +// // } -// #[tokio::test] -// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] -// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { -// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// // #[tokio::test] +// // #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] +// // async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// let paying_wallet_dir = TempDir::new()?; +// // let paying_wallet_dir = TempDir::new()?; -// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// let mut rng = rand::thread_rng(); -// let xor_name = XorName::random(&mut rng); -// let address = RegisterAddress::new(xor_name, client.signer_pk()); -// let net_address = -// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); +// // let mut rng = rand::thread_rng(); +// // let xor_name = XorName::random(&mut rng); +// // let address = RegisterAddress::new(xor_name, client.signer_pk()); +// // let net_address = +// // NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); -// let mut no_data_payments = BTreeMap::default(); -// no_data_payments.insert( -// net_address -// .as_xorname() -// .expect("RegisterAddress should convert to XorName"), -// ( -// sn_evm::utils::dummy_address(), -// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), -// vec![], -// ), -// ); +// // let mut no_data_payments = BTreeMap::default(); +// // no_data_payments.insert( +// // net_address +// // .as_xorname() +// // .expect("RegisterAddress should convert to XorName"), +// // ( +// // sn_evm::utils::dummy_address(), +// // PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// // vec![], +// // ), +// // ); // println!( // "current retrieved register entry length is {}", @@ -395,16 +400,16 @@ // // .send_storage_payment(&no_data_payments) // // .await?; -// // this should fail to store as the amount paid is not enough -// let (mut register, _cost, _royalties_fees) = client -// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) -// .await?; +// // // this should fail to store as the amount paid is not enough +// // let (mut register, _cost, _royalties_fees) = client +// // .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// // .await?; -// sleep(Duration::from_secs(5)).await; -// assert!(matches!( -// client.get_register(address).await, -// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address -// )); +// // sleep(Duration::from_secs(5)).await; +// // assert!(matches!( +// // client.get_register(address).await, +// // Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// // )); // println!("Current fetched register is {:?}", retrieved_reg.address()); // println!( @@ -415,11 +420,11 @@ // let random_entry = rng.gen::<[u8; 32]>().to_vec(); // 
register.write(&random_entry)?;

-//     sleep(Duration::from_secs(5)).await;
-//     assert!(matches!(
-//         register.sync(&mut wallet_client, false, None).await,
-//         Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
-//     ));
+//     // sleep(Duration::from_secs(5)).await;
+//     // assert!(matches!(
+//     //     register.sync(&mut wallet_client, false, None).await,
+//     //     Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address
+//     // ));

-//     Ok(())
-// }
+//     // Ok(())
+//     // }

From 548ba9263c537d5e3531c4f7617cdc2af79aae0c Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Tue, 3 Sep 2024 14:11:47 +0900
Subject: [PATCH 197/255] fix(networking): use closest_node_buffer_zone when
 selecting nodes for range replication

---
 sn_networking/src/cmd.rs       | 17 +++++++++++------
 sn_networking/src/driver.rs    |  2 --
 sn_networking/src/event/kad.rs |  5 ++---
 sn_node/src/replication.rs     |  2 --
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index 8ab88121ea..ef6a3185b0 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -7,6 +7,7 @@
 // permissions and limitations relating to use of the SAFE Network Software.

 use crate::{
+    close_group_majority,
     driver::{PendingGetClosestType, SwarmDriver},
     error::{NetworkError, Result},
     event::TerminateNodeReason,
@@ -1031,18 +1032,22 @@ impl SwarmDriver {

     /// From all local peers, returns any within current get_range for a given key
     /// Excludes self
-    pub(crate) fn get_filtered_peers_exceeding_range_or_close_group(
+    pub(crate) fn get_filtered_peers_exceeding_range_or_closest_nodes(
        &mut self,
         target_address: &NetworkAddress,
     ) -> Vec<PeerId> {
         let filtered_peers = self.get_filtered_peers_exceeding_range(target_address);
-
-        if filtered_peers.len() >= CLOSE_GROUP_SIZE {
+        let closest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority();
+        if filtered_peers.len() >= closest_node_buffer_zone {
             filtered_peers
         } else {
-            warn!("Insufficient peers within replication range. Falling back to use CLOSE_GROUP closest nodes");
+            warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {closest_node_buffer_zone:?} closest nodes");
             let all_peers = self.get_all_local_peers_excluding_self();
-            match sort_peers_by_address_and_limit(&all_peers, target_address, CLOSE_GROUP_SIZE) {
+            match sort_peers_by_address_and_limit(
+                &all_peers,
+                target_address,
+                closest_node_buffer_zone,
+            ) {
                 Ok(peers) => peers.iter().map(|p| **p).collect(),
                 Err(err) => {
                     error!("sorting peers close to {target_address:?} failed, sort error: {err:?}");
@@ -1059,7 +1064,7 @@ impl SwarmDriver {
         let our_address = NetworkAddress::from_peer(self.self_peer_id);

         let mut replicate_targets =
-            self.get_filtered_peers_exceeding_range_or_close_group(&our_address);
+            self.get_filtered_peers_exceeding_range_or_closest_nodes(&our_address);

         let now = Instant::now();
         self.replication_targets
diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index d90c838778..37a7fe60ab 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -437,7 +437,6 @@ impl NetworkBuilder {
             .set_max_packet_size(MAX_PACKET_SIZE)
             // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes.
             .disjoint_query_paths(true)
-            // TODO: Do we want to reduce this from default still?
             // How many nodes _should_ store data.
.set_replication_factor(REPLICATION_FACTOR); @@ -869,7 +868,6 @@ impl SwarmDriver { let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect(); info!("Sorted distances: {:?}", mapped); - // TODO: Test this calculation in larger networks // We get around 5-7 peers returned here... We want to take further in larger networks // // This value diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 5707f812b7..5b80a4262d 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -457,7 +457,7 @@ impl SwarmDriver { < expected_get_range.ilog2() { let dist = max_distance_to_data_from_responded_nodes.ilog2(); - let expected_dist = expected_get_range.ilog2(); // 253 // 122 + let expected_dist = expected_get_range.ilog2(); warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. {dist:?} {expected_dist:?} {max_distance_to_data_from_responded_nodes:?} is less than expcted GetRange of {expected_get_range:?}"); @@ -504,7 +504,6 @@ impl SwarmDriver { info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}"); let result = if num_of_versions > 1 { - // TODO: Do we want to repopulate a split record.and under what conditions? warn!("RANGE: more than one version found!"); Err(GetRecordError::SplitRecord { result_map: result_map.clone(), @@ -537,7 +536,7 @@ impl SwarmDriver { let record_type = Self::get_type_from_record(record)?; let replicate_targets: HashSet<_> = self - .get_filtered_peers_exceeding_range_or_close_group(&data_key_address) + .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address) .iter() .cloned() .collect(); diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index a2e4a079cf..80ec25b157 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -88,14 +88,12 @@ impl Node { .await { Ok(record) => record, - // TODO: do we need to handle SplitRecord anywhere else? Err(error) => match error { sn_networking::NetworkError::DoubleSpendAttempt(spends) => { debug!("Failed to fetch record {pretty_key:?} from the network, double spend attempt {spends:?}"); let bytes = try_serialize_record(&spends, RecordKind::Spend)?; - // TODO: does this need merged with any local copy? Record { key, value: bytes.to_vec(), From 184d8bc842ef1fc8a86f534204000f0ac708380c Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 3 Sep 2024 16:06:42 +0900 Subject: [PATCH 198/255] fix(networking): use libp2p for register retrieval merge the results after we see split record. 
Use Quorum::Majority at least (instead of Quorum::One)

---
 sn_client/src/api.rs | 54 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 6 deletions(-)

diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs
index 0f64b2ee06..4ac1fb322f 100644
--- a/sn_client/src/api.rs
+++ b/sn_client/src/api.rs
@@ -21,7 +21,8 @@ use rand::{thread_rng, Rng};
 use sn_networking::{
     get_signed_spend_from_record, multiaddr_is_global,
     target_arch::{interval, spawn, timeout, Instant},
-    GetRecordCfg, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg, VerificationKind,
+    GetRecordCfg, GetRecordError, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg,
+    VerificationKind,
 };
 use sn_protocol::{
     error::Error as ProtocolError,
@@ -410,18 +411,59 @@ impl Client {
     ///     let xorname = XorName::random(&mut rng);
     ///     let address = RegisterAddress::new(xorname, owner);
     ///     // Get a signed register
-    ///     let signed_register = client.get_signed_register_from_network(address);
+    ///     let signed_register = client.get_signed_register_from_network(address, true);
     /// # Ok(())
     /// # }
     /// ```
     pub async fn get_signed_register_from_network(
         &self,
         address: RegisterAddress,
+        is_verifying: bool,
     ) -> Result<SignedRegister> {
         let key = NetworkAddress::from_register_address(address).to_record_key();
+        let get_quorum = if is_verifying {
+            Quorum::All
+        } else {
+            Quorum::Majority
+        };
+        let retry_strategy = if is_verifying {
+            Some(RetryStrategy::Balanced)
+        } else {
+            Some(RetryStrategy::Quick)
+        };
+        let get_cfg = GetRecordCfg {
+            get_quorum,
+            retry_strategy,
+            target_record: None,
+            expected_holders: Default::default(),
+        };
+
+        let maybe_record = self.network.get_record_from_network(key, &get_cfg).await;
+        let record = match &maybe_record {
+            Ok(r) => r,
+            Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
+                let mut results_to_merge = HashMap::default();
+
+                for (address, (r, _peers)) in result_map {
+                    results_to_merge.insert(*address, r.clone());
+                }
+
+                return merge_register_records(address, &results_to_merge);
+            }
+            Err(e) => {
+                warn!("Failed to get record at {address:?} from the network: {e:?}");
+                return Err(ProtocolError::RegisterNotFound(Box::new(address)).into());
+            }
+        };

-        let maybe_records = self.network.get_register_record_from_network(key).await?;
-        merge_register_records(address, &maybe_records)
+        debug!(
+            "Got record from the network, {:?}",
+            PrettyPrintRecordKey::from(&record.key)
+        );
+
+        let register = get_register_from_record(record)
+            .map_err(|_| ProtocolError::RegisterNotFound(Box::new(address)))?;
+        Ok(register)
     }

     /// Retrieve a Register from the network.
@@ -747,7 +789,7 @@ impl Client {
     /// ```
     pub async fn verify_register_stored(&self, address: RegisterAddress) -> Result<SignedRegister> {
         info!("Verifying register: {address:?}");
-        self.get_signed_register_from_network(address).await
+        self.get_signed_register_from_network(address, true).await
     }

     /// Quickly checks if a `Register` is stored by expected nodes on the network.
@@ -781,7 +823,7 @@ impl Client {
         address: RegisterAddress,
     ) -> Result<SignedRegister> {
         info!("Quickly checking for existing register : {address:?}");
-        self.get_signed_register_from_network(address).await
+        self.get_signed_register_from_network(address, false).await
     }

     /// Send a `SpendCashNote` request to the network. Protected method.
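Note on the change above: the `is_verifying` flag is the only knob; it swaps the (quorum, retry) pair and the two paths otherwise share one code path, with split records merged rather than treated as failures. A minimal caller-side sketch follows. It is illustrative only and not part of the patch series: the import paths and the `read_register` helper name are assumptions, while `get_signed_register_from_network` and its flag are exactly as introduced in the diff above.

// Illustrative sketch only, not part of the patch series.
// Assumed import paths for the surrounding crates:
use sn_client::{Client, Error};
use sn_protocol::storage::RegisterAddress;
use sn_registers::SignedRegister;

// Hypothetical helper: choose the retrieval mode based on whether the caller
// needs strict, fully-replicated verification or just a quick existence check.
async fn read_register(
    client: &Client,
    address: RegisterAddress,
    verify: bool,
) -> Result<SignedRegister, Error> {
    // verify == true  selects Quorum::All + RetryStrategy::Balanced, the path
    //                 verify_register_stored takes after an upload;
    // verify == false selects Quorum::Majority + RetryStrategy::Quick, the
    //                 cheap pre-upload existence check.
    // Either way, split records are merged inside the call via
    // merge_register_records instead of being surfaced as an error.
    client.get_signed_register_from_network(address, verify).await
}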
From f7ceb5d1e73e55e331d1c01c22a51d464306f8d1 Mon Sep 17 00:00:00 2001
From: Josh Wilson
Date: Wed, 4 Sep 2024 09:56:00 +0900
Subject: [PATCH 199/255] chore(protocol): move get_type_from_record to
 protocol

---
 sn_client/src/wallet.rs           |  4 ++--
 sn_networking/src/cmd.rs          | 32 ++-----------------------------
 sn_networking/src/error.rs        |  3 ---
 sn_networking/src/event/kad.rs    |  3 ++-
 sn_protocol/src/error.rs          |  3 +++
 sn_protocol/src/lib.rs            |  2 ++
 sn_protocol/src/storage.rs        |  5 ++++-
 sn_protocol/src/storage/header.rs | 27 ++++++++++++++++++++++++++
 8 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs
index c2078b824e..9a32382142 100644
--- a/sn_client/src/wallet.rs
+++ b/sn_client/src/wallet.rs
@@ -1061,7 +1061,7 @@ impl Client {
         for spend in &cash_note.parent_spends {
             let address = SpendAddress::from_unique_pubkey(spend.unique_pubkey());
-            warn!(
+            info!(
                 "Getting parent spend for cn {address:?} pubkey {:?} from network at {address:?}",
                 spend.unique_pubkey()
             );
@@ -1074,7 +1074,7 @@ impl Client {
             Ok(spend) => Ok(spend),
             Err(error) => match error {
                 Error::Network(sn_networking::NetworkError::DoubleSpendAttempt(spends)) => {
-                    warn!("DoubleSpentAttempt found with {spends:?}");
+                    warn!("BurntSpend found with {spends:?}");
                     Err(WalletError::BurntSpend)
                 }
                 err => Err(WalletError::CouldNotVerifyTransfer(format!("{err:?}"))),
diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index ef6a3185b0..cccb679e0e 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -25,7 +25,7 @@ use libp2p::{
 use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics};
 use sn_protocol::{
     messages::{Cmd, Request, Response},
-    storage::{RecordHeader, RecordKind, RecordType},
+    storage::{get_type_from_record, RecordType},
     NetworkAddress, PrettyPrintRecordKey,
 };
 use std::{
@@ -34,7 +34,6 @@ use std::{
     time::Duration,
 };
 use tokio::sync::oneshot;
-use xor_name::XorName;

 use crate::target_arch::Instant;

@@ -555,33 +554,6 @@ impl SwarmDriver {
         Ok(())
     }

-    /// Return the RecordType
-    pub(crate) fn get_type_from_record(record: &Record) -> Result<RecordType> {
-        let key = record.key.clone();
-        let record_key = PrettyPrintRecordKey::from(&key);
-
-        match RecordHeader::from_record(record) {
-            Ok(record_header) => match record_header.kind {
-                RecordKind::Chunk => Ok(RecordType::Chunk),
-                RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
-                RecordKind::Spend | RecordKind::Register => {
-                    let content_hash = XorName::from_content(&record.value);
-                    Ok(RecordType::NonChunk(content_hash))
-                }
-                RecordKind::ChunkWithPayment
-                | RecordKind::RegisterWithPayment
-                | RecordKind::ScratchpadWithPayment => {
-                    error!("Record {record_key:?} with payment shall not be stored locally.");
-                    Err(NetworkError::InCorrectRecordHeader)
-                }
-            },
-            Err(err) => {
-                error!("For record {record_key:?}, failed to parse record_header {err:?}");
-                Err(NetworkError::InCorrectRecordHeader)
-            }
-        }
-    }
-
     pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> {
         let start = Instant::now();
         let mut cmd_string;
@@ -665,7 +637,7 @@ impl SwarmDriver {
                 let key = record.key.clone();
                 let record_key = PrettyPrintRecordKey::from(&key);

-                let record_type = Self::get_type_from_record(&record)?;
+                let record_type = get_type_from_record(&record)?;

                 let result = self
                     .swarm
diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs
index 09bf26de1f..99bf1fbe92 100644
--- a/sn_networking/src/error.rs
+++ b/sn_networking/src/error.rs
@@ -125,9 +125,6 @@ pub enum NetworkError
{
     #[error("The RecordKind obtained from the Record did not match with the expected kind: {0}")]
     RecordKindMismatch(RecordKind),

-    #[error("Record header is incorrect")]
-    InCorrectRecordHeader,
-
     // ---------- Transfer Errors
     #[error("Failed to get spend: {0}")]
     FailedToGetSpend(String),
diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs
index 5b80a4262d..ed0cc37045 100644
--- a/sn_networking/src/event/kad.rs
+++ b/sn_networking/src/event/kad.rs
@@ -20,6 +20,7 @@ use libp2p/{
 };
 use sn_protocol::{
     messages::{Cmd, Request},
+    storage::get_type_from_record,
     NetworkAddress, PrettyPrintRecordKey,
 };
 use std::{
@@ -533,7 +534,7 @@ impl SwarmDriver {

         warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range.");

-        let record_type = Self::get_type_from_record(record)?;
+        let record_type = get_type_from_record(record)?;

         let replicate_targets: HashSet<_> = self
             .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address)
diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs
index f73c356b53..8462ff85f3 100644
--- a/sn_protocol/src/error.rs
+++ b/sn_protocol/src/error.rs
@@ -78,4 +78,7 @@ pub enum Error {
     // The record already exists at this node
     #[error("The record already exists, so do not charge for it: {0:?}")]
     RecordExists(PrettyPrintRecordKey<'static>),
+
+    #[error("Record header is incorrect")]
+    IncorrectRecordHeader,
 }
diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs
index 4d3b92628d..7a33b58cd5 100644
--- a/sn_protocol/src/lib.rs
+++ b/sn_protocol/src/lib.rs
@@ -31,6 +31,8 @@ pub mod safenode_proto {
 pub use error::Error;
 use storage::ScratchpadAddress;

+// pub use self::storage::get_type_from_record;
+
 use self::storage::{ChunkAddress, RegisterAddress, SpendAddress};
 use bytes::Bytes;
 use libp2p::{
diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs
index 2935e43fce..3a6b4ba6a8 100644
--- a/sn_protocol/src/storage.rs
+++ b/sn_protocol/src/storage.rs
@@ -18,7 +18,10 @@ use std::{str::FromStr, time::Duration};
 pub use self::{
     address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress},
     chunks::Chunk,
-    header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType},
+    header::{
+        get_type_from_record, try_deserialize_record, try_serialize_record, RecordHeader,
+        RecordKind, RecordType,
+    },
     scratchpad::Scratchpad,
 };
diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs
index 96a4515526..af43c21256 100644
--- a/sn_protocol/src/storage/header.rs
+++ b/sn_protocol/src/storage/header.rs
@@ -84,6 +84,33 @@ impl Display for RecordKind {
     }
 }

+/// Return the RecordType
+pub fn get_type_from_record(record: &Record) -> Result<RecordType> {
+    let key = record.key.clone();
+    let record_key = PrettyPrintRecordKey::from(&key);
+
+    match RecordHeader::from_record(record) {
+        Ok(record_header) => match record_header.kind {
+            RecordKind::Chunk => Ok(RecordType::Chunk),
+            RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
+            RecordKind::Spend | RecordKind::Register => {
+                let content_hash = XorName::from_content(&record.value);
+                Ok(RecordType::NonChunk(content_hash))
+            }
+            RecordKind::ChunkWithPayment
+            | RecordKind::RegisterWithPayment
+            | RecordKind::ScratchpadWithPayment => {
+                error!("Record {record_key:?} with payment shall not be stored locally.");
+                Err(Error::IncorrectRecordHeader)
+            }
+        },
+        Err(err) => {
+            error!("For record {record_key:?}, failed to parse record_header {err:?}");
+
Err(Error::IncorrectRecordHeader) + } + } +} + impl RecordHeader { pub const SIZE: usize = 2; From 39de95819f33e59d0a2f30d1d970678a02f65013 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 4 Sep 2024 09:57:42 +0900 Subject: [PATCH 200/255] fix(networking): remove self from get_peers_excluding_self --- sn_networking/src/driver.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 37a7fe60ab..25b90f521d 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -934,7 +934,6 @@ impl SwarmDriver { } } } - all_peers.push(self.self_peer_id); all_peers } From 636b84f1e5f7d91426c0300fd635be0cba320758 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 4 Sep 2024 10:00:47 +0900 Subject: [PATCH 201/255] chore: small refactors and doc tweaks --- sn_networking/src/event/kad.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index ed0cc37045..0ed478346f 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -417,7 +417,7 @@ impl SwarmDriver { /// Checks passed peers from a request and checks they are sufficiently spaced to /// ensure we have searched enough of the network range as determined by our `get_range` /// - /// We expect any conflicting records to have been reported prior ti this check, + /// We expect any conflicting records to have been reported prior to this check, /// so we assume we're returning unique records only. fn have_we_have_searched_thoroughly_for_quorum( expected_get_range: KBucketDistance, @@ -425,7 +425,7 @@ impl SwarmDriver { data_key_address: &NetworkAddress, quorum: &Quorum, ) -> bool { - warn!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len()); + info!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len()); let is_sensitive_data = matches!(quorum, Quorum::All); let required_quorum = get_quorum_value(quorum); @@ -441,7 +441,6 @@ impl SwarmDriver { let mut max_distance_to_data_from_responded_nodes = KBucketDistance::default(); // iterate over peers and see if the distance to the data is greater than the get_range - // Fathest peer from the data that has returned it for peer_id in searched_peers_list.iter() { let peer_address = NetworkAddress::from_peer(*peer_id); let distance_to_data = peer_address.distance(data_key_address); From 4a9fa8ff6ae45cf6140f2406e27b35437accc0fe Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 4 Sep 2024 20:32:32 +0900 Subject: [PATCH 202/255] fix(networking): remove custom REPLICATION_FACTOR this indirectly affects how many close nodes returned and could stymie the GetRange effectiveness --- sn_networking/src/driver.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 25b90f521d..5b175edae1 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -63,7 +63,6 @@ use std::{ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, fmt::Debug, net::SocketAddr, - num::NonZeroUsize, path::PathBuf, }; use tokio::sync::{mpsc, oneshot}; @@ -129,13 +128,6 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const 
KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10); -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { - Some(v) => v, - None => panic!("CLOSE_GROUP_SIZE should not be zero"), -}; - /// The various settings to apply to when fetching a record from network #[derive(Clone)] pub struct GetRecordCfg { @@ -355,8 +347,6 @@ impl NetworkBuilder { .set_publication_interval(None) // 1mb packet size .set_max_packet_size(MAX_PACKET_SIZE) - // How many nodes _should_ store data. - .set_replication_factor(REPLICATION_FACTOR) .set_query_timeout(KAD_QUERY_TIMEOUT_S) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. .disjoint_query_paths(true) @@ -436,9 +426,7 @@ impl NetworkBuilder { .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual) .set_max_packet_size(MAX_PACKET_SIZE) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. - .disjoint_query_paths(true) - // How many nodes _should_ store data. - .set_replication_factor(REPLICATION_FACTOR); + .disjoint_query_paths(true); let (network, net_event_recv, driver) = self.build( kad_cfg, From 834923f2a5f51035d06a835c20d63e8d77c50500 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 5 Sep 2024 09:03:55 +0900 Subject: [PATCH 203/255] ci: normalize send amount for tests --- .github/workflows/merge.yml | 164 ++++++++++++++++++------------------ sn_client/src/api.rs | 1 + sn_networking/src/driver.rs | 3 - 3 files changed, 83 insertions(+), 85 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index d37b04a679..a48e72a9cb 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -864,15 +864,15 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet first time - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - # echo "----------" - # cat first.txt - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Move faucet log to the working folder # run: | @@ -898,44 +898,44 @@ jobs: # continue-on-error: true # if: always() - # - name: Create and fund a wallet second time - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - # echo "----------" - # cat second.txt - # if grep "genesis is already spent" second.txt; then - # echo "Duplicated faucet rejected" - # else - # echo "Duplicated 
faucet not rejected!" - # exit 1 - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Create and fund a wallet with different keypair - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - # echo "Faucet with different genesis key not rejected!" - # exit 1 - # else - # echo "Faucet with different genesis key rejected" - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet second time + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt + # echo "----------" + # cat second.txt + # if grep "genesis is already spent" second.txt; then + # echo "Duplicated faucet rejected" + # else + # echo "Duplicated faucet not rejected!" + # exit 1 + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create and fund a wallet with different keypair + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # echo "Faucet with different genesis key not rejected!" 
+ # exit 1 + # else + # echo "Faucet with different genesis key rejected" + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Build faucet binary again without the gifting feature # run: cargo build --release --bin faucet @@ -1063,14 +1063,14 @@ jobs: # echo "PWD subdirs:" # du -sh */ - # - name: Create and fund a wallet to pay for files storage - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick @@ -1196,14 +1196,14 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet to pay for files storage - # run: | - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload first file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick @@ -1272,25 +1272,25 @@ jobs: # run: sleep 300 # timeout-minutes: 6 - # # Start a different client to avoid local wallet slow down with more payments handled. - # - name: Start a different client - # run: | - # pwd - # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - # ls -l $SAFE_DATA_PATH - # ls -l $SAFE_DATA_PATH/client_first - # mkdir $SAFE_DATA_PATH/client - # ls -l $SAFE_DATA_PATH - # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - # ls -l $CLIENT_DATA_PATH - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # SAFE_DATA_PATH: /home/runner/.local/share/safe - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 25 + # # Start a different client to avoid local wallet slow down with more payments handled. 
+ # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 25 # - name: Use second client to upload third file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index 4ac1fb322f..f955b2d6b0 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -863,6 +863,7 @@ impl Client { .iter() .cloned() .collect(); + info!("Expecting holders: {expected_holders:?}"); (Some(record.clone()), expected_holders) } else { (None, Default::default()) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 5b175edae1..b1ad096969 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -856,9 +856,6 @@ impl SwarmDriver { let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect(); info!("Sorted distances: {:?}", mapped); - // We get around 5-7 peers returned here... We want to take further in larger networks - // - // This value let farthest_peer_to_check = self .get_all_local_peers_excluding_self() .len() From 05d97b889a03d5920bc4edaef2739a132abd79c9 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 5 Sep 2024 10:02:54 +0900 Subject: [PATCH 204/255] ci: fixes --- .github/workflows/merge.yml | 16 ++++++---------- sn_node/tests/verify_routing_table.rs | 2 +- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index a48e72a9cb..0f2e616bb7 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -743,10 +743,6 @@ jobs: echo "EVM_NETWORK has been set to $EVM_NETWORK" fi - - name: Wait from network to stabilise - shell: bash - run: sleep 30 - - name: Verify the routing tables of the nodes run: cargo test --release -p sn_node --features="local" --test verify_routing_table -- --nocapture env: @@ -867,7 +863,7 @@ jobs: # - name: Create and fund a wallet first time # run: | # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt # echo "----------" # cat first.txt # env: @@ -906,7 +902,7 @@ jobs: # rm -rf /home/runner/.local/share/safe/test_genesis # rm -rf /home/runner/.local/share/safe/client # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | 
tail -n 1) | tail -n 1 1>second.txt # echo "----------" # cat second.txt # if grep "genesis is already spent" second.txt; then @@ -927,7 +923,7 @@ jobs: # rm -rf /home/runner/.local/share/safe/test_genesis # rm -rf /home/runner/.local/share/safe/client # ~/safe --log-output-dest=data-dir wallet create --no-password - # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then # echo "Faucet with different genesis key not rejected!" # exit 1 # else @@ -1066,7 +1062,7 @@ jobs: # - name: Create and fund a wallet to pay for files storage # run: | # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 1000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex # env: # SN_LOG: "all" @@ -1199,7 +1195,7 @@ jobs: # - name: Create and fund a wallet to pay for files storage # run: | # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex # env: # SN_LOG: "all" @@ -1284,7 +1280,7 @@ jobs: # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs # ls -l $CLIENT_DATA_PATH # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex # env: # SN_LOG: "all" diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index da19270b69..85dc2e3a09 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -26,7 +26,7 @@ use tracing::{error, info, trace}; /// Sleep for sometime for the nodes for discover each other before verification /// Also can be set through the env variable of the same name. 
-const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5); +const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(60); #[tokio::test(flavor = "multi_thread")] async fn verify_routing_table() -> Result<()> { From b85d51b815ef49d496143b5dc0ec3f73231f9247 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 5 Sep 2024 12:39:40 +0900 Subject: [PATCH 205/255] fix(client): retry faucet loading if genesis is spent --- sn_client/src/error.rs | 2 ++ sn_client/src/faucet.rs | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs index 618632303c..1bed15f7a4 100644 --- a/sn_client/src/error.rs +++ b/sn_client/src/error.rs @@ -24,6 +24,8 @@ use xor_name::XorName; pub enum Error { #[error("Genesis disbursement failed")] GenesisDisbursement, + #[error("Faucet disbursement failed")] + FaucetDisbursement, #[error("Genesis error {0}")] GenesisError(#[from] sn_transfers::GenesisError), diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs index 3f97e386f5..695b415d47 100644 --- a/sn_client/src/faucet.rs +++ b/sn_client/src/faucet.rs @@ -38,7 +38,13 @@ pub async fn fund_faucet_from_genesis_wallet( if client.is_genesis_spend_present().await { warn!("Faucet can't get funded from genesis, genesis is already spent!"); println!("Faucet can't get funded from genesis, genesis is already spent!"); - panic!("Faucet can't get funded from genesis, genesis is already spent!"); + + // try loading again + faucet_wallet.try_load_cash_notes()?; + + if faucet_wallet.balance().is_zero() { + return Err(Error::FaucetDisbursement); + } } println!("Initiating genesis..."); From 862cf9f39e87615a63af1064a9c3549fd6d4d73e Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 5 Sep 2024 14:24:51 +0900 Subject: [PATCH 206/255] fix(faucet): loop if initial faucet balance 0 but genesis spent --- sn_client/src/faucet.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs index 695b415d47..b3ccaace78 100644 --- a/sn_client/src/faucet.rs +++ b/sn_client/src/faucet.rs @@ -38,13 +38,21 @@ pub async fn fund_faucet_from_genesis_wallet( if client.is_genesis_spend_present().await { warn!("Faucet can't get funded from genesis, genesis is already spent!"); println!("Faucet can't get funded from genesis, genesis is already spent!"); - - // try loading again - faucet_wallet.try_load_cash_notes()?; - - if faucet_wallet.balance().is_zero() { - return Err(Error::FaucetDisbursement); + // Try loading cash notes up to 100 times, waiting 1 second between attempts + for attempt in 1..=100 { + println!("Attempt {attempt} to load cash notes"); + debug!("Attempt {attempt} to load cash notes"); + faucet_wallet.try_load_cash_notes()?; + if !faucet_wallet.balance().is_zero() { + println!("Successfully loaded cash notes on attempt {attempt}"); + debug!("Successfully loaded cash notes on attempt {attempt}"); + return Ok(()); + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; } + + // If we've tried 100 times and still have zero balance, return an error + return Err(Error::FaucetDisbursement); } println!("Initiating genesis..."); From 00b06ec9950e837f368fdb503d47268672e45cc2 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Mon, 9 Sep 2024 16:33:48 +0900 Subject: [PATCH 207/255] fix(networking): ensure we update getrange when we can ie, even if query times out if we have peers, we update seems like we timeout more due to high K_VALUE --- sn_networking/src/bootstrap.rs | 2 
+- sn_networking/src/event/kad.rs | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index 5c28c9a4d2..ec6c019a88 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -13,7 +13,7 @@ use crate::target_arch::Instant; /// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the /// routing table. -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(30); +pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(15); impl SwarmDriver { /// This functions triggers network discovery based on when the last peer was added to the RT and the number of diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 0ed478346f..964376b883 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -92,9 +92,8 @@ impl SwarmDriver { ref step, } => { event_string = "kad_event::get_closest_peers_err"; - error!("GetClosest Query task {id:?} errored with {err:?}, {stats:?} - {step:?}"); - let (_address, get_closest_type, mut current_closest) = + let (address, get_closest_type, mut current_closest) = self.pending_get_closest_peers.remove(&id).ok_or_else(|| { debug!( "Can't locate query task {id:?}, it has likely been completed already." @@ -111,13 +110,23 @@ impl SwarmDriver { match err { GetClosestPeersError::Timeout { ref peers, .. } => { current_closest.extend(peers.iter().map(|i| i.peer_id)); + if current_closest.len() < CLOSE_GROUP_SIZE { + error!( + "GetClosest Query task {id:?} errored, not enough found. {err:?}, {stats:?} - {step:?}" + ); + } } } match get_closest_type { - PendingGetClosestType::NetworkDiscovery => self - .network_discovery - .handle_get_closest_query(¤t_closest), + PendingGetClosestType::NetworkDiscovery => { + // do not set this via function calls, as that could potentially + // skew the results in favour of heavily queried (and manipulated) + // areas of the network + self.set_request_range(address, ¤t_closest); + self.network_discovery + .handle_get_closest_query(¤t_closest); + } PendingGetClosestType::FunctionCall(sender) => { sender .send(current_closest) From 0e681604ea4cdd2c44993728dcd3cd316fdf581e Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 10 Sep 2024 12:40:16 +0900 Subject: [PATCH 208/255] fix(networking): redial peers on intermittent errors previously we just dropped any attempt at comms there --- sn_client/src/api.rs | 1 + sn_networking/Cargo.toml | 2 +- sn_networking/src/event/swarm.rs | 49 ++++++++++++++++++-------------- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index f955b2d6b0..5ed63210a6 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -436,6 +436,7 @@ impl Client { retry_strategy, target_record: None, expected_holders: Default::default(), + is_register: true, }; let maybe_record = self.network.get_record_from_network(key, &get_cfg).await; diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index ce8c88d950..07c23c542d 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -54,7 +54,7 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.15" } +sn_build_info = { path = "../sn_build_info", version = "0.1.15" } sn_protocol = { path = "../sn_protocol", version = 
"0.17.11" } sn_transfers = { path = "../sn_transfers", version = "0.19.3" } sn_registers = { path = "../sn_registers", version = "0.3.21" } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 3d5dd62ab6..0e5b35d48d 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, - target_arch::Instant, NetworkEvent, Result, SwarmDriver, + cmd::NetworkSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, + relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; #[cfg(feature = "local")] use libp2p::mdns; @@ -25,7 +25,7 @@ use libp2p::{ }; use sn_protocol::version::{IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR}; use std::collections::HashSet; -use tokio::time::Duration; +use tokio::{sync::oneshot, time::Duration}; impl SwarmDriver { /// Handle `SwarmEvents` @@ -392,8 +392,9 @@ impl SwarmDriver { let _ = self.live_connected_peers.remove(&connection_id); self.record_connection_metrics(); + let mut failed_peer_addresses = vec![]; // we need to decide if this was a critical error and the peer should be removed from the routing table - let (should_clean_peer, should_track_issue) = match error { + let should_clean_peer = match error { DialError::Transport(errors) => { // as it's an outgoing error, if it's transport based we can assume it is _our_ fault // @@ -401,10 +402,14 @@ impl SwarmDriver { // so we default to it not being a real issue // unless there are _specific_ errors (connection refused eg) error!("Dial errors len : {:?}", errors.len()); - let mut remove_peer_track_peer_issue = (false, false); - for (_addr, err) in errors { + let mut remove_peer_track_peer_issue = false; + for (addr, err) in errors { error!("OutgoingTransport error : {err:?}"); + if !failed_peer_addresses.contains(&addr) { + failed_peer_addresses.push(addr) + } + match err { TransportError::MultiaddrNotSupported(addr) => { warn!("Multiaddr not supported : {addr:?}"); @@ -414,7 +419,7 @@ impl SwarmDriver { println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); } // if we can't dial a peer on a given address, we should remove it from the routing table - remove_peer_track_peer_issue = (false, true) + remove_peer_track_peer_issue = false } TransportError::Other(err) => { let problematic_errors = @@ -431,7 +436,7 @@ impl SwarmDriver { && self.peers_in_rt < self.bootstrap_peers.len() { warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring"); - remove_peer_track_peer_issue = (false, false); + remove_peer_track_peer_issue = false; } else { // It is really difficult to match this error, due to being eg: // Custom { kind: Other, error: Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) } @@ -442,13 +447,13 @@ impl SwarmDriver { .any(|err| error_msg.contains(err)) { warn!("Problematic error encountered: {error_msg}"); - remove_peer_track_peer_issue = (true, true); + remove_peer_track_peer_issue = true; } else if intermittent_errors .iter() .any(|err| error_msg.contains(err)) { warn!("Intermittent error encountered: {error_msg}"); - remove_peer_track_peer_issue = (false, true); + remove_peer_track_peer_issue = false; } } } @@ -460,17 +465,17 @@ impl SwarmDriver { // We provided no address, and while 
we can't really blame the peer // we also can't connect, so we opt to cleanup... warn!("OutgoingConnectionError: No address provided"); - (true, false) + true } DialError::Aborted => { // not their fault warn!("OutgoingConnectionError: Aborted"); - (false, false) + false } DialError::DialPeerConditionFalse(_) => { // we could not dial due to an internal condition, so not their issue warn!("OutgoingConnectionError: DialPeerConditionFalse"); - (false, false) + false } DialError::LocalPeerId { endpoint, .. } => { // This is actually _us_ So we should remove this from the RT @@ -478,19 +483,19 @@ impl SwarmDriver { "OutgoingConnectionError: LocalPeerId: {}", endpoint_str(&endpoint) ); - (true, true) + true } DialError::WrongPeerId { obtained, endpoint } => { // The peer id we attempted to dial was not the one we expected // cleanup error!("OutgoingConnectionError: WrongPeerId: obtained: {obtained:?}, endpoint: {endpoint:?}"); - (true, true) + true } DialError::Denied { cause } => { // The peer denied our connection // cleanup error!("OutgoingConnectionError: Denied: {cause:?}"); - (true, true) + true } }; @@ -507,13 +512,13 @@ impl SwarmDriver { } } - if should_track_issue { - warn!("Tracking issue of {failed_peer_id:?}."); + if !should_clean_peer { + // lets try and redial. + for addr in failed_peer_addresses { + let (sender, _recv) = oneshot::channel(); - self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { - peer_id: failed_peer_id, - issue: crate::NodeIssue::ConnectionIssue, - })?; + self.queue_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); + } } } SwarmEvent::IncomingConnectionError { From 182a88dc0f8a1c7dfd04f36e673d49acb7f3c56d Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 10 Sep 2024 12:47:36 +0900 Subject: [PATCH 209/255] fix(networking): clarify faucet error and update test --- .github/workflows/merge.yml | 2 +- sn_client/src/error.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 0f2e616bb7..82a3dc4d15 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -905,7 +905,7 @@ jobs: # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt # echo "----------" # cat second.txt - # if grep "genesis is already spent" second.txt; then + # if grep "Faucet disbursement has already occured" second.txt; then # echo "Duplicated faucet rejected" # else # echo "Duplicated faucet not rejected!" 
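The grep in the step above only works because it matches, byte for byte, the Display string of the error variant that the next hunk renames in sn_client/src/error.rs. A minimal sketch of that coupling, assuming only the thiserror derive the crate already uses (the main function is illustrative, not the faucet's real entry point):

    use thiserror::Error;

    #[derive(Debug, Error)]
    pub enum Error {
        // This text is what the merge.yml step greps for; if it drifts,
        // the "Duplicated faucet rejected" check silently stops matching.
        #[error("Faucet disbursement has already occured")]
        FaucetDisbursement,
    }

    fn main() {
        // Whatever the faucet prints for this variant is what CI sees in second.txt.
        println!("{}", Error::FaucetDisbursement);
    }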
diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs index 1bed15f7a4..d19ce4d58d 100644 --- a/sn_client/src/error.rs +++ b/sn_client/src/error.rs @@ -24,7 +24,7 @@ use xor_name::XorName; pub enum Error { #[error("Genesis disbursement failed")] GenesisDisbursement, - #[error("Faucet disbursement failed")] + #[error("Faucet disbursement has already occured")] FaucetDisbursement, #[error("Genesis error {0}")] From 8562d4c0c165644bc376a7d9af1107667350b6e2 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 10 Sep 2024 12:50:21 +0900 Subject: [PATCH 210/255] ci: clarify duplicate genesis tests in merge.yml --- .github/workflows/merge.yml | 36 ++++++++++++++++++++++++++++-------- second.txt | 1 + 2 files changed, 29 insertions(+), 8 deletions(-) create mode 100644 second.txt diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 82a3dc4d15..e49b773bb0 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -894,22 +894,42 @@ jobs: # continue-on-error: true # if: always() - # - name: Create and fund a wallet second time + # - name: Cleanup prior faucet and cashnotes # run: | # ls -l /home/runner/.local/share # ls -l /home/runner/.local/share/safe # rm -rf /home/runner/.local/share/safe/test_faucet # rm -rf /home/runner/.local/share/safe/test_genesis # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt - # echo "----------" - # cat second.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create a new wallet + # run: ~/safe --log-output-dest=data-dir wallet create --no-password + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Attempt second faucet genesis disbursement + # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: cat second.txt + # run: cat second.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Verify a second disbursement is rejected + # run: | # if grep "Faucet disbursement has already occured" second.txt; then - # echo "Duplicated faucet rejected" + # echo "Duplicated faucet rejected" # else - # echo "Duplicated faucet not rejected!" - # exit 1 + # echo "Duplicated faucet not rejected!" 
+ # exit 1 # fi # env: # SN_LOG: "all" diff --git a/second.txt b/second.txt new file mode 100644 index 0000000000..e212976f10 --- /dev/null +++ b/second.txt @@ -0,0 +1 @@ +This is an error message From 1e04bd752f3936f99ad3a8426f1c6cec933a6254 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 10 Sep 2024 16:10:26 +0900 Subject: [PATCH 211/255] fix(networking): local network dont force cleanup bootstrap We were cleaning up bootstrap even if not full just because of being local --- .github/workflows/merge.yml | 2 +- sn_networking/src/event/swarm.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index e49b773bb0..e9042f903f 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -912,7 +912,7 @@ jobs: # timeout-minutes: 5 # - name: Attempt second faucet genesis disbursement - # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 + # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 || true # env: # SN_LOG: "all" # timeout-minutes: 5 diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 0e5b35d48d..2416b5681c 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -244,7 +244,7 @@ impl SwarmDriver { } // If we are not local, we care only for peers that we dialed and thus are reachable. - if self.local || has_dialed { + if !self.local && has_dialed { // A bad node cannot establish a connection with us. So we can add it to the RT directly. self.remove_bootstrap_from_full(peer_id); @@ -254,7 +254,10 @@ impl SwarmDriver { multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit)) }); } + } + if self.local || has_dialed { + // If we are not local, we care only for peers that we dialed and thus are reachable. debug!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table"); // Attempt to add the addresses to the routing table. 
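The gating in the hunk above is easy to misread, so a condensed sketch of the two conditions may help: after this patch, bootstrap cleanup runs only for peers we dialed on non-local networks, while routing-table addition still accepts a peer that is either local or successfully dialed. Free functions stand in here for what are really branches inside SwarmDriver's identify handling:

    // `local` mirrors self.local; `has_dialed` is whether we initiated a
    // successful dial to this peer ourselves.
    fn should_cleanup_bootstrap(local: bool, has_dialed: bool) -> bool {
        // Previously `local || has_dialed`, which forced bootstrap cleanup on
        // local networks even when the bootstrap set was not full.
        !local && has_dialed
    }

    fn should_add_to_routing_table(local: bool, has_dialed: bool) -> bool {
        // Unchanged: local peers are reachable by construction; otherwise we
        // require a dial that we made ourselves.
        local || has_dialed
    }

    fn main() {
        // Local, un-dialed peer: goes into the RT, but no bootstrap cleanup.
        assert!(!should_cleanup_bootstrap(true, false));
        assert!(should_add_to_routing_table(true, false));
        // Dialed remote peer: both paths apply.
        assert!(should_cleanup_bootstrap(false, true));
        assert!(should_add_to_routing_table(false, true));
    }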
From 2f927badececea5731f1571eecf4d3f883be53b1 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 11 Sep 2024 10:24:35 +0900 Subject: [PATCH 212/255] ci: split out cashnote check for clarity --- .github/workflows/merge.yml | 50 ++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index e9042f903f..5ff033fa30 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -1227,29 +1227,33 @@ jobs: # SN_LOG: "all" # timeout-minutes: 5 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Check current directories + # run: | + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # timeout-minutes: 1 + + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 From d42ca11d5acd9bb11342df185098d00afee5045a Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 11 Sep 2024 12:54:36 +0900 Subject: [PATCH 213/255] test(networking): clarify that _at least_ all in range keys should be fetched allow more to be fetched if needed due to distance variation --- sn_networking/src/replication_fetcher.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 2d675cfdcc..3c9a32737c 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -536,10 +536,9 @@ mod tests { replication_fetcher.on_going_fetches.len(), "keys to fetch and ongoing fetches should match" ); - assert_eq!( - in_range_keys, - keys_to_fetch.len() + replication_fetcher.to_be_fetched.len(), - "all keys should be in range and in the fetcher" + assert!( + keys_to_fetch.len() + replication_fetcher.to_be_fetched.len() >= in_range_keys, + "at least all keys in range should be in the fetcher" ); } } From aa3bce688f5c282591a23fd420f9911a395b5d2c Mon Sep 17 00:00:00 2001 From: 
Josh Wilson Date: Wed, 11 Sep 2024 15:04:39 +0900 Subject: [PATCH 214/255] ci: assert faucet funded before upload --- .github/workflows/merge.yml | 106 ++++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 48 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 5ff033fa30..358c61aa19 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -1136,6 +1136,8 @@ jobs: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" # name: Replication bench with heavy upload # runs-on: ubuntu-latest + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client # steps: # - uses: actions/checkout@v4 @@ -1212,6 +1214,20 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi + # - name: Sleep 15s + # shell: bash + # run: sleep 15 + + # - name: Check faucet has been funded + # shell: bash + # run: | + # cash_note_count=$(ls -l /home/runner/.local/share/safe/test_faucet/wallet/cash_notes/ | wc -l) + # echo $cash_note_count + # if [ "$cash_note_count" -eq 0 ]; then + # echo "Error: Expected at least 1 cash note, but found $cash_note_count" + # exit 1 + # fi + # - name: Create and fund a wallet to pay for files storage # run: | # ./target/release/safe --log-output-dest=data-dir wallet create --no-password @@ -1251,8 +1267,7 @@ jobs: # echo "Got too many payment files leftover: $payment_files" # exit 1 # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 # - name: Wait for certain period @@ -1265,28 +1280,26 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) - # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then - # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) - # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) + # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then + # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 @@ -1309,7 +1322,6 @@ jobs: # env: # SN_LOG: "all" # SAFE_DATA_PATH: /home/runner/.local/share/safe - # CLIENT_DATA_PATH: 
/home/runner/.local/share/safe/client # timeout-minutes: 25 # - name: Use second client to upload third file @@ -1318,29 +1330,27 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # timeout-minutes: 10 # - name: Stop the local network and upload logs # if: always() From d9acdaea2789af3d65e895601dbeb69e9d65f21d Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 12 Sep 2024 09:42:40 +0900 Subject: [PATCH 215/255] chore(net): remove clone post-rebase --- sn_networking/src/driver.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index b1ad096969..e40b04f9c8 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -912,7 +912,7 @@ impl SwarmDriver { let mut all_peers: Vec = vec![]; for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { for entry in kbucket.iter() { - let id = entry.node.key.clone().into_preimage(); + let id = entry.node.key.into_preimage(); if id != our_peer_id { all_peers.push(id); From 51753331ca8ebeccebf289d12d3f450ce7daa9c8 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 12 Sep 2024 12:43:22 +0900 Subject: [PATCH 216/255] ci: wait for faucet completion in dist test --- .github/workflows/merge.yml | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 358c61aa19..cd18b5b230 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -531,15 +531,19 @@ jobs: # platform: ${{ matrix.os }} # build: true - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + # # incase the faucet is not ready yet + # - name: 30s sleep for faucet completion + # run: sleep 30 + + # - name: 
Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi # - name: execute token_distribution tests # run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 From d5864f82b4e10e8a589186aaff4a9f035e279718 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 17 Sep 2024 14:53:23 +0900 Subject: [PATCH 217/255] chore(networking): small fixes comment updates --- sn_networking/src/cmd.rs | 8 ++++---- sn_networking/src/driver.rs | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index cccb679e0e..5ec9ebd827 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -1009,16 +1009,16 @@ impl SwarmDriver { target_address: &NetworkAddress, ) -> Vec { let filtered_peers = self.get_filtered_peers_exceeding_range(target_address); - let cloest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority(); - if filtered_peers.len() >= cloest_node_buffer_zone { + let closest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority(); + if filtered_peers.len() >= closest_node_buffer_zone { filtered_peers } else { - warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {cloest_node_buffer_zone:?} closest nodes"); + warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {closest_node_buffer_zone:?} closest nodes"); let all_peers = self.get_all_local_peers_excluding_self(); match sort_peers_by_address_and_limit( &all_peers, target_address, - cloest_node_buffer_zone, + closest_node_buffer_zone, ) { Ok(peers) => peers.iter().map(|p| **p).collect(), Err(err) => { diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index e40b04f9c8..23ef1eb0c6 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -806,7 +806,7 @@ impl SwarmDriver { // logging for handling events happens inside handle_swarm_events // otherwise we're rewriting match statements etc around this anwyay if let Err(err) = self.handle_swarm_events(swarm_event) { - trace!("Issue while handling swarm event: {err}"); + warn!("Issue while handling swarm event: {err}"); } }, // thereafter we can check our intervals @@ -818,7 +818,6 @@ impl SwarmDriver { _ = set_farthest_record_interval.tick() => { if !self.is_client { let get_range = self.get_request_range(); - // set any new distance to farthest record in the store self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(get_range); // the distance range within the replication_fetcher shall be in sync as well From 705106f20ed64bd353bd39019cd3c5f564a73322 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 18 Sep 2024 10:30:55 +0900 Subject: [PATCH 218/255] chore: post-rebase update get register call --- sn_protocol/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/sn_protocol/src/lib.rs b/sn_protocol/src/lib.rs index 7a33b58cd5..4d3b92628d 100644 --- a/sn_protocol/src/lib.rs +++ b/sn_protocol/src/lib.rs @@ -31,8 +31,6 @@ pub mod safenode_proto { pub use error::Error; use storage::ScratchpadAddress; -// pub use self::storage::get_type_from_record; - use self::storage::{ChunkAddress, RegisterAddress, SpendAddress}; use bytes::Bytes; use libp2p::{ From 769c4816d53583893a2383057dfa01abc1861b02 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 19 Sep 2024 14:49:33 +0900 
Subject: [PATCH 219/255] fix: prevent recursive bootstrap queries on peer addition --- sn_networking/src/driver.rs | 3 +++ sn_networking/src/event/kad.rs | 3 +++ 2 files changed, 6 insertions(+) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 23ef1eb0c6..91a530b10e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -693,6 +693,7 @@ impl NetworkBuilder { quotes_history: Default::default(), replication_targets: Default::default(), range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT), + first_contact_made: false, }; let network = Network::new( @@ -756,6 +757,8 @@ pub struct SwarmDriver { // Each update is generated when there is a routing table change // We use the largest of these X_STORAGE_LIMIT values as our X distance. pub(crate) range_distances: VecDeque, + // have we found our initial peer + pub(crate) first_contact_made: bool, } impl SwarmDriver { diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 964376b883..171e30c520 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -268,8 +268,11 @@ impl SwarmDriver { event_string = "kad_event::RoutingUpdated"; if is_new_peer { self.update_on_peer_addition(peer); + } + if !self.first_contact_made { // This should only happen once + self.first_contact_made = true; info!("Performing the first bootstrap"); self.trigger_network_discovery(); } From d77ec7e27beff3d00d03af3da96f870d1e094254 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 9 Oct 2024 14:40:49 +0900 
// With further reduced probability of 1% (5% * 20%) if rng.gen_bool(0.2) { - let close_group_peers = if let Ok(peers) = sort_peers_by_address_and_limit( + let close_group_peers = sort_peers_by_address_and_limit( &peers, &NetworkAddress::from_peer(our_peer_id), CLOSE_GROUP_SIZE, - ) { - peers - } else { - vec![] - }; + ) + .unwrap_or_default(); if close_group_peers.len() == CLOSE_GROUP_SIZE { loop { diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 419f49aa64..8649d07909 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -96,17 +96,8 @@ async fn verify_data_location() -> Result<()> { let (client, wallet) = get_client_and_funded_wallet().await; - let paying_wallet_dir = TempDir::new()?; - - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; - store_registers( - client.clone(), - register_count, - paying_wallet_dir.to_path_buf(), - ) - .await?; + store_chunks(&client, chunk_count, &wallet).await?; + store_registers(&client, register_count, &wallet).await?; // Verify data location initially verify_location(&all_peers, &node_rpc_address).await?; From b69484f1cd4e01cafac625f4aa298e39128d6bd7 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 9 Oct 2024 16:03:52 +0900 Subject: [PATCH 221/255] fix(networking): prevent subtraction overflow in set_request_range --- second.txt | 1 - sn_networking/src/driver.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 second.txt diff --git a/second.txt b/second.txt deleted file mode 100644 index e212976f10..0000000000 --- a/second.txt +++ /dev/null @@ -1 +0,0 @@ -This is an error message diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 91a530b10e..a895655650 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -867,7 +867,7 @@ impl SwarmDriver { info!("Farthest peer we'll check: {:?}", farthest_peer_to_check); let yardstick = if sorted_distances.len() >= farthest_peer_to_check { - sorted_distances.get(farthest_peer_to_check - 1) + sorted_distances.get(farthest_peer_to_check.saturating_sub(1)) } else { sorted_distances.last() }; From 198f3bf89bdd5dc8246fdd5cc47323217597d0ba Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Thu, 10 Oct 2024 13:41:13 +0900 Subject: [PATCH 222/255] chore: cleanup --- sn_networking/src/event/kad.rs | 4 +--- sn_networking/src/replication_fetcher.rs | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 171e30c520..88a2a7ffca 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -147,7 +147,7 @@ impl SwarmDriver { PrettyPrintRecordKey::from(&peer_record.record.key), peer_record.peer ); - self.accumulate_get_record_found(id, peer_record, stats, step)?; + self.accumulate_get_record_found(id, peer_record)?; } kad::Event::OutboundQueryProgressed { id, @@ -369,8 +369,6 @@ impl SwarmDriver { &mut self, query_id: QueryId, peer_record: PeerRecord, - _stats: QueryStats, - _step: ProgressStep, ) -> Result<()> { let expected_get_range = self.get_request_range(); let key = peer_record.record.key.clone(); diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 3c9a32737c..5e0d3a3ad4 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -503,7 
+503,6 @@ mod tests { let mut replication_fetcher = ReplicationFetcher::new(peer_id, event_sender); // Set distance range - // TODO: close peers can break the distance range check here... we need a proper // way to update this test let distance_target = NetworkAddress::from_peer(PeerId::random()); let distance_range = self_address.distance(&distance_target); From 9bca1aa328227476f20057439b823f0bb06950c7 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 15 Oct 2024 15:05:14 +0900 Subject: [PATCH 223/255] ci: split out node restart verification in workflow --- .github/workflows/merge.yml | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index cd18b5b230..1b5395b028 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -635,7 +635,37 @@ jobs: log_file_prefix: safe_test_logs_churn platform: ${{ matrix.os }} - - name: Verify restart of nodes using rg + - name: Get total node count + shell: bash + timeout-minutes: 1 + run: | + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" + + - name: Get restart of nodes using rg + shell: bash + timeout-minutes: 1 + # get the counts, then the specific line, and then the digit count only + # then check we have an expected level of restarts + # TODO: make this use an env var, or relate to testnet size + run: | + restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) + echo "Restarted $restart_count nodes" + + - name: Get peers removed from nodes using rg + shell: bash + timeout-minutes: 1 + run: | + peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ + rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; } + if [ -z "$peer_removed" ]; then + echo "No peer removal count found" + exit 1 + fi + echo "PeerRemovedFromRoutingTable $peer_removed times" + + - name: Verify peers removed exceed restarted node counts shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -652,8 +682,6 @@ jobs: echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" exit 1 fi - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then From 8839059110b8a3b68a9cadd33eb40af743d76233 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 15 Oct 2024 20:20:53 +0900 Subject: [PATCH 224/255] feat(networking): increase chunk validation frequency --- sn_networking/src/event/request_response.rs | 101 ++++++++++---------- 1 file changed, 49 insertions(+), 52 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index fddcbe2648..60eb392160 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -209,7 +209,6 @@ impl SwarmDriver { incoming_keys.len() ); - let more_than_one_key = incoming_keys.len() > 1; // accept replication requests from all peers known peers within our GetRange if !peers.contains(&holder) || holder == our_peer_id { trace!("Holder {holder:?} is self or not in replication range."); @@ -243,66 +242,64 @@ impl SwarmDriver { // Only trigger chunk_proof check based every 
X% of the time let mut rng = OsRng; - // 5% probability - if more_than_one_key && rng.gen_bool(0.05) { - let event_sender = self.event_sender.clone(); - let _handle = tokio::spawn(async move { - let keys_to_verify = - Self::select_verification_data_candidates(&peers, &all_keys, &sender); - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {holder:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); - } + let event_sender = self.event_sender.clone(); + let _handle = tokio::spawn(async move { + let keys_to_verify = + Self::select_verification_data_candidates(&peers, &all_keys, &sender); - // In additon to verify the sender, we also verify a random close node. - // This is to avoid malicious node escaping the check by never send a replication_list. - // With further reduced probability of 1% (5% * 20%) - if rng.gen_bool(0.2) { - let close_group_peers = sort_peers_by_address_and_limit( - &peers, - &NetworkAddress::from_peer(our_peer_id), - CLOSE_GROUP_SIZE, - ) - .unwrap_or_default(); + if keys_to_verify.is_empty() { + debug!("No valid candidate to be checked against peer {holder:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); + } - if close_group_peers.len() == CLOSE_GROUP_SIZE { - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate = NetworkAddress::from_peer(*close_group_peers[index]); - if sender != candidate { - let keys_to_verify = Self::select_verification_data_candidates( - &peers, &all_keys, &candidate, - ); + // In additon to verify the sender, we also verify a random close node. + // This is to avoid malicious node escaping the check by never send a replication_list. 
+ // With further reduced probability of 1% (5% * 20%) + if rng.gen_bool(0.2) { + let close_group_peers = sort_peers_by_address_and_limit( + &peers, + &NetworkAddress::from_peer(our_peer_id), + CLOSE_GROUP_SIZE, + ) + .unwrap_or_default(); - if keys_to_verify.is_empty() { - debug!( - "No valid candidate to be checked against peer {candidate:?}" - ); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); - } + if close_group_peers.len() >= CLOSE_GROUP_SIZE { + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate = NetworkAddress::from_peer(*close_group_peers[index]); + if sender != candidate { + let keys_to_verify = Self::select_verification_data_candidates( + &peers, &all_keys, &candidate, + ); - break; + if keys_to_verify.is_empty() { + debug!( + "No valid candidate to be checked against peer {candidate:?}" + ); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); } + + break; } } } - }); - } + } + }); } /// Check among all chunk type records that we have, select those close to the peer, From 0a500d0a81bbea8710d361a9340e21671c687609 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 16 Oct 2024 09:34:41 +0900 Subject: [PATCH 225/255] chore(networking): increase chunk validations --- sn_networking/src/event/request_response.rs | 42 ++++++++++----------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 60eb392160..2d26a76dc7 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -271,31 +271,27 @@ impl SwarmDriver { ) .unwrap_or_default(); - if close_group_peers.len() >= CLOSE_GROUP_SIZE { - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate = NetworkAddress::from_peer(*close_group_peers[index]); - if sender != candidate { - let keys_to_verify = Self::select_verification_data_candidates( - &peers, &all_keys, &candidate, - ); - - if keys_to_verify.is_empty() { - debug!( - "No valid candidate to be checked against peer {candidate:?}" - ); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); - } + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate = NetworkAddress::from_peer(*close_group_peers[index]); + if sender != candidate { + let keys_to_verify = Self::select_verification_data_candidates( + &peers, &all_keys, &candidate, + ); - break; + if keys_to_verify.is_empty() { + debug!("No valid candidate to be checked against peer {candidate:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: holder, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); } + + break; } } } From aec6fbf3ba132a41b4ba2d2f509ae18715661f1d Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 16 Oct 2024 10:22:33 +0900 Subject: [PATCH 226/255] fix(networking): ensure we verify the candidate not the replicant --- sn_networking/src/event/request_response.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff 
--git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 2d26a76dc7..6e9d09a0f3 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -263,7 +263,7 @@ impl SwarmDriver { // In additon to verify the sender, we also verify a random close node. // This is to avoid malicious node escaping the check by never send a replication_list. // With further reduced probability of 1% (5% * 20%) - if rng.gen_bool(0.2) { + if rng.gen_bool(0.5) { let close_group_peers = sort_peers_by_address_and_limit( &peers, &NetworkAddress::from_peer(our_peer_id), @@ -273,6 +273,7 @@ impl SwarmDriver { loop { let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate_peer_id = *close_group_peers[index]; let candidate = NetworkAddress::from_peer(*close_group_peers[index]); if sender != candidate { let keys_to_verify = Self::select_verification_data_candidates( @@ -283,7 +284,7 @@ impl SwarmDriver { debug!("No valid candidate to be checked against peer {candidate:?}"); } else if let Err(error) = event_sender .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, + peer_id: candidate_peer_id, keys_to_verify, }) .await From 972610cede8fc763366d10bef571df1facc6d751 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 16 Oct 2024 12:28:54 +0900 Subject: [PATCH 227/255] chore(networking): verify some node for each data put --- sn_networking/src/event/request_response.rs | 56 +++++++++------------ 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index 6e9d09a0f3..ca6808ed1b 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -240,9 +240,6 @@ impl SwarmDriver { self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch)); } - // Only trigger chunk_proof check based every X% of the time - let mut rng = OsRng; - let event_sender = self.event_sender.clone(); let _handle = tokio::spawn(async move { let keys_to_verify = @@ -263,37 +260,34 @@ impl SwarmDriver { // In additon to verify the sender, we also verify a random close node. // This is to avoid malicious node escaping the check by never send a replication_list. 
// With further reduced probability of 1% (5% * 20%) - if rng.gen_bool(0.5) { - let close_group_peers = sort_peers_by_address_and_limit( - &peers, - &NetworkAddress::from_peer(our_peer_id), - CLOSE_GROUP_SIZE, - ) - .unwrap_or_default(); + let close_group_peers = sort_peers_by_address_and_limit( + &peers, + &NetworkAddress::from_peer(our_peer_id), + CLOSE_GROUP_SIZE, + ) + .unwrap_or_default(); - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate_peer_id = *close_group_peers[index]; - let candidate = NetworkAddress::from_peer(*close_group_peers[index]); - if sender != candidate { - let keys_to_verify = - Self::select_verification_data_candidates(&peers, &all_keys, &candidate); + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate_peer_id = *close_group_peers[index]; + let candidate = NetworkAddress::from_peer(*close_group_peers[index]); + if sender != candidate { + let keys_to_verify = + Self::select_verification_data_candidates(&peers, &all_keys, &candidate); - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {candidate:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: candidate_peer_id, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); - } - - break; + if keys_to_verify.is_empty() { + debug!("No valid candidate to be checked against peer {candidate:?}"); + } else if let Err(error) = event_sender + .send(NetworkEvent::ChunkProofVerification { + peer_id: candidate_peer_id, + keys_to_verify, + }) + .await + { + error!("SwarmDriver failed to send event: {}", error); } + + break; } } }); From c4a85b666f25ff53d70e49191ed4a1aea6a6fb30 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 16 Oct 2024 15:01:01 +0900 Subject: [PATCH 228/255] feat: remove cli/sn_client and faucet API To reintroduce native tokens now, we should do so encapsulated behind the Wallet struct in the autonomi API. This is used by nodes for verification, and by clients for paying, sending, etc. This allows us to keep the payment layer an abstraction in that one place (and optionally enact/use certain types of payment or validation layers). This commit removes everything in One Big Go, such that we can easily retrieve the state of the native token APIs (which should work) at this point.
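A rough sketch of the shape this describes, with a single Wallet type owning the payment choice so neither nodes nor clients touch the token layer directly. All names here are illustrative, not the actual autonomi API:

    // Hypothetical types; the real autonomi Wallet may differ.
    pub enum PaymentBackend {
        Evm,
        // Native tokens could be reintroduced here without touching callers.
        Native,
    }

    pub struct Wallet {
        backend: PaymentBackend,
    }

    impl Wallet {
        pub fn new(backend: PaymentBackend) -> Self {
            Self { backend }
        }

        // Clients pay through this; nodes would verify through a sibling
        // method. The backend stays an internal detail of Wallet.
        pub fn pay(&self, _amount_atto: u128) -> Result<(), String> {
            match self.backend {
                PaymentBackend::Evm => Ok(()), // delegate to the EVM layer
                PaymentBackend::Native => Err("native tokens not reintroduced yet".into()),
            }
        }
    }

    fn main() {
        let wallet = Wallet::new(PaymentBackend::Evm);
        assert!(wallet.pay(1).is_ok());
    }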
(That said, with EVM in, the CI tests were not running here, but it should not be far off working) --- sn_cli/CHANGELOG.md | 3693 ----------------- sn_cli/Cargo.toml | 86 - sn_cli/README.md | 9 - sn_cli/benches/files.rs | 155 - sn_cli/src/acc_packet.rs | 1603 ------- sn_cli/src/acc_packet/change_tracking.rs | 265 -- sn_cli/src/bin/main.rs | 384 -- sn_cli/src/bin/subcommands/files.rs | 262 -- sn_cli/src/bin/subcommands/folders.rs | 220 - sn_cli/src/bin/subcommands/mod.rs | 102 - sn_cli/src/bin/subcommands/register.rs | 213 - sn_cli/src/bin/subcommands/wallet.rs | 207 - sn_cli/src/bin/subcommands/wallet/audit.rs | 220 - sn_cli/src/bin/subcommands/wallet/helpers.rs | 156 - .../src/bin/subcommands/wallet/hot_wallet.rs | 452 -- .../src/bin/subcommands/wallet/wo_wallet.rs | 310 -- sn_cli/src/files.rs | 34 - sn_cli/src/files/chunk_manager.rs | 1045 ----- sn_cli/src/files/download.rs | 187 - sn_cli/src/files/estimate.rs | 83 - sn_cli/src/files/files_uploader.rs | 480 --- sn_cli/src/files/upload.rs | 71 - sn_cli/src/lib.rs | 17 - sn_cli/src/utils.rs | 37 - sn_client/CHANGELOG.md | 2712 ------------ sn_client/Cargo.toml | 90 - sn_client/README.md | 56 - sn_client/src/acc_packet.rs | 74 - sn_client/src/acc_packet/user_secret.rs | 74 - sn_client/src/api.rs | 1234 ------ sn_client/src/audit.rs | 17 - sn_client/src/audit/dag_crawling.rs | 644 --- sn_client/src/audit/dag_error.rs | 75 - sn_client/src/audit/spend_dag.rs | 831 ---- sn_client/src/audit/tests/mod.rs | 478 --- sn_client/src/audit/tests/setup.rs | 147 - sn_client/src/chunks.rs | 13 - sn_client/src/chunks/error.rs | 75 - sn_client/src/chunks/pac_man.rs | 136 - sn_client/src/error.rs | 164 - sn_client/src/event.rs | 67 - sn_client/src/faucet.rs | 147 - sn_client/src/files.rs | 195 - sn_client/src/files/download.rs | 532 --- sn_client/src/folders.rs | 344 -- sn_client/src/lib.rs | 158 - sn_client/src/register.rs | 833 ---- sn_client/src/test_utils.rs | 124 - sn_client/src/uploader/mod.rs | 461 -- sn_client/src/uploader/tests/mod.rs | 459 -- sn_client/src/uploader/tests/setup.rs | 461 -- sn_client/src/uploader/upload.rs | 1084 ----- sn_client/src/wallet.rs | 1175 ------ sn_client/tests/folders_api.rs | 424 -- sn_faucet/CHANGELOG.md | 1355 ------ sn_faucet/Cargo.toml | 58 - sn_faucet/README.md | 11 - sn_faucet/maid_address_claims.csv | 0 sn_faucet/src/faucet_server.rs | 576 --- sn_faucet/src/gutenberger.rs | 68 - sn_faucet/src/main.rs | 311 -- sn_faucet/src/token_distribution.rs | 734 ---- 62 files changed, 26688 deletions(-) delete mode 100644 sn_cli/CHANGELOG.md delete mode 100644 sn_cli/Cargo.toml delete mode 100644 sn_cli/README.md delete mode 100644 sn_cli/benches/files.rs delete mode 100644 sn_cli/src/acc_packet.rs delete mode 100644 sn_cli/src/acc_packet/change_tracking.rs delete mode 100644 sn_cli/src/bin/main.rs delete mode 100644 sn_cli/src/bin/subcommands/files.rs delete mode 100644 sn_cli/src/bin/subcommands/folders.rs delete mode 100644 sn_cli/src/bin/subcommands/mod.rs delete mode 100644 sn_cli/src/bin/subcommands/register.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/audit.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/helpers.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/hot_wallet.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/wo_wallet.rs delete mode 100644 sn_cli/src/files.rs delete mode 100644 sn_cli/src/files/chunk_manager.rs delete mode 100644 sn_cli/src/files/download.rs delete mode 100644 sn_cli/src/files/estimate.rs delete mode 
100644 sn_cli/src/files/files_uploader.rs delete mode 100644 sn_cli/src/files/upload.rs delete mode 100644 sn_cli/src/lib.rs delete mode 100644 sn_cli/src/utils.rs delete mode 100644 sn_client/CHANGELOG.md delete mode 100644 sn_client/Cargo.toml delete mode 100644 sn_client/README.md delete mode 100644 sn_client/src/acc_packet.rs delete mode 100644 sn_client/src/acc_packet/user_secret.rs delete mode 100644 sn_client/src/api.rs delete mode 100644 sn_client/src/audit.rs delete mode 100644 sn_client/src/audit/dag_crawling.rs delete mode 100644 sn_client/src/audit/dag_error.rs delete mode 100644 sn_client/src/audit/spend_dag.rs delete mode 100644 sn_client/src/audit/tests/mod.rs delete mode 100644 sn_client/src/audit/tests/setup.rs delete mode 100644 sn_client/src/chunks.rs delete mode 100644 sn_client/src/chunks/error.rs delete mode 100644 sn_client/src/chunks/pac_man.rs delete mode 100644 sn_client/src/error.rs delete mode 100644 sn_client/src/event.rs delete mode 100644 sn_client/src/faucet.rs delete mode 100644 sn_client/src/files.rs delete mode 100644 sn_client/src/files/download.rs delete mode 100644 sn_client/src/folders.rs delete mode 100644 sn_client/src/lib.rs delete mode 100644 sn_client/src/register.rs delete mode 100644 sn_client/src/test_utils.rs delete mode 100644 sn_client/src/uploader/mod.rs delete mode 100644 sn_client/src/uploader/tests/mod.rs delete mode 100644 sn_client/src/uploader/tests/setup.rs delete mode 100644 sn_client/src/uploader/upload.rs delete mode 100644 sn_client/src/wallet.rs delete mode 100644 sn_client/tests/folders_api.rs delete mode 100644 sn_faucet/CHANGELOG.md delete mode 100644 sn_faucet/Cargo.toml delete mode 100644 sn_faucet/README.md delete mode 100644 sn_faucet/maid_address_claims.csv delete mode 100644 sn_faucet/src/faucet_server.rs delete mode 100644 sn_faucet/src/gutenberger.rs delete mode 100644 sn_faucet/src/main.rs delete mode 100644 sn_faucet/src/token_distribution.rs diff --git a/sn_cli/CHANGELOG.md b/sn_cli/CHANGELOG.md deleted file mode 100644 index ddcfd25b77..0000000000 --- a/sn_cli/CHANGELOG.md +++ /dev/null @@ -1,3693 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [Unreleased] - -## [0.93.6](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.5...sn_cli-v0.93.6) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(network)* set metrics server to run on localhost - -## [0.93.5](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.4...sn_cli-v0.93.5) - 2024-06-04 - -### Fixed -- *(transfer)* mismatched key shall result in decryption error - -### Other -- *(transfer)* make discord_name decryption backward compatible - -## [0.93.4](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.3...sn_cli-v0.93.4) - 2024-06-04 - -### Other -- *(network)* set metrics server to run on localhost - -## [0.93.3](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.2...sn_cli-v0.93.3) - 2024-06-04 - -### Fixed -- *(faucet)* save the transfer not the cashnote for foundation - -### Other -- *(release)* sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20 - -## [0.93.2](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.1...sn_cli-v0.93.2) - 2024-06-03 - -### Fixed -- enable compile time sk setting for faucet/genesis - -## [0.93.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.0...sn_cli-v0.93.1) - 2024-06-03 - -### Other -- bump versions to enable re-release with env vars at compilation - -## [0.93.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.92.0...sn_cli-v0.93.0) - 2024-06-03 - -### Added -- integrate DAG crawling fixes from Josh and Qi -- *(faucet)* write foundation cash note to disk -- *(client)* read existing mnemonic from disk if avilable -- *(networking)* add UPnP metrics -- *(network)* [**breaking**] move network versioning away from sn_protocol -- *(keys)* enable compile or runtime override of keys -- *(launchpad)* use nat detection server to determine the nat status - -### Fixed -- *(networking)* upnp feature gates for metrics -- *(networking)* conditional upnp metrics - -### Other -- *(cli)* showing cli final execution result explicitly -- rename DAG building to crawling -- spend verification error management -- *(networking)* cargo fmt -- use secrets during build process -- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18 - -## [0.92.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.91.4...sn_cli-v0.92.0) - 2024-05-24 - -### Added -- improved spend verification with DAG and fault detection -- upgrade cli audit to use DAG -- remove two uneeded env vars -- pass genesis_cn pub fields separate to hide sk -- pass sk_str via cli opt -- *(audit)* collect payment forward statistics -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in 
Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- hide genesis keypair -- *(node)* use separate keys of Foundation and Royalty -- *(wallet)* ensure genesis wallet attempts to load from local on init first -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- *(audit)* intercept sender of the payment forward -- spend reason enum and sized cipher -- *(metrics)* expose store cost value -- keep track of the estimated network size metric -- record lip2p relay and dctur metrics -- *(node)* periodically forward reward to specific address -- use default keys for genesis, or override -- use different key for payment forward -- hide genesis keypair -- tracking beta rewards from the DAG - -### Fixed -- audit flags activated independently -- reduce blabber in dot and royalties audit mode -- *(cli)* avoid mis-estimation due to overflow -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- *(uploader)* do not error out immediately on max repayment errors -- *(node)* notify fetch completion earlier to avoid being skipped -- avoid adding mixed type addresses into RT -- enable libp2p metrics to be captured -- correct genesis_pk naming -- genesis_cn public fields generated from hard coded value -- invalid spend reason in data payments - -### Other -- further improve fast mode gathering speed -- improve cli DAG collection -- improve DAG collection perf -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- improve DAG verification redundancy -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- resolve errors after reverts -- Revert "feat(node): make spend and cash_note reason field configurable" -- Revert "chore: refactor CASH_NOTE_REASON strings to consts" -- Revert "feat(client): dump spends creation_reason statistics" -- Revert "chore: address review comments" -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6 -- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5 -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase wording -- *(CI)* 
upload faucet log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- *(uploader)* return summary when upload fails due to max repayments -- *(uploader)* return the list of max repayment reached items -- remove now unused mostly duplicated code -- *(faucet)* devskim ignore -- *(faucet)* log existing faucet balance if non-zero -- *(faucet)* add foundation PK as const -- *(faucet)* clarify logs for verification -- increase initial faucet balance -- add temp log -- *(faucet)* refresh cashnotes on fund -- devSkim ignore foundation pub temp key -- update got 'gifting-from-genesis' faucet feat -- make open metrics feature default but without starting it by default -- Revert "feat(cli): track spend creation reasons during audit" -- *(node)* tuning the pricing curve -- *(node)* remove un-necessary is_relayed check inside add_potential_candidates -- move historic_quoting_metrics out of the record_store dir -- clippy fixes for open metrics feature -- *(networking)* update tests for pricing curve tweaks -- *(refactor)* stabilise node size to 4k records, -- Revert "chore: rename output reason to purpose for clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- Revert "feat: spend shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.3...sn_cli-v0.91.4) - 2024-05-20 - -### Other -- update Cargo.lock dependencies - -## [0.91.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.2...sn_cli-v0.91.3) - 2024-05-15 - -### Other -- update Cargo.lock dependencies - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.1...sn_cli-v0.91.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0...sn_cli-v0.91.1) - 2024-05-08 - -### Other -- update Cargo.lock dependencies -- *(release)* sn_registers-v0.3.13 - -## [0.91.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0-alpha.5...sn_cli-v0.91.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(cli)* track spend creation reasons during audit -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- *(faucet)* log from sn_client -- *(network)* add --upnp flag 
to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* add in autonat server basics -- *(neetworking)* initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs created for -- *(transfers)* do not genereate wallet by default -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- create faucet via account load or generation -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol -- do not add reported external addressese if we are behind home network -- *(networking)* do not add to dialed peers -- *(network)* do not strip out relay's PeerId -- *(relay)* craft the correctly formatted relay address -- *(network)* do not perform AutoNat for clients -- *(relay_manager)* do not dial with P2PCircuit protocol -- *(test)* quoting metrics might have live_time field changed along time -- *(node)* avoid false alert on FailedLocalRecord -- *(record_store)* prune only one record at a time -- *(node)* notify replication_fetcher of early completion -- *(node)* fetcher completes on_going_fetch entry on record_key only -- *(node)* not send out replication when failed read from local -- *(networking)* increase the local responsible range of nodes to K_VALUE peers away -- *(network)* clients should not perform farthest relevant record check -- *(node)* replication_fetch keep distance_range sync with record_store -- *(node)* replication_list in range filter -- transfer tests for HotWallet creation -- typo -- *(manager)* do not print to stdout on low verbosity level -- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time - -### Other -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase wording -- *(CI)* upload faucet 
log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- check DAG crawling performance -- store owner info inside node instead of network -- small cleanup of dead code -- improve naming and typo fix -- clarify client documentation -- clarify client::new description -- clarify client documentation -- clarify client::new description -- cargo fmt -- rename output reason to purpose for clarity -- *(network)* move event handling to its own module -- cleanup network events -- *(network)* remove nat detection via incoming connections check -- enable connection keepalive timeout -- remove non relayed listener id from relay manager -- enable multiple relay connections -- return early if peer is not a node -- *(tryout)* do not add new relay candidates -- add debug lines while adding potential relay candidates -- do not remove old non-relayed listeners -- clippy fix -- *(networking)* remove empty file -- *(networking)* re-add global_only -- use quic again -- log listner id -- *(relay)* add candidate even if we are dialing -- remove quic -- cleanup, add in relay server behaviour, and todo -- *(node)* lower some log levels to reduce log size -- *(node)* optimise record_store farthest record calculation -- *(node)* do not reset farthest_acceptance_distance -- *(node)* remove duplicated record_store fullness check -- *(networking)* notify network event on failed put due to prune -- *(networking)* ensure pruned data is indeed further away than kept -- *(CI)* confirm there is no failed replication fetch -- *(networking)* remove circular vec error -- *(node)* unit test for recover historic quoting metrics -- *(node)* pass entire QuotingMetrics into calculate_cost_for_records -- *(node)* extend distance range -- *(transfers)* reduce error size -- *(transfer)* unit tests for PaymentQuote -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0 -- *(release)* 
sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0 -- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0 -- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0 - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.90.1...sn_cli-v0.90.2) - 2024-03-28 - -### Fixed -- *(cli)* read from cache during initial chunking process -- *(uploader)* do not error out on quote expiry during get store cost - -## [0.90.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.90.0...sn_cli-v0.90.1) - 2024-03-28 - -### Added -- *(uploader)* error out if the quote has expired during get store_cost -- *(uploader)* use WalletApi to prevent loading client wallet during each operation -- *(transfers)* implement WalletApi to expose common methods - -### Fixed -- *(uploader)* clarify the use of root and wallet dirs - -### Other -- *(uploader)* update docs - -## [0.90.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.85...sn_cli-v0.90.0) - 2024-03-27 - -### Added -- *(cli)* expose AccountPacket APIs from a lib so it can be used by other apps -- *(uploader)* collect all the uploaded registers -- *(uploader)* allow either chunk or chunk path to be used -- *(uploader)* register existence should be checked before going with payment flow -- *(client)* use the new Uploader insetead of FilesUpload -- make logging simpler to use -- [**breaking**] remove gossip code -- svg caching, fault tolerance during DAG collection -- *(uploader)* repay immediately if the quote has expired -- *(uploader)* use ClientRegister instead of Registers -- *(client)* implement a generic uploader with repay ability -- *(transfers)* enable client to check if a quote has expired -- *(client)* make publish register as an associated function -- *(network)* filter out peers when returning store cost -- *(transfers)* [**breaking**] support multiple payments for the same xorname -- use Arc inside Client, Network to reduce clone cost -- *(networking)* add NodeIssue for tracking bad node shunning -- *(faucet)* rate limit based upon wallet locks - -### Fixed -- *(cli)* files should be chunked before checking if the chunks are empty -- *(test)* use tempfile lib instead of stdlib to create temp dirs -- *(clippy)* allow too many arguments as it is a private function -- *(uploader)* remove unused error tracking and allow retries for new payee -- *(uploader)* make the internals more clean -- *(uploader)* update force make payment logic -- *(register)* permissions verification was not being made by some Register APIs -- *(node)* fetching new data shall not cause timed_out immediately -- *(test)* generate unique temp dir to avoid read outdated data -- *(register)* shortcut permissions check when anyone can write to Register - -### Other -- *(cli)* moving binary target related files onto src/bin dir -- *(uploader)* remove FilesApi dependency -- *(uploader)* implement UploaderInterface for 
easier testing -- rename of function to be more descriptive -- remove counter run through several functions and replace with simple counter -- *(register)* minor simplification in Register Permissions implementation -- *(uploader)* remove unused code path when store cost is 0 -- *(uploader)* implement tests to test the basic pipeline logic -- *(uploader)* initial test setup for uploader -- *(uploader)* remove failed_to states -- *(node)* refactor pricing metrics -- lower some networking log levels -- *(node)* loose bad node detection criteria -- *(node)* optimization to reduce logging - -## [0.89.85](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.84...sn_cli-v0.89.85) - 2024-03-21 - -### Added -- *(cli)* have CLI folders cmds to act on current directory by default -- *(folders)* folders APIs to accept an encryption key for metadata chunks -- *(log)* set log levels on the fly -- improve parallelisation with buffered streams -- refactor DAG, improve error management and security -- dag error recording -- *(protocol)* add rpc to set node log level on the fly - -### Other -- *(cli)* adding automated test for metadata chunk encryption -- *(cli)* adding some high-level doc to acc-packet codebase -- *(node)* reduce bad_nodes check resource usage - -## [0.89.84](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.83...sn_cli-v0.89.84) - 2024-03-18 - -### Other -- *(acc-packet)* adding test for acc-packet moved to a different location on disk -- *(acc-packet)* adding unit test for acc-packet changes scanning logic -- *(acc-packet)* adding unit test to private methods/helpers -- *(cli)* breaking up acc-packet logic within its own mod -- name change to spawn events handler -- increase of text length -- iterate upload code rearranged for clear readability - -## [0.89.83](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82...sn_cli-v0.89.83) - 2024-03-14 - -### Added -- self in import change -- moved param to outside calc -- refactor spend validation - -### Fixed -- *(cli)* allow to upload chunks from acc-packet using chunked files local cache -- *(cli)* use chunk-mgr with iterator skipping tracking info files - -### Other -- *(acc-packet)* adding verifications to compare tracking info generated on acc-packets cloned -- *(acc-packet)* adding verifications to compare the files/dirs stored on acc-packets cloned -- *(acc-packet)* testing sync empty root dirs -- *(acc-packet)* testing mutations syncing across clones of an acc-packet -- *(acc-packet)* adding automated tests to sn_cli::AccountPacket -- *(cli)* chunk-mgr to report files chunked/uploaded rather than bailing out -- improve code quality -- new `sn_service_management` crate - -## [0.89.82-alpha.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82-alpha.0...sn_cli-v0.89.82-alpha.1) - 2024-03-08 - -### Added -- reference checks -- reference checks -- builder added to estimate -- removal of unnecessary code in upload rs -- remove all use of client in iter uploader - -### Other -- *(folders)* adding automated tests to sn_client::FoldersApi - -## [0.89.81](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.80...sn_cli-v0.89.81) - 2024-03-06 - -### Added -- *(cli)* cmd to initialise a directory as root Folder for storing and syncing on/with network -- *(cli)* pull any Folders changes from network when syncing and merge them to local version -- make sn_cli use sn_clients reeports -- *(cli)* files download respects filename path -- *(folders)* make payments for local mutations detected before 
syncing -- *(folders)* build mutations report to be used by status and sync apis -- *(folders)* sync up logic and CLI cmd -- impl iterate uploader self to extract spawn theads -- impl iterate uploader self to extract spawn theads -- elevate files api and cm -- refactor upload with iter -- a more clear param for a message function -- split upload and upload with iter -- removal of some messages from vody body -- batch royalties redemption -- collect royalties through DAG -- *(folders)* avoid chunking files when retrieving them with Folders from the network -- *(folders)* store files data-map within Folders metadata chunk -- file to download -- *(folders)* regenerate tracking info when downloading Folders fm the network -- *(folders)* realise local changes made to folders/files -- *(folders)* keep track of local changes to Folders - -### Fixed -- *(folders)* set correct change state to folders when scanning -- *(folders)* keep track of root folder sync status - -### Other -- clean swarm commands errs and spend errors -- also add deps features in sn_client -- *(release)* sn_transfers-v0.16.1 -- *(release)* sn_protocol-v0.15.0/sn-node-manager-v0.4.0 -- *(cli)* removing some redundant logic from acc-packet codebase -- *(cli)* minor improvements to acc-packet codebase comments -- rename to iterative upload -- rename to iterative upload -- *(folders)* some simplifications to acc-packet codebase -- *(folders)* minor improvements to folders status report - -## [0.89.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.79...sn_cli-v0.89.80) - 2024-02-23 - -### Added -- file to upload -- estimate refactor - -## [0.89.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.78...sn_cli-v0.89.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## [0.89.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.77...sn_cli-v0.89.78) - 2024-02-20 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.76...sn_cli-v0.89.77) - 2024-02-20 - -### Added -- dependency reconfiguration -- nano to snt -- concurrent estimate without error messages -- make data public bool -- removal of the retry strategy -- estimate feature with ci and balance after with fn docs - -## [0.89.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.75...sn_cli-v0.89.76) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.89.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.74...sn_cli-v0.89.75) - 2024-02-20 - -### Added -- spend and DAG utilities - -## [0.89.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.73...sn_cli-v0.89.74) - 2024-02-20 - -### Added -- *(folders)* move folders/files metadata out of Folders entries - -## [0.89.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.72...sn_cli-v0.89.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_registers - -## [0.89.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.71...sn_cli-v0.89.72) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.89.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.70...sn_cli-v0.89.71) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## 
[0.89.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.69...sn_cli-v0.89.70) - 2024-02-19 - -### Other -- *(cli)* allow to pass files iterator to chunk-mgr and files-upload tools - -## [0.89.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.68...sn_cli-v0.89.69) - 2024-02-15 - -### Added -- *(client)* keep payee as part of storage payment cache - -### Other -- *(release)* sn_networking-v0.13.19/sn_faucet-v0.3.67/sn_client-v0.104.14/sn_node-v0.104.22 - -## [0.89.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.67...sn_cli-v0.89.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.66...sn_cli-v0.89.67) - 2024-02-14 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.65...sn_cli-v0.89.66) - 2024-02-14 - -### Other -- *(refactor)* move mod.rs files the modern way - -## [0.89.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.64...sn_cli-v0.89.65) - 2024-02-13 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.63...sn_cli-v0.89.64) - 2024-02-13 - -### Added -- identify orphans and inconsistencies in the DAG - -### Fixed -- manage the genesis spend case - -## [0.89.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.62...sn_cli-v0.89.63) - 2024-02-12 - -### Other -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.89.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.61...sn_cli-v0.89.62) - 2024-02-12 - -### Added -- *(cli)* single payment for all folders being synced -- *(cli)* adding Folders download CLI cmd -- *(client)* adding Folders sync API and CLI cmd - -### Other -- *(cli)* improvements based on peer review -- *(cli)* adding simple example doc for using Folders cmd -- *(cli)* moving some Folder logic to a private helper function - -## [0.89.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.60...sn_cli-v0.89.61) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.89.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.59...sn_cli-v0.89.60) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.89.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.58...sn_cli-v0.89.59) - 2024-02-09 - -### Other -- update dependencies - -## [0.89.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.57...sn_cli-v0.89.58) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.89.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.56...sn_cli-v0.89.57) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.55...sn_cli-v0.89.56) - 2024-02-08 - -### Added -- move the RetryStrategy into protocol and use that during cli upload/download - -### Fixed -- *(bench)* update retry strategy args - -### Other -- *(network)* rename re-attempts to retry strategy - -## [0.89.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.54...sn_cli-v0.89.55) - 2024-02-08 - -### Other -- update dependencies - -## 
[0.89.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.53...sn_cli-v0.89.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.52...sn_cli-v0.89.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.51...sn_cli-v0.89.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.50...sn_cli-v0.89.51) - 2024-02-07 - -### Other -- update dependencies - -## [0.89.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.49...sn_cli-v0.89.50) - 2024-02-07 - -### Added -- extendable local state DAG in cli - -## [0.89.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.48...sn_cli-v0.89.49) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.47...sn_cli-v0.89.48) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.46...sn_cli-v0.89.47) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.45...sn_cli-v0.89.46) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.44...sn_cli-v0.89.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.43...sn_cli-v0.89.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.42...sn_cli-v0.89.43) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.41...sn_cli-v0.89.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.40...sn_cli-v0.89.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.39...sn_cli-v0.89.40) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.38...sn_cli-v0.89.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.37...sn_cli-v0.89.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.36...sn_cli-v0.89.37) - 2024-02-01 - -### Other -- update dependencies - -## [0.89.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.35...sn_cli-v0.89.36) - 2024-02-01 - -### Fixed -- *(cli)* move UploadedFiles creation logic from ChunkManager -- *(cli)* chunk manager to return error if fs operation fails - -### Other -- *(cli)* use 'completed' files everywhere - -## [0.89.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.34...sn_cli-v0.89.35) - 2024-02-01 - -### Other -- update dependencies - -## [0.89.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.33...sn_cli-v0.89.34) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.32...sn_cli-v0.89.33) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.31...sn_cli-v0.89.32) - 
2024-01-31 - -### Other -- update dependencies - -## [0.89.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.30...sn_cli-v0.89.31) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.29...sn_cli-v0.89.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.28...sn_cli-v0.89.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.27...sn_cli-v0.89.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.26...sn_cli-v0.89.27) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.25...sn_cli-v0.89.26) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.24...sn_cli-v0.89.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.23...sn_cli-v0.89.24) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.22...sn_cli-v0.89.23) - 2024-01-29 - -### Other -- *(cli)* moving wallet mod into its own mod folder - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.21...sn_cli-v0.89.22) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.20...sn_cli-v0.89.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.19...sn_cli-v0.89.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.18...sn_cli-v0.89.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.17...sn_cli-v0.89.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.16...sn_cli-v0.89.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.15...sn_cli-v0.89.16) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.14...sn_cli-v0.89.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.13...sn_cli-v0.89.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.12...sn_cli-v0.89.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.11...sn_cli-v0.89.12) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.10...sn_cli-v0.89.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.9...sn_cli-v0.89.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.8...sn_cli-v0.89.9) - 2024-01-23 - -### Other -- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35 - -## 
[0.89.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.7...sn_cli-v0.89.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.6...sn_cli-v0.89.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.5...sn_cli-v0.89.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.4...sn_cli-v0.89.5) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.3...sn_cli-v0.89.4) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.2...sn_cli-v0.89.3) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.1...sn_cli-v0.89.2) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.0...sn_cli-v0.89.1) - 2024-01-17 - -### Other -- update dependencies - -## [0.89.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.22...sn_cli-v0.89.0) - 2024-01-17 - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.88.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.21...sn_cli-v0.88.22) - 2024-01-17 - -### Other -- update dependencies - -## [0.88.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.20...sn_cli-v0.88.21) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.19...sn_cli-v0.88.20) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.18...sn_cli-v0.88.19) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.17...sn_cli-v0.88.18) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.16...sn_cli-v0.88.17) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.15...sn_cli-v0.88.16) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.14...sn_cli-v0.88.15) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.13...sn_cli-v0.88.14) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.12...sn_cli-v0.88.13) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.11...sn_cli-v0.88.12) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.10...sn_cli-v0.88.11) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.9...sn_cli-v0.88.10) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.8...sn_cli-v0.88.9) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.7...sn_cli-v0.88.8) - 2024-01-11 - -### 
Other -- update dependencies - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.6...sn_cli-v0.88.7) - 2024-01-10 - -### Added -- *(client)* client APIs and CLI cmd to broadcast a transaction signed offline -- *(cli)* new cmd to sign a transaction offline -- *(cli)* new wallet cmd to create a unsigned transaction to be used for offline signing - -### Other -- *(transfers)* solving clippy issues about complex fn args - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.5...sn_cli-v0.88.6) - 2024-01-10 - -### Other -- update dependencies - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.4...sn_cli-v0.88.5) - 2024-01-10 - -### Added -- allow register CLI to create a public register writable to anyone - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.3...sn_cli-v0.88.4) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.2...sn_cli-v0.88.3) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.1...sn_cli-v0.88.2) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.0...sn_cli-v0.88.1) - 2024-01-09 - -### Added -- *(cli)* safe wallet create saves new key - -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.87.0...sn_cli-v0.88.0) - 2024-01-08 - -### Added -- provide `--first` argument for `safenode` - -## [0.87.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.103...sn_cli-v0.87.0) - 2024-01-08 - -### Added -- *(cli)* intergrate FilesDownload with cli - -### Other -- *(client)* [**breaking**] refactor `Files` into `FilesUpload` - -## [0.86.103](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.102...sn_cli-v0.86.103) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.102](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.101...sn_cli-v0.86.102) - 2024-01-08 - -### Other -- more doc updates to readme files - -## [0.86.101](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.100...sn_cli-v0.86.101) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.100](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.99...sn_cli-v0.86.100) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.99](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.98...sn_cli-v0.86.99) - 2024-01-06 - -### Fixed -- *(cli)* read datamap when the xor addr of the file is provided - -## [0.86.98](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.97...sn_cli-v0.86.98) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.97](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.96...sn_cli-v0.86.97) - 2024-01-05 - -### Other -- add clippy unwrap lint to workspace - -## [0.86.96](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.95...sn_cli-v0.86.96) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.95](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.94...sn_cli-v0.86.95) - 2024-01-05 - -### Added -- *(cli)* store uploaded file metadata - -## [0.86.94](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.93...sn_cli-v0.86.94) - 2024-01-05 - -### Other -- *(cli)* error if there is no file to upload - -## [0.86.93](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.92...sn_cli-v0.86.93) - 2024-01-05 - -### Other -- update 
dependencies - -## [0.86.92](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.91...sn_cli-v0.86.92) - 2024-01-04 - -### Other -- update dependencies - -## [0.86.91](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.90...sn_cli-v0.86.91) - 2024-01-04 - -### Other -- *(cli)* print private data warning -- *(cli)* print the datamap's entire hex addr during first attempt - -## [0.86.90](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.89...sn_cli-v0.86.90) - 2024-01-03 - -### Other -- *(cli)* print the datamap's entire hex addr - -## [0.86.89](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.88...sn_cli-v0.86.89) - 2024-01-03 - -### Added -- *(cli)* keep downloaded files in a safe subdir -- *(client)* clients no longer upload data_map by default - -### Fixed -- *(cli)* write datamap to metadata - -### Other -- clippy test fixes and updates -- *(cli)* add not to non-public uploaded files -- refactor for clarity around head_chunk_address -- *(cli)* do not write datamap chunk if non-public - -## [0.86.88](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.87...sn_cli-v0.86.88) - 2024-01-03 - -### Other -- update dependencies - -## [0.86.87](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.86...sn_cli-v0.86.87) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.86](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.85...sn_cli-v0.86.86) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.85](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.84...sn_cli-v0.86.85) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.84](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.83...sn_cli-v0.86.84) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.83](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.82...sn_cli-v0.86.83) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.82](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.81...sn_cli-v0.86.82) - 2023-12-26 - -### Other -- update dependencies - -## [0.86.81](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.80...sn_cli-v0.86.81) - 2023-12-22 - -### Other -- update dependencies - -## [0.86.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.79...sn_cli-v0.86.80) - 2023-12-22 - -### Fixed -- printout un-verified files to alert user - -## [0.86.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.78...sn_cli-v0.86.79) - 2023-12-21 - -### Other -- log full Register address when created in cli and example app - -## [0.86.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.77...sn_cli-v0.86.78) - 2023-12-21 - -### Other -- *(client)* emit chunk Uploaded event if a chunk was verified during repayment - -## [0.86.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.76...sn_cli-v0.86.77) - 2023-12-20 - -### Other -- reduce default batch size - -## [0.86.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.75...sn_cli-v0.86.76) - 2023-12-19 - -### Added -- network royalties through audit POC - -## [0.86.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.74...sn_cli-v0.86.75) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.73...sn_cli-v0.86.74) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.72...sn_cli-v0.86.73) - 2023-12-19 - -### Other -- 
update dependencies - -## [0.86.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.71...sn_cli-v0.86.72) - 2023-12-19 - -### Fixed -- *(cli)* mark chunk completion as soon as we upload each chunk - -## [0.86.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.70...sn_cli-v0.86.71) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.69...sn_cli-v0.86.70) - 2023-12-18 - -### Added -- *(cli)* random shuffle upload chunks to allow clients co-operation - -## [0.86.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.68...sn_cli-v0.86.69) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.67...sn_cli-v0.86.68) - 2023-12-18 - -### Added -- *(client)* update the Files config via setters -- *(client)* track the upload stats inside Files -- *(client)* move upload retry logic from CLI to client - -### Other -- *(client)* add docs to the Files struct -- *(cli)* use the new client Files api to upload chunks - -## [0.86.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.66...sn_cli-v0.86.67) - 2023-12-14 - -### Other -- update dependencies - -## [0.86.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.65...sn_cli-v0.86.66) - 2023-12-14 - -### Other -- update dependencies - -## [0.86.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.64...sn_cli-v0.86.65) - 2023-12-14 - -### Other -- *(cli)* make upload summary printout clearer - -## [0.86.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.63...sn_cli-v0.86.64) - 2023-12-14 - -### Other -- *(cli)* make sequential payment fail limit a const - -## [0.86.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.62...sn_cli-v0.86.63) - 2023-12-14 - -### Other -- *(cli)* make wallet address easy to copy -- *(cli)* peer list is not printed to stdout - -## [0.86.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.61...sn_cli-v0.86.62) - 2023-12-14 - -### Added -- *(cli)* cli arg for controlling chunk retries -- *(cli)* simple retry mechanism for remaining chunks - -### Other -- prevent retries on ci runs w/ '-r 0' - -## [0.86.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.60...sn_cli-v0.86.61) - 2023-12-13 - -### Other -- *(cli)* refactor upload_files - -## [0.86.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.59...sn_cli-v0.86.60) - 2023-12-13 - -### Other -- update dependencies - -## [0.86.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.58...sn_cli-v0.86.59) - 2023-12-13 - -### Added -- *(cli)* download path is familiar to users - -## [0.86.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.57...sn_cli-v0.86.58) - 2023-12-13 - -### Added -- audit DAG collection and visualization -- cli double spends audit from genesis - -## [0.86.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.56...sn_cli-v0.86.57) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.55...sn_cli-v0.86.56) - 2023-12-12 - -### Added -- *(cli)* skip payment and upload for existing chunks - -## [0.86.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.54...sn_cli-v0.86.55) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.53...sn_cli-v0.86.54) - 2023-12-12 - -### Added -- 
constant uploading across batches
-
-### Fixed
-- *(cli)* remove chunk_manager clone that is unsafe
-
-### Other
-- *(networking)* add replication logs
-- *(networking)* solidify REPLICATION_RANGE use. exclude self_peer_id in some calcs
-- *(cli)* bail early on any payment errors
-- *(cli)* only report uploaded files if no errors
-
-## [0.86.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.52...sn_cli-v0.86.53) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.86.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.51...sn_cli-v0.86.52) - 2023-12-11
-
-### Other
-- update dependencies
-
-## [0.86.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.50...sn_cli-v0.86.51) - 2023-12-11
-
-### Other
-- *(cli)* ux improvements after upload completes
-
-## [0.86.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.49...sn_cli-v0.86.50) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.86.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.48...sn_cli-v0.86.49) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.86.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.47...sn_cli-v0.86.48) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.86.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.46...sn_cli-v0.86.47) - 2023-12-07
-
-### Other
-- update dependencies
-
-## [0.86.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.45...sn_cli-v0.86.46) - 2023-12-06
-
-### Other
-- update dependencies
-
-## [0.86.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.44...sn_cli-v0.86.45) - 2023-12-06
-
-### Other
-- update dependencies
-
-## [0.86.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.43...sn_cli-v0.86.44) - 2023-12-06
-
-### Added
-- *(cli)* enable gossipsub for client when wallet cmd requires it
-- *(wallet)* basic impl of a watch-only wallet API
-
-### Other
-- *(wallet)* major refactoring removing redundant and unused code
-- *(cli)* Fix duplicate use of 'n' short flag
-- *(cli)* All --name flags have short 'n' flag
-
-## [0.86.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.42...sn_cli-v0.86.43) - 2023-12-06
-
-### Other
-- remove some needless cloning
-- remove needless pass by value
-- use inline format args
-- add boilerplate for workspace lints
-
-## [0.86.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.41...sn_cli-v0.86.42) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.86.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.40...sn_cli-v0.86.41) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.86.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.39...sn_cli-v0.86.40) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.86.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.38...sn_cli-v0.86.39) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.86.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.37...sn_cli-v0.86.38) - 2023-12-05
-
-### Added
-- allow for cli chunk put retries for unverifiable chunks
-
-### Fixed
-- mark chunks as completed when no failures on retry
-
-## [0.86.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.36...sn_cli-v0.86.37) - 2023-12-05
-
-### Other
-- *(cli)* print the failed uploads stats
-- *(cli)* remove unpaid/paid distinction from chunk manager
-
-## [0.86.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.35...sn_cli-v0.86.36) - 2023-12-05
-
-### Other
-- *(networking)* remove triggered bootstrap slowdown
-
-## [0.86.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.34...sn_cli-v0.86.35) - 2023-12-04
-
-### Other
-- update dependencies
-
-## [0.86.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.33...sn_cli-v0.86.34) - 2023-12-01
-
-### Other
-- *(ci)* fix CI build cache parsing error
-
-## [0.86.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.32...sn_cli-v0.86.33) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.86.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.31...sn_cli-v0.86.32) - 2023-11-29
-
-### Added
-- most of nodes not subscribe to royalty_transfer topic
-
-## [0.86.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.30...sn_cli-v0.86.31) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.86.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.29...sn_cli-v0.86.30) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.86.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.28...sn_cli-v0.86.29) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.86.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.27...sn_cli-v0.86.28) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.86.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.26...sn_cli-v0.86.27) - 2023-11-29
-
-### Added
-- verify all the way to genesis
-- verify spends through the cli
-
-### Fixed
-- genesis check security flaw
-
-## [0.86.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.25...sn_cli-v0.86.26) - 2023-11-28
-
-### Added
-- *(cli)* serialise chunks metadata on disk with MsgPack instead of bincode
-- *(royalties)* serialise royalties notifs with MsgPack instead of bincode
-
-## [0.86.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.24...sn_cli-v0.86.25) - 2023-11-28
-
-### Other
-- update dependencies
-
-## [0.86.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.23...sn_cli-v0.86.24) - 2023-11-28
-
-### Other
-- update dependencies
-
-## [0.86.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.22...sn_cli-v0.86.23) - 2023-11-27
-
-### Other
-- update dependencies
-
-## [0.86.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.21...sn_cli-v0.86.22) - 2023-11-24
-
-### Added
-- *(cli)* peers displayed as list
-
-## [0.86.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.20...sn_cli-v0.86.21) - 2023-11-24
-
-### Other
-- update dependencies
-
-## [0.86.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.19...sn_cli-v0.86.20) - 2023-11-23
-
-### Added
-- record put retry even when not verifying
-- retry at the record level, remove all other retries, report errors
-
-### Other
-- appease clippy
-- fix tests compilation
-
-## [0.86.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.18...sn_cli-v0.86.19) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.86.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.17...sn_cli-v0.86.18) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.86.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.16...sn_cli-v0.86.17) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.86.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.15...sn_cli-v0.86.16) - 2023-11-22
-
-### Other
-- update dependencies
-
-## [0.86.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.14...sn_cli-v0.86.15) - 2023-11-22
-
-### Added
-- *(cli)* add download batch-size option
-
-## [0.86.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.13...sn_cli-v0.86.14) - 2023-11-22
-
-### Other
-- update dependencies
-
-## [0.86.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.12...sn_cli-v0.86.13) - 2023-11-21
-
-### Added
-- make joining gossip for clients and rpc nodes optional
-
-## [0.86.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.11...sn_cli-v0.86.12) - 2023-11-21
-
-### Other
-- update dependencies
-
-## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.10...sn_cli-v0.86.11) - 2023-11-20
-
-### Other
-- increase default batch size
-
-## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.9...sn_cli-v0.86.10) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.8...sn_cli-v0.86.9) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.7...sn_cli-v0.86.8) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.6...sn_cli-v0.86.7) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.5...sn_cli-v0.86.6) - 2023-11-20
-
-### Fixed
-- use actual quote instead of dummy
-
-## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.4...sn_cli-v0.86.5) - 2023-11-17
-
-### Other
-- update dependencies
-
-## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.3...sn_cli-v0.86.4) - 2023-11-17
-
-### Other
-- update dependencies
-
-## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.2...sn_cli-v0.86.3) - 2023-11-16
-
-### Other
-- update dependencies
-
-## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.1...sn_cli-v0.86.2) - 2023-11-16
-
-### Added
-- massive cleaning to prepare for quotes
-
-## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.0...sn_cli-v0.86.1) - 2023-11-15
-
-### Other
-- update dependencies
-
-## [0.86.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.20...sn_cli-v0.86.0) - 2023-11-15
-
-### Added
-- *(client)* [**breaking**] error out if we cannot connect to the network in
-
-### Other
-- *(client)* [**breaking**] remove request_response timeout argument
-
-## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.19...sn_cli-v0.85.20) - 2023-11-15
-
-### Added
-- *(royalties)* make royalties payment to be 15% of the total storage cost
-- *(protocol)* move test utils behind a feature gate
-
-## [0.85.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.18...sn_cli-v0.85.19) - 2023-11-14
-
-### Other
-- *(royalties)* verify royalties fees amounts
-
-## [0.85.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.17...sn_cli-v0.85.18) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.16...sn_cli-v0.85.17) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.15...sn_cli-v0.85.16) - 2023-11-14
-
-### Fixed
-- *(cli)* marking chunks as verified should mark them as paid too
-
-## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.14...sn_cli-v0.85.15) - 2023-11-14
-
-### Fixed
-- *(cli)* repay unpaid chunks due to transfer failures
-
-## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.13...sn_cli-v0.85.14) - 2023-11-13
-
-### Fixed
-- *(cli)* failed to move chunk path shall not get deleted
-
-## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.12...sn_cli-v0.85.13) - 2023-11-13
-
-### Fixed
-- avoid infinite looping on verification during upload
-
-## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.11...sn_cli-v0.85.12) - 2023-11-13
-
-### Other
-- update dependencies
-
-## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.10...sn_cli-v0.85.11) - 2023-11-13
-
-### Other
-- *(cli)* disable silent ignoring of wallet errors
-
-## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.9...sn_cli-v0.85.10) - 2023-11-10
-
-### Added
-- *(cli)* attempt to reload wallet from disk if storing it fails when receiving transfers online
-- *(cli)* new cmd to listen to royalties payments and deposit them into a local wallet
-
-### Other
-- *(cli)* minor improvement to help docs
-
-## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.8...sn_cli-v0.85.9) - 2023-11-10
-
-### Other
-- update dependencies
-
-## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.7...sn_cli-v0.85.8) - 2023-11-09
-
-### Other
-- update dependencies
-
-## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.6...sn_cli-v0.85.7) - 2023-11-09
-
-### Other
-- update dependencies
-
-## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.5...sn_cli-v0.85.6) - 2023-11-09
-
-### Added
-- increase retry count for chunk put
-- chunk put retry taking repayment into account
-
-### Other
-- const instead of magic num in code for wait time
-- please ci
-
-## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.4...sn_cli-v0.85.5) - 2023-11-08
-
-### Other
-- update dependencies
-
-## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.3...sn_cli-v0.85.4) - 2023-11-08
-
-### Fixed
-- *(bench)* update benchmark to account for de-duplicated files
-
-## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.2...sn_cli-v0.85.3) - 2023-11-08
-
-### Other
-- update dependencies
-
-## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.1...sn_cli-v0.85.2) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.85.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.0...sn_cli-v0.85.1) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.85.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.51...sn_cli-v0.85.0) - 2023-11-07
-
-### Added
-- *(cli)* store paid and unpaid chunks separately
-- *(cli)* use ChunkManager during the upload process
-- *(cli)* implement ChunkManager to re-use already chunked files
-
-### Fixed
-- *(cli)* keep track of files that have been completely uploaded
-- *(cli)* get bytes from OsStr by first converting it into lossy string
-- *(client)* [**breaking**] make `Files::chunk_file` into an associated function
-- *(upload)* don't ignore file if filename cannot be converted from OsString to String
-
-### Other
-- rename test function and spell correction
-- *(cli)* add more tests to chunk manager for unpaid paid dir refactor
-- *(cli)* add some docs to ChunkManager
-- *(cli)* add tests for `ChunkManager`
-- *(cli)* move chunk management to its own module
-
-## [0.84.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.50...sn_cli-v0.84.51) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.84.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.49...sn_cli-v0.84.50) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.84.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.48...sn_cli-v0.84.49) - 2023-11-06
-
-### Other
-- update dependencies
-
-## [0.84.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.47...sn_cli-v0.84.48) - 2023-11-06
-
-### Other
-- update dependencies
-
-## [0.84.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.46...sn_cli-v0.84.47) - 2023-11-06
-
-### Other
-- update dependencies
-
-## [0.84.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.45...sn_cli-v0.84.46) - 2023-11-06
-
-### Other
-- update dependencies
-
-## [0.84.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.44...sn_cli-v0.84.45) - 2023-11-06
-
-### Added
-- *(deps)* upgrade libp2p to 0.53
-
-## [0.84.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.43...sn_cli-v0.84.44) - 2023-11-03
-
-### Other
-- *(cli)* make file upload output cut n paste friendly
-
-## [0.84.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.42...sn_cli-v0.84.43) - 2023-11-03
-
-### Other
-- update dependencies
-
-## [0.84.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.41...sn_cli-v0.84.42) - 2023-11-02
-
-### Other
-- update dependencies
-
-## [0.84.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.40...sn_cli-v0.84.41) - 2023-11-02
-
-### Other
-- update dependencies
-
-## [0.84.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.39...sn_cli-v0.84.40) - 2023-11-01
-
-### Other
-- update dependencies
-
-## [0.84.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.38...sn_cli-v0.84.39) - 2023-11-01
-
-### Other
-- update dependencies
-
-## [0.84.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.37...sn_cli-v0.84.38) - 2023-11-01
-
-### Other
-- update dependencies
-
-## [0.84.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.36...sn_cli-v0.84.37) - 2023-11-01
-
-### Other
-- update dependencies
-
-## [0.84.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.35...sn_cli-v0.84.36) - 2023-11-01
-
-### Other
-- update dependencies
-
-## [0.84.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.34...sn_cli-v0.84.35) - 2023-10-31
-
-### Other
-- update dependencies
-
-## [0.84.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.33...sn_cli-v0.84.34) - 2023-10-31
-
-### Other
-- update dependencies
-
-## [0.84.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.32...sn_cli-v0.84.33) - 2023-10-31
-
-### Other
-- update dependencies
-
-## [0.84.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.31...sn_cli-v0.84.32) - 2023-10-30
-
-### Other
-- update dependencies
-
-## [0.84.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.30...sn_cli-v0.84.31) - 2023-10-30
-
-### Added
-- *(cli)* error out if empty wallet
-- *(cli)* error out if we do not have enough balance
-
-## [0.84.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.29...sn_cli-v0.84.30) - 2023-10-30
-
-### Other
-- update dependencies
-
-## [0.84.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.28...sn_cli-v0.84.29) - 2023-10-30
-
-### Other
-- *(node)* use Bytes for Gossip related data types
-- *(release)* sn_client-v0.95.11/sn_protocol-v0.8.7/sn_transfers-v0.14.8/sn_networking-v0.9.10
-
-## [0.84.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.27...sn_cli-v0.84.28) - 2023-10-27
-
-### Other
-- update dependencies
-
-## [0.84.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.26...sn_cli-v0.84.27) - 2023-10-27
-
-### Other
-- update dependencies
-
-## [0.84.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.25...sn_cli-v0.84.26) - 2023-10-27
-
-### Added
-- *(cli)* verify as we upload when 1 batch
-
-## [0.84.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.24...sn_cli-v0.84.25) - 2023-10-26
-
-### Other
-- update dependencies
-
-## [0.84.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.23...sn_cli-v0.84.24) - 2023-10-26
-
-### Other
-- update dependencies
-
-## [0.84.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.22...sn_cli-v0.84.23) - 2023-10-26
-
-### Other
-- update dependencies
-
-## [0.84.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.21...sn_cli-v0.84.22) - 2023-10-26
-
-### Other
-- update dependencies
-
-## [0.84.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.20...sn_cli-v0.84.21) - 2023-10-26
-
-### Other
-- update dependencies
-
-## [0.84.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.19...sn_cli-v0.84.20) - 2023-10-25
-
-### Added
-- *(cli)* chunk files in parallel
-
-### Fixed
-- *(cli)* remove Arc from ProgressBar as it is Arc internally
-
-### Other
-- *(cli)* add logs to indicate the time spent on chunking the files
-
-## [0.84.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.18...sn_cli-v0.84.19) - 2023-10-24
-
-### Added
-- *(cli)* wallet deposit cmd with no arg was not reading cash notes from disk
-- *(cli)* new wallet create cmd allowing users to create a wallet from a given secret key
-
-## [0.84.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.17...sn_cli-v0.84.18) - 2023-10-24
-
-### Other
-- update dependencies
-
-## [0.84.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.16...sn_cli-v0.84.17) - 2023-10-24
-
-### Other
-- update dependencies
-
-## [0.84.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.15...sn_cli-v0.84.16) - 2023-10-24
-
-### Other
-- update dependencies
-
-## [0.84.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.14...sn_cli-v0.84.15) - 2023-10-24
-
-### Added
-- *(log)* use LogBuilder to initialize logging
-
-### Other
-- *(client)* log and wait tweaks
-
-## [0.84.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.13...sn_cli-v0.84.14) - 2023-10-24
-
-### Other
-- update dependencies
-
-## [0.84.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.12...sn_cli-v0.84.13) - 2023-10-23
-
-### Other
-- update dependencies
-
-## [0.84.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.11...sn_cli-v0.84.12) - 2023-10-23
-
-### Other
-- update dependencies
-
-## [0.84.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.10...sn_cli-v0.84.11) - 2023-10-23
-
-### Fixed
-- *(cli)* don't bail if a payment was not found during verify/repayment
-
-## [0.84.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.9...sn_cli-v0.84.10) - 2023-10-23
-
-### Other
-- more custom debug and debug skips
-
-## [0.84.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.8...sn_cli-v0.84.9) - 2023-10-23
-
-### Other
-- update dependencies
-
-## [0.84.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.7...sn_cli-v0.84.8) - 2023-10-22
-
-### Other
-- update dependencies
-
-## [0.84.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.6...sn_cli-v0.84.7) - 2023-10-21
-
-### Other
-- update dependencies
-
-## [0.84.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.5...sn_cli-v0.84.6) - 2023-10-20
-
-### Other
-- update dependencies
-
-## [0.84.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.4...sn_cli-v0.84.5) - 2023-10-20
-
-### Other
-- update dependencies
-
-## [0.84.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.3...sn_cli-v0.84.4) - 2023-10-19
-
-### Other
-- update dependencies
-
-## [0.84.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.2...sn_cli-v0.84.3) - 2023-10-19
-
-### Other
-- update dependencies
-
-## [0.84.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.1...sn_cli-v0.84.2) - 2023-10-19
-
-### Other
-- update dependencies
-
-## [0.84.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.0...sn_cli-v0.84.1) - 2023-10-18
-
-### Other
-- update dependencies
-
-## [0.84.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.52...sn_cli-v0.84.0) - 2023-10-18
-
-### Added
-- *(client)* verify register uploads and retry and repay if failed
-
-### Other
-- *(client)* always validate storage payments
-- repay for data in node rewards tests
-
-## [0.83.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.51...sn_cli-v0.83.52) - 2023-10-18
-
-### Other
-- update dependencies
-
-## [0.83.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.50...sn_cli-v0.83.51) - 2023-10-17
-
-### Other
-- update dependencies
-
-## [0.83.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.49...sn_cli-v0.83.50) - 2023-10-16
-
-### Other
-- update dependencies
-
-## [0.83.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.48...sn_cli-v0.83.49) - 2023-10-16
-
-### Other
-- update dependencies
-
-## [0.83.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.47...sn_cli-v0.83.48) - 2023-10-13
-
-### Other
-- update dependencies
-
-## [0.83.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.46...sn_cli-v0.83.47) - 2023-10-13
-
-### Other
-- update dependencies
-
-## [0.83.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.45...sn_cli-v0.83.46) - 2023-10-12
-
-### Other
-- update dependencies
-
-## [0.83.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.44...sn_cli-v0.83.45) - 2023-10-12
-
-### Other
-- update dependencies
-
-## [0.83.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.43...sn_cli-v0.83.44) - 2023-10-12
-
-### Other
-- update dependencies
-
-## [0.83.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.42...sn_cli-v0.83.43) - 2023-10-11
-
-### Fixed
-- expose RecordMismatch errors and cleanup wallet if we hit that
-
-### Other
-- *(docs)* cleanup comments and docs
-
-## [0.83.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.41...sn_cli-v0.83.42) - 2023-10-11
-
-### Other
-- update dependencies
-
-## [0.83.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.40...sn_cli-v0.83.41) - 2023-10-11
-
-### Fixed
-- make client handle payment error
-
-## [0.83.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.39...sn_cli-v0.83.40) - 2023-10-11
-
-### Added
-- showing expected holders to CLI when required
-
-## [0.83.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.38...sn_cli-v0.83.39) - 2023-10-11
-
-### Other
-- update dependencies
-
-## [0.83.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.37...sn_cli-v0.83.38) - 2023-10-10
-
-### Added
-- *(transfer)* special event for transfer notifs over gossipsub
-
-## [0.83.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.36...sn_cli-v0.83.37) - 2023-10-10
-
-### Other
-- update dependencies
-
-## [0.83.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.35...sn_cli-v0.83.36) - 2023-10-10
-
-### Other
-- update dependencies
-
-## [0.83.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.34...sn_cli-v0.83.35) - 2023-10-10
-
-### Other
-- update dependencies
-
-## [0.83.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.33...sn_cli-v0.83.34) - 2023-10-09
-
-### Other
-- update dependencies
-
-## [0.83.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.32...sn_cli-v0.83.33) - 2023-10-09
-
-### Added
-- ensure temp SE chunks got cleaned after uploading
-
-## [0.83.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.31...sn_cli-v0.83.32) - 2023-10-08
-
-### Other
-- update dependencies
-
-## [0.83.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.30...sn_cli-v0.83.31) - 2023-10-06
-
-### Added
-- feat!(sn_transfers): unify store api for wallet
-
-### Other
-- remove deposit vs received cashnote distinction
-
-## [0.83.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.29...sn_cli-v0.83.30) - 2023-10-06
-
-### Other
-- *(cli)* reuse the client::send function to send amount from wallet
-
-## [0.83.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.28...sn_cli-v0.83.29) - 2023-10-06
-
-### Other
-- update dependencies
-
-## [0.83.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.27...sn_cli-v0.83.28) - 2023-10-06
-
-### Other
-- update dependencies
-
-## [0.83.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.26...sn_cli-v0.83.27) - 2023-10-05
-
-### Added
-- *(metrics)* enable node monitoring through dockerized grafana instance
-
-## [0.83.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.25...sn_cli-v0.83.26) - 2023-10-05
-
-### Added
-- feat!(cli): remove concurrency argument
-
-### Fixed
-- *(client)* remove concurrency limitations
-
-## [0.83.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.24...sn_cli-v0.83.25) - 2023-10-05
-
-### Fixed
-- *(sn_transfers)* be sure we store CashNotes before writing the wallet file
-
-## [0.83.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.23...sn_cli-v0.83.24) - 2023-10-05
-
-### Fixed
-- use specific verify func for chunk stored verification
-
-## [0.83.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.22...sn_cli-v0.83.23) - 2023-10-05
-
-### Added
-- use progress bars on `files upload`
-
-### Other
-- use one files api and clarify variable names
-- pay_for_chunks returns cost and new balance
-
-## [0.83.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.21...sn_cli-v0.83.22) - 2023-10-04
-
-### Other
-- update dependencies
-
-## [0.83.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.20...sn_cli-v0.83.21) - 2023-10-04
-
-### Other
-- update dependencies
-
-## [0.83.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.19...sn_cli-v0.83.20) - 2023-10-04
-
-### Added
-- *(client)* log the command invoked for safe
-
-## [0.83.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.18...sn_cli-v0.83.19) - 2023-10-04
-
-### Other
-- update dependencies
-
-## [0.83.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.17...sn_cli-v0.83.18) - 2023-10-04
-
-### Other
-- update dependencies
-
-## [0.83.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.16...sn_cli-v0.83.17) - 2023-10-03
-
-### Other
-- update dependencies
-
-## [0.83.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.15...sn_cli-v0.83.16) - 2023-10-03
-
-### Other
-- update dependencies
-
-## [0.83.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.14...sn_cli-v0.83.15) - 2023-10-03
-
-### Other
-- update dependencies
-
-## [0.83.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.13...sn_cli-v0.83.14) - 2023-10-03
-
-### Other
-- update dependencies
-
-## [0.83.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.12...sn_cli-v0.83.13) - 2023-10-03
-
-### Other
-- update dependencies
-
-## [0.83.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.11...sn_cli-v0.83.12) - 2023-10-02
-
-### Other
-- update dependencies
-
-## [0.83.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.10...sn_cli-v0.83.11) - 2023-10-02
-
-### Added
-- add read transfer from file option
-- faucet using transfers instead of sending raw cashnotes
-
-### Other
-- trim transfer hex nl and spaces
-- add some more error info printing
-
-## [0.83.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.9...sn_cli-v0.83.10) - 2023-10-02
-
-### Other
-- update dependencies
-
-## [0.83.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.8...sn_cli-v0.83.9) - 2023-10-02
-
-### Added
-- *(client)* show feedback on long wait for costs
-
-## [0.83.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.7...sn_cli-v0.83.8) - 2023-10-02
-
-### Other
-- update dependencies
-
-## [0.83.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.6...sn_cli-v0.83.7) - 2023-09-29
-
-### Other
-- update dependencies
-
-## [0.83.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.5...sn_cli-v0.83.6) - 2023-09-29
-
-### Fixed
-- *(cli)* dont bail on errors during repay/upload
-
-## [0.83.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.4...sn_cli-v0.83.5) - 2023-09-29
-
-### Fixed
-- *(client)* just skip empty files
-
-## [0.83.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.3...sn_cli-v0.83.4) - 2023-09-28
-
-### Added
-- client to client transfers
-
-## [0.83.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.2...sn_cli-v0.83.3) - 2023-09-27
-
-### Added
-- *(networking)* remove optional_semaphore being passed down from apps
-
-## [0.83.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.1...sn_cli-v0.83.2) - 2023-09-27
-
-### Other
-- update dependencies
-
-## [0.83.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.0...sn_cli-v0.83.1) - 2023-09-27
-
-### Added
-- *(logging)* set default log levels to be more verbose
-- *(logging)* set default logging to data-dir
-
-### Other
-- *(client)* add timestamp to client log path
-
-## [0.83.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.8...sn_cli-v0.83.0) - 2023-09-27
-
-### Added
-- deep clean sn_transfers, reduce exposition, remove dead code
-
-## [0.82.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.7...sn_cli-v0.82.8) - 2023-09-26
-
-### Other
-- update dependencies
-
-## [0.82.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.6...sn_cli-v0.82.7) - 2023-09-26
-
-### Added
-- *(apis)* adding client and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics
-
-## [0.82.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.5...sn_cli-v0.82.6) - 2023-09-25
-
-### Other
-- update dependencies
-
-## [0.82.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.4...sn_cli-v0.82.5) - 2023-09-25
-
-### Other
-- update dependencies
-
-## [0.82.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.3...sn_cli-v0.82.4) - 2023-09-25
-
-### Added
-- *(cli)* wrap repayment error for clarity
-
-## [0.82.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.2...sn_cli-v0.82.3) - 2023-09-25
-
-### Added
-- *(peers)* use a common way to bootstrap into the network for all the bins
-- *(cli)* fetch network contacts for the provided network name
-- *(cli)* fetch bootstrap peers from network contacts
-
-### Other
-- more logs around parsing network-contacts
-- *(cli)* feature gate network contacts and fetch from URL
-
-## [0.82.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.1...sn_cli-v0.82.2) - 2023-09-25
-
-### Other
-- update dependencies
-
-## [0.82.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.0...sn_cli-v0.82.1) - 2023-09-22
-
-### Added
-- *(apis)* adding client and node APIs, as well as safenode RPC services to pub/sub to gossipsub topics
-
-## [0.82.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.64...sn_cli-v0.82.0) - 2023-09-22
-
-### Added
-- *(cli)* deps update and arbitrary change for cli
-
-## [0.81.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.63...sn_cli-v0.81.64) - 2023-09-21
-
-### Added
-- provide a `files ls` command
-
-### Other
-- *(release)* sn_client-v0.89.22
-- store uploaded files list as text
-- clarify `files download` usage
-- output address of uploaded file
-
-## [0.81.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.62...sn_cli-v0.81.63) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.61...sn_cli-v0.81.62) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.60...sn_cli-v0.81.61) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.59...sn_cli-v0.81.60) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.58...sn_cli-v0.81.59) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.57...sn_cli-v0.81.58) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.56...sn_cli-v0.81.57) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.55...sn_cli-v0.81.56) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.54...sn_cli-v0.81.55) - 2023-09-20
-
-### Fixed
-- make clearer cli send asks for whole token amounts, not nanos
-
-## [0.81.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.53...sn_cli-v0.81.54) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.52...sn_cli-v0.81.53) - 2023-09-20
-
-### Other
-- update dependencies
-
-## [0.81.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.51...sn_cli-v0.81.52) - 2023-09-19
-
-### Other
-- update dependencies
-
-## [0.81.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.50...sn_cli-v0.81.51) - 2023-09-19
-
-### Other
-- update dependencies
-
-## [0.81.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.49...sn_cli-v0.81.50) - 2023-09-19
-
-### Other
-- error handling when failed fetch store cost
-
-## [0.81.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.48...sn_cli-v0.81.49) - 2023-09-19
-
-### Other
-- update dependencies
-
-## [0.81.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.47...sn_cli-v0.81.48) - 2023-09-19
-
-### Other
-- update dependencies
-
-## [0.81.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.46...sn_cli-v0.81.47) - 2023-09-19
-
-### Other
-- update dependencies
-
-## [0.81.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.45...sn_cli-v0.81.46) - 2023-09-18
-
-### Fixed
-- avoid verification too close to put; remove unnecessary wait for put
-
-## [0.81.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.44...sn_cli-v0.81.45) - 2023-09-18
-
-### Other
-- some cleanups within the upload procedure
-
-## [0.81.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.43...sn_cli-v0.81.44) - 2023-09-18
-
-### Other
-- update dependencies
-
-## [0.81.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.42...sn_cli-v0.81.43) - 2023-09-18
-
-### Fixed
-- *(cli)* repay and upload after verifying all the chunks
-
-### Other
-- *(cli)* use iter::chunks() API to batch and pay for our chunks
-
-## [0.81.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.41...sn_cli-v0.81.42) - 2023-09-15
-
-### Added
-- *(client)* pay for chunks in batches
-
-### Other
-- *(cli)* move 'chunk_path' to files.rs
-- *(client)* refactor chunk upload code to allow greater concurrency
-
-## [0.81.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.40...sn_cli-v0.81.41) - 2023-09-15
-
-### Other
-- update dependencies
-
-## [0.81.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.39...sn_cli-v0.81.40) - 2023-09-15
-
-### Other
-- update dependencies
-
-## [0.81.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.38...sn_cli-v0.81.39) - 2023-09-15
-
-### Other
-- update dependencies
-
-## [0.81.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.37...sn_cli-v0.81.38) - 2023-09-14
-
-### Other
-- update dependencies
-
-## [0.81.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.36...sn_cli-v0.81.37) - 2023-09-14
-
-### Added
-- expose batch_size to cli
-- split upload procedure into batches
-
-## [0.81.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.35...sn_cli-v0.81.36) - 2023-09-14
-
-### Other
-- *(metrics)* rename feature flag and small fixes
-
-## [0.81.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.34...sn_cli-v0.81.35) - 2023-09-13
-
-### Added
-- *(register)* paying nodes for Register storage
-
-## [0.81.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.33...sn_cli-v0.81.34) - 2023-09-12
-
-### Added
-- utilize stream decryptor
-
-## [0.81.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.32...sn_cli-v0.81.33) - 2023-09-12
-
-### Other
-- update dependencies
-
-## [0.81.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.31...sn_cli-v0.81.32) - 2023-09-12
-
-### Other
-- *(metrics)* rename network metrics and remove from default features list
-
-## [0.81.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.30...sn_cli-v0.81.31) - 2023-09-12
-
-### Added
-- add tx and parent spends verification
-- chunk payments using UTXOs instead of DBCs
-
-### Other
-- use updated sn_dbc
-
-## [0.81.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.29...sn_cli-v0.81.30) - 2023-09-11
-
-### Other
-- update dependencies
-
-## [0.81.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.28...sn_cli-v0.81.29) - 2023-09-11
-
-### Other
-- utilize stream encryptor
-
-## [0.81.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.27...sn_cli-v0.81.28) - 2023-09-11
-
-### Other
-- update dependencies
-
-## [0.81.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.26...sn_cli-v0.81.27) - 2023-09-08
-
-### Added
-- *(client)* repay for chunks if they cannot be validated
-
-### Fixed
-- *(client)* dont bail on failed upload before verify/repay
-
-### Other
-- *(client)* refactor to have permits at network layer
-- *(refactor)* remove wallet_client args from upload flow
-- *(refactor)* remove upload_chunks semaphore arg
-
-## [0.81.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.25...sn_cli-v0.81.26) - 2023-09-07
-
-### Other
-- update dependencies
-
-## [0.81.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.24...sn_cli-v0.81.25) - 2023-09-07
-
-### Other
-- update dependencies
-
-## [0.81.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.23...sn_cli-v0.81.24) - 2023-09-07
-
-### Other
-- update dependencies
-
-## [0.81.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.22...sn_cli-v0.81.23) - 2023-09-06
-
-### Other
-- update dependencies
-
-## [0.81.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.21...sn_cli-v0.81.22) - 2023-09-05
-
-### Other
-- update dependencies
-
-## [0.81.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.20...sn_cli-v0.81.21) - 2023-09-05
-
-### Other
-- update dependencies
-
-## [0.81.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.19...sn_cli-v0.81.20) - 2023-09-05
-
-### Other
-- update dependencies
-
-## [0.81.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.18...sn_cli-v0.81.19) - 2023-09-05
-
-### Added
-- *(cli)* properly init color_eyre, advise on hex parse fail
-
-## [0.81.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.17...sn_cli-v0.81.18) - 2023-09-05
-
-### Other
-- update dependencies
-
-## [0.81.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.16...sn_cli-v0.81.17) - 2023-09-04
-
-### Other
-- update dependencies
-
-## [0.81.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.15...sn_cli-v0.81.16) - 2023-09-04
-
-### Other
-- update dependencies
-
-## [0.81.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.14...sn_cli-v0.81.15) - 2023-09-04
-
-### Other
-- update dependencies
-
-## [0.81.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.13...sn_cli-v0.81.14) - 2023-09-04
-
-### Other
-- update dependencies
-
-## [0.81.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.12...sn_cli-v0.81.13) - 2023-09-02
-
-### Other
-- update dependencies
-
-## [0.81.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.11...sn_cli-v0.81.12) - 2023-09-01
-
-### Other
-- update dependencies
-
-## [0.81.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.10...sn_cli-v0.81.11) - 2023-09-01
-
-### Other
-- *(cli)* better formatting for elapsed time statements
-- *(transfers)* store dbcs by ref to avoid more clones
-
-## [0.81.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.9...sn_cli-v0.81.10) - 2023-09-01
-
-### Other
-- update dependencies
-
-## [0.81.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.8...sn_cli-v0.81.9) - 2023-09-01
-
-### Other
-- update dependencies
-
-## [0.81.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.7...sn_cli-v0.81.8) - 2023-08-31
-
-### Added
-- *(cli)* perform wallet actions without connecting to the network
-
-### Other
-- remove unused async
-
-## [0.81.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.6...sn_cli-v0.81.7) - 2023-08-31
-
-### Other
-- update dependencies
-
-## [0.81.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.5...sn_cli-v0.81.6) - 2023-08-31
-
-### Added
-- *(cli)* wallet cmd flag enabling to query a node's local wallet balance
-
-### Fixed
-- *(cli)* don't try to create wallet paths when checking balance
-
-## [0.81.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.4...sn_cli-v0.81.5) - 2023-08-31
-
-### Other
-- update dependencies
-
-## [0.81.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.3...sn_cli-v0.81.4) - 2023-08-31
-
-### Other
-- update dependencies
-
-## [0.81.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.2...sn_cli-v0.81.3) - 2023-08-31
-
-### Fixed
-- correct bench download calculation
-
-## [0.81.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.1...sn_cli-v0.81.2) - 2023-08-31
-
-### Other
-- update dependencies
-
-## [0.81.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.0...sn_cli-v0.81.1) - 2023-08-31
-
-### Added
-- *(cli)* expose 'concurrency' flag
-- *(cli)* increase put parallelisation
-
-### Other
-- *(client)* improve download concurrency.
-
-## [0.81.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.64...sn_cli-v0.81.0) - 2023-08-30
-
-### Added
-- refactor to allow greater upload parallelisation
-- one transfer per data set, mapped dbcs to content addrs
-- [**breaking**] pay each chunk holder direct
-- feat!(protocol): get price and pay for each chunk individually
-- feat!(protocol): remove chunk merkletree to simplify payment
-
-### Fixed
-- *(tokio)* remove tokio fs
-
-### Other
-- *(deps)* bump tokio to 1.32.0
-- *(client)* refactor client wallet to reduce dbc clones
-- *(client)* pass around content payments map mut ref
-- *(client)* reduce transferoutputs cloning
-
-## [0.80.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.63...sn_cli-v0.80.64) - 2023-08-30
-
-### Other
-- update dependencies
-
-## [0.80.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.62...sn_cli-v0.80.63) - 2023-08-30
-
-### Other
-- update dependencies
-
-## [0.80.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.61...sn_cli-v0.80.62) - 2023-08-29
-
-### Other
-- update dependencies
-
-## [0.80.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.60...sn_cli-v0.80.61) - 2023-08-25
-
-### Other
-- update dependencies
-
-## [0.80.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.59...sn_cli-v0.80.60) - 2023-08-24
-
-### Other
-- *(cli)* verify bench uploads once more
-
-## [0.80.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.58...sn_cli-v0.80.59) - 2023-08-24
-
-### Other
-- rust 1.72.0 fixes
-
-## [0.80.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.57...sn_cli-v0.80.58) - 2023-08-24
-
-### Other
-- update dependencies
-
-## [0.80.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.56...sn_cli-v0.80.57) - 2023-08-22
-
-### Other
-- update dependencies
-
-## [0.80.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.55...sn_cli-v0.80.56) - 2023-08-22
-
-### Fixed
-- fixes to allow upload file works properly
-
-## [0.80.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.54...sn_cli-v0.80.55) - 2023-08-21
-
-### Other
-- update dependencies
-
-## [0.80.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.53...sn_cli-v0.80.54) - 2023-08-21
-
-### Other
-- update dependencies
-
-## [0.80.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.52...sn_cli-v0.80.53) - 2023-08-18
-
-### Other
-- update dependencies
-
-## [0.80.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.51...sn_cli-v0.80.52) - 2023-08-18
-
-### Other
-- update dependencies
-
-## [0.80.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.50...sn_cli-v0.80.51) - 2023-08-17
-
-### Other
-- update dependencies
-
-## [0.80.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.49...sn_cli-v0.80.50) - 2023-08-17
-
-### Other
-- update dependencies
-
-## [0.80.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.48...sn_cli-v0.80.49) - 2023-08-17
-
-### Other
-- update dependencies
-
-## [0.80.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.47...sn_cli-v0.80.48) - 2023-08-17
-
-### Fixed
-- avoid download bench result polluted
-
-### Other
-- more client logs
-
-## [0.80.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.46...sn_cli-v0.80.47) - 2023-08-16
-
-### Other
-- update dependencies
-
-## [0.80.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.45...sn_cli-v0.80.46) - 2023-08-16
-
-### Other
-- update dependencies
-
-## [0.80.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.44...sn_cli-v0.80.45) - 2023-08-16
-
-### Other
-- optimize benchmark flow
-
-## [0.80.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.43...sn_cli-v0.80.44) - 2023-08-15
-
-### Other
-- update dependencies
-
-## [0.80.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.42...sn_cli-v0.80.43) - 2023-08-14
-
-### Other
-- update dependencies
-
-## [0.80.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.41...sn_cli-v0.80.42) - 2023-08-14
-
-### Other
-- update dependencies
-
-## [0.80.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.40...sn_cli-v0.80.41) - 2023-08-11
-
-### Other
-- *(cli)* print cost info
-
-## [0.80.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.39...sn_cli-v0.80.40) - 2023-08-11
-
-### Other
-- update dependencies
-
-## [0.80.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.38...sn_cli-v0.80.39) - 2023-08-10
-
-### Other
-- update dependencies
-
-## [0.80.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.37...sn_cli-v0.80.38) - 2023-08-10
-
-### Other
-- update dependencies
-
-## [0.80.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.36...sn_cli-v0.80.37) - 2023-08-09
-
-### Other
-- update dependencies
-
-## [0.80.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.35...sn_cli-v0.80.36) - 2023-08-08
-
-### Fixed
-- *(cli)* remove manual faucet claim from benchmarking.
-- *(node)* prevent panic in storage calcs
-
-### Other
-- *(cli)* get more money for benching
-- log bench errors
-
-## [0.80.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.34...sn_cli-v0.80.35) - 2023-08-07
-
-### Other
-- update dependencies
-
-## [0.80.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.33...sn_cli-v0.80.34) - 2023-08-07
-
-### Other
-- *(node)* dont verify during benchmarks
-
-## [0.80.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.32...sn_cli-v0.80.33) - 2023-08-07
-
-### Added
-- rework register addresses to include pk
-
-### Other
-- cleanup comments and names
-
-## [0.80.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.31...sn_cli-v0.80.32) - 2023-08-07
-
-### Other
-- update dependencies
-
-## [0.80.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.30...sn_cli-v0.80.31) - 2023-08-04
-
-### Other
-- update dependencies
-
-## [0.80.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.29...sn_cli-v0.80.30) - 2023-08-04
-
-### Other
-- update dependencies
-
-## [0.80.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.28...sn_cli-v0.80.29) - 2023-08-03
-
-### Other
-- update dependencies
-
-## [0.80.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.27...sn_cli-v0.80.28) - 2023-08-03
-
-### Other
-- update dependencies
-
-## [0.80.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.26...sn_cli-v0.80.27) - 2023-08-03
-
-### Other
-- update dependencies
-
-## [0.80.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.25...sn_cli-v0.80.26) - 2023-08-03
-
-### Other
-- update dependencies
-
-## [0.80.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.24...sn_cli-v0.80.25) - 2023-08-03
-
-### Other
-- update dependencies
-
-## [0.80.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.23...sn_cli-v0.80.24) - 2023-08-02
-
-### Other
-- update dependencies
-
-## [0.80.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.22...sn_cli-v0.80.23) - 2023-08-02
-
-### Other
-- update dependencies
-
-## [0.80.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.21...sn_cli-v0.80.22) - 2023-08-01
-
-### Other
-- update dependencies
-
-## [0.80.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.20...sn_cli-v0.80.21) - 2023-08-01
-
-### Other
-- update dependencies
-
-## [0.80.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.19...sn_cli-v0.80.20) - 2023-08-01
-
-### Other
-- update dependencies
-
-## [0.80.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.18...sn_cli-v0.80.19) - 2023-08-01
-
-### Other
-- update dependencies
-
-## [0.80.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.17...sn_cli-v0.80.18) - 2023-08-01
-
-### Added
-- *(cli)* add no-verify flag to cli
-
-### Other
-- *(cli)* update logs and ci for payments
-
-## [0.80.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.16...sn_cli-v0.80.17) - 2023-08-01
-
-### Other
-- update dependencies
-
-## [0.80.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.15...sn_cli-v0.80.16) - 2023-07-31
-
-### Other
-- update dependencies
-
-## [0.80.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.14...sn_cli-v0.80.15) - 2023-07-31
-
-### Other
-- update dependencies
-
-## [0.80.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.13...sn_cli-v0.80.14) - 2023-07-31
-
-### Other
-- update dependencies
-
-## [0.80.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.12...sn_cli-v0.80.13) - 2023-07-31
-
-### Other
-- update dependencies
-
-## [0.80.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.11...sn_cli-v0.80.12) - 2023-07-28
-
-### Other
-- update dependencies
-
-## [0.80.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.10...sn_cli-v0.80.11) - 2023-07-28
-
-### Other
-- update dependencies
-
-## [0.80.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.9...sn_cli-v0.80.10) - 2023-07-28
-
-### Other
-- update dependencies
-
-## [0.80.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.8...sn_cli-v0.80.9) - 2023-07-28
-
-### Other
-- update dependencies
-
-## [0.80.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.7...sn_cli-v0.80.8) - 2023-07-27
-
-### Other
-- update dependencies
-
-## [0.80.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.6...sn_cli-v0.80.7) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.5...sn_cli-v0.80.6) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.4...sn_cli-v0.80.5) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.3...sn_cli-v0.80.4) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.2...sn_cli-v0.80.3) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.1...sn_cli-v0.80.2) - 2023-07-26
-
-### Other
-- update dependencies
-
-## [0.80.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.0...sn_cli-v0.80.1) - 2023-07-25
-
-### Other
-- update dependencies
-
-## [0.80.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.32...sn_cli-v0.80.0) - 2023-07-21
-
-### Added
-- *(cli)* allow to pass the hex-encoded DBC as arg
-- *(protocol)* [**breaking**] make Chunks storage payment required
-
-## [0.79.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.31...sn_cli-v0.79.32) - 2023-07-20
-
-### Other
-- update dependencies
-
-## [0.79.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.30...sn_cli-v0.79.31) - 2023-07-20
-
-### Other
-- update dependencies
-
-## [0.79.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.29...sn_cli-v0.79.30) - 2023-07-19
-
-### Other
-- update dependencies
-
-## [0.79.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.28...sn_cli-v0.79.29) - 2023-07-19
-
-### Other
-- update dependencies
-
-## [0.79.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.27...sn_cli-v0.79.28) - 2023-07-19
-
-### Other
-- update dependencies
-
-## [0.79.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.26...sn_cli-v0.79.27) - 2023-07-19
-
-### Other
-- update dependencies
-
-## [0.79.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.25...sn_cli-v0.79.26) - 2023-07-18
-
-### Other
-- update dependencies
-
-## [0.79.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.24...sn_cli-v0.79.25) - 2023-07-18
-
-### Other
-- update dependencies
-
-## [0.79.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.23...sn_cli-v0.79.24) - 2023-07-18
-
-### Fixed
-- client
-
-## [0.79.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.22...sn_cli-v0.79.23) - 2023-07-18
-
-### Other
-- update dependencies
-
-## [0.79.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.21...sn_cli-v0.79.22) - 2023-07-17
-
-### Fixed
-- *(cli)* add more context when failing to decode a wallet
-
-## [0.79.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.20...sn_cli-v0.79.21) - 2023-07-17
-
-### Other
-- update dependencies
-
-## [0.79.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.19...sn_cli-v0.79.20) - 2023-07-17
-
-### Added
-- *(networking)* upgrade to libp2p 0.52.0
-
-## [0.79.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.18...sn_cli-v0.79.19) - 2023-07-17
-
-### Added
-- *(client)* keep storage payment proofs in local wallet
-
-## [0.79.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.17...sn_cli-v0.79.18) - 2023-07-13
-
-### Other
-- update dependencies
-
-## [0.79.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.16...sn_cli-v0.79.17) - 2023-07-13
-
-### Other
-- update dependencies
-
-## [0.79.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.15...sn_cli-v0.79.16) - 2023-07-12
-
-### Other
-- client to upload paid chunks in batches
-- chunk files only once when making payment for their storage
-
-## [0.79.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.14...sn_cli-v0.79.15) - 2023-07-11
-
-### Other
-- update dependencies
-
-## [0.79.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.13...sn_cli-v0.79.14) - 2023-07-11
-
-### Fixed
-- *(client)* publish register on creation
-
-## [0.79.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.12...sn_cli-v0.79.13) - 2023-07-11
-
-### Other
-- update dependencies
-
-## [0.79.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.11...sn_cli-v0.79.12) - 2023-07-11
-
-### Other
-- update dependencies
-
-## [0.79.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.10...sn_cli-v0.79.11) - 2023-07-11
-
-### Other
-- update dependencies
-
-## [0.79.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.9...sn_cli-v0.79.10) - 2023-07-10
-
-### Other
-- update dependencies
-
-## [0.79.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.8...sn_cli-v0.79.9) - 2023-07-10
-
-### Other
-- update dependencies
-
-## [0.79.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.7...sn_cli-v0.79.8) - 2023-07-10
-
-### Added
-- faucet server and cli DBC read
-
-### Fixed
-- use Deposit --stdin instead of Read in cli
-- wallet store
-
-## [0.79.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.6...sn_cli-v0.79.7) - 2023-07-10
-
-### Other
-- update dependencies
-
-## [0.79.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.5...sn_cli-v0.79.6) - 2023-07-07
-
-### Other
-- update dependencies
-
-## [0.79.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.4...sn_cli-v0.79.5) - 2023-07-07
-
-### Other
-- update dependencies
-
-## [0.79.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.3...sn_cli-v0.79.4) - 2023-07-07
-
-### Other
-- update dependencies
-
-## [0.79.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.2...sn_cli-v0.79.3) - 2023-07-07
-
-### Other
-- update dependencies
-
-## [0.79.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.1...sn_cli-v0.79.2) - 2023-07-06
-
-### Other
-- update dependencies
-
-## [0.79.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.0...sn_cli-v0.79.1) - 2023-07-06
-
-### Other
-- update dependencies
-
-## [0.79.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.26...sn_cli-v0.79.0) - 2023-07-06
-
-### Added
-- introduce `--log-format` arguments
-- provide `--log-output-dest` arg for `safe`
-- provide `--log-output-dest` arg for `safenode`
-
-### Other
-- use data-dir rather than root-dir
-- incorporate various feedback items
-
-## [0.78.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.25...sn_cli-v0.78.26) - 2023-07-05
-
-### Other
-- update dependencies
-
-## [0.78.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.24...sn_cli-v0.78.25) - 2023-07-05
-
-### Other
-- update dependencies
-
-## [0.78.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.23...sn_cli-v0.78.24) - 2023-07-05
-
-### Other
-- update dependencies
-
-## [0.78.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.22...sn_cli-v0.78.23) - 2023-07-04
-
-### Other
-- update dependencies
-
-## [0.78.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.21...sn_cli-v0.78.22) - 2023-07-03
-
-### Other
-- reduce SAMPLE_SIZE for the data_with_churn test
-
-## [0.78.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.20...sn_cli-v0.78.21) - 2023-06-29
-
-### Other
-- update dependencies
-
-## [0.78.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.19...sn_cli-v0.78.20) - 2023-06-29
-
-### Other
-- update dependencies
-
-## [0.78.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.18...sn_cli-v0.78.19) - 2023-06-28
-
-### Other
-- update dependencies
-
-## [0.78.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.17...sn_cli-v0.78.18) - 2023-06-28
-
-### Added
-- register refactor, kad reg without cmds
-
-## [0.78.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.16...sn_cli-v0.78.17) - 2023-06-28
-
-### Other
-- update dependencies
-
-## [0.78.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.15...sn_cli-v0.78.16) - 2023-06-28
-
-### Other
-- update dependencies
-
-## [0.78.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.14...sn_cli-v0.78.15) - 2023-06-27
-
-### Other
-- update dependencies
-
-## [0.78.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.13...sn_cli-v0.78.14) - 2023-06-27
-
-### Other
-- update dependencies
-
-## [0.78.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.12...sn_cli-v0.78.13) - 2023-06-27
-
-### Other
-- benchmark client download
-
-## [0.78.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.11...sn_cli-v0.78.12) - 2023-06-26
-
-### Other
-- update dependencies
-
-## [0.78.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.10...sn_cli-v0.78.11) - 2023-06-26
-
-### Other
-- update dependencies
-
-## [0.78.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.9...sn_cli-v0.78.10) - 2023-06-26
-
-### Other
-- update dependencies
-
-## [0.78.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.8...sn_cli-v0.78.9) - 2023-06-26
-
-### Other
-- update dependencies
-
-## [0.78.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.7...sn_cli-v0.78.8) - 2023-06-26
-
-### Other
-- update dependencies
-
-## [0.78.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.6...sn_cli-v0.78.7) - 2023-06-24
-
-### Other
-- update dependencies
-
-## [0.78.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.5...sn_cli-v0.78.6) - 2023-06-23
-
-### Other
-- update dependencies
-
-## [0.78.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.4...sn_cli-v0.78.5) - 2023-06-23
-
-### Other
-- update dependencies
-
-## [0.78.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.3...sn_cli-v0.78.4) - 2023-06-23
-
-### Other
-- update dependencies
-
-## [0.78.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.2...sn_cli-v0.78.3) - 2023-06-23
-
-### Other
-- update dependencies
-
-## [0.78.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.1...sn_cli-v0.78.2) - 2023-06-22
-
-### Other
-- update dependencies
-
-## [0.78.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.0...sn_cli-v0.78.1) - 2023-06-22
-
-### Other
-- *(client)* initial refactor around uploads
-
-## [0.78.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.49...sn_cli-v0.78.0) - 2023-06-22
-
-### Added
-- use standardised directories for files/wallet commands
-
-## [0.77.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.48...sn_cli-v0.77.49) - 2023-06-21
-
-### Other
-- update dependencies
-
-## [0.77.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.47...sn_cli-v0.77.48) - 2023-06-21
-
-### Other
-- update dependencies
-
-## [0.77.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.46...sn_cli-v0.77.47) - 2023-06-21
-
-### Other
-- *(node)* obtain parent_tx from SignedSpend
-
-## [0.77.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.45...sn_cli-v0.77.46) - 2023-06-21
-
-### Added
-- provide option for log output in json
-
-## [0.77.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.44...sn_cli-v0.77.45) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.43...sn_cli-v0.77.44) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.42...sn_cli-v0.77.43) - 2023-06-20
-
-### Other
-- include the Tx instead of output DBCs as part of storage payment proofs
-- use a set to collect Chunks addrs for build payment proof
-
-## [0.77.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.41...sn_cli-v0.77.42) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.40...sn_cli-v0.77.41) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.39...sn_cli-v0.77.40) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.38...sn_cli-v0.77.39) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.37...sn_cli-v0.77.38) - 2023-06-20
-
-### Other
-- update dependencies
-
-## [0.77.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.36...sn_cli-v0.77.37) - 2023-06-19
-
-### Other
-- update dependencies
-
-## [0.77.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.35...sn_cli-v0.77.36) - 2023-06-19
-
-### Other
-- update dependencies
-
-## [0.77.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.34...sn_cli-v0.77.35) - 2023-06-19
-
-### Other
-- update dependencies
-
-## [0.77.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.33...sn_cli-v0.77.34) - 2023-06-19
-
-### Other
-- update dependencies
-
-## [0.77.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.32...sn_cli-v0.77.33) - 2023-06-19
-
-### Other
-- update dependencies
-
-## [0.77.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.31...sn_cli-v0.77.32) - 2023-06-19
-
-### Fixed
-- *(safe)* check if upload path contains a file
-
-## [0.77.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.30...sn_cli-v0.77.31) - 2023-06-16
-
-### Fixed
-- CLI is missing local-discovery feature
-
-## [0.77.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.29...sn_cli-v0.77.30) - 2023-06-16
-
-### Other
-- update dependencies
-
-## [0.77.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.28...sn_cli-v0.77.29) - 2023-06-16
-
-### Other
-- update dependencies
-
-## [0.77.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.27...sn_cli-v0.77.28) - 2023-06-16
-
-### Other
-- improve memory benchmarks, remove broken download bench
-
-## [0.77.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.26...sn_cli-v0.77.27) - 2023-06-16
-
-### Other
-- update dependencies
-
-## [0.77.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.25...sn_cli-v0.77.26) - 2023-06-16
-
-### Fixed
-- *(bin)* negate local-discovery check
-
-## [0.77.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.24...sn_cli-v0.77.25) - 2023-06-16
-
-### Other
-- update dependencies
-
-## [0.77.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.23...sn_cli-v0.77.24) - 2023-06-15
-
-### Other
-- update dependencies
-
-## [0.77.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.22...sn_cli-v0.77.23) - 2023-06-15
-
-### Fixed
-- parent spend issue
-
-## [0.77.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.21...sn_cli-v0.77.22) - 2023-06-15
-
-### Other
-- update dependencies
-
-## [0.77.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.20...sn_cli-v0.77.21) - 2023-06-15
-
-### Other
-- update dependencies
-
-## [0.77.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.19...sn_cli-v0.77.20) - 2023-06-15
-
-### Other
-- update dependencies
-
-## [0.77.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.18...sn_cli-v0.77.19) - 2023-06-15
-
-### Other
-- use throughput for benchmarking
-
-## [0.77.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.17...sn_cli-v0.77.18) - 2023-06-15
-
-### Other
-- add initial benchmarks for prs and chart generation
-
-## [0.77.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.16...sn_cli-v0.77.17) - 2023-06-14
-
-### Added
-- include output DBC within payment proof for Chunks storage
-
-## [0.77.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.15...sn_cli-v0.77.16) - 2023-06-14
-
-### Other
-- update dependencies
-
-## [0.77.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.14...sn_cli-v0.77.15) - 2023-06-14
-
-### Other
-- use clap env and parse multiaddr
-
-## [0.77.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.13...sn_cli-v0.77.14) - 2023-06-14
-
-### Added
-- *(client)* expose req/resp timeout to client cli
-
-### Other
-- *(client)* parse duration in clap derivation
-
-## [0.77.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.12...sn_cli-v0.77.13) - 2023-06-13
-
-### Other
-- update dependencies
-
-## [0.77.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.11...sn_cli-v0.77.12) - 2023-06-13
-
-### Other
-- update dependencies
-
-## [0.77.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.10...sn_cli-v0.77.11) - 2023-06-12
-
-### Other
-- update dependencies
-
-## [0.77.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.9...sn_cli-v0.77.10) - 2023-06-12
-
-### Other
-- update dependencies
-
-## [0.77.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.8...sn_cli-v0.77.9) - 2023-06-09
-
-### Other
-- improve documentation for cli commands
-
-## [0.77.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.7...sn_cli-v0.77.8) - 2023-06-09
-
-### Other
-- manually change crate version
-
-## [0.77.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.6...sn_cli-v0.77.7) - 2023-06-09
-
-### Other
-- update dependencies
-
-## [0.77.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.5...sn_cli-v0.77.6) - 2023-06-09
-
-### Other
-- emit git info with vergen
-
-## [0.77.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.4...sn_cli-v0.77.5) - 2023-06-09
-
-### Other
-- update dependencies
-
-## [0.77.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.3...sn_cli-v0.77.4) - 2023-06-09
-
-### Other
-- provide clarity on command arguments
-
-## [0.77.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.2...sn_cli-v0.77.3) - 2023-06-08
-
-### Other
-- update dependencies
-
-## [0.77.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.1...sn_cli-v0.77.2) - 2023-06-08
-
-### Other
-- improve documentation for cli arguments
-
-## [0.77.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.0...sn_cli-v0.77.1) - 2023-06-07
-
-### Added
-- making the CLI --peer arg global so it can be passed in any order
-- bail out if empty list of addresses is provided for payment proof generation
-- *(client)* add progress indicator for initial network connections
-
attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate payment the payment proof tree -- Chunk storage payment and building payment proofs - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- improve CLI --peer arg doc -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- *(logs)* enable metrics feature by default -- small log wording updates -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml deleted file mode 100644 index 0b130d77e4..0000000000 --- a/sn_cli/Cargo.toml +++ /dev/null @@ -1,86 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "Safe Network CLI" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_cli" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.95.3" - -[[bin]] -path = "src/bin/main.rs" -name = "safe" - -[[bench]] -name = "files" -harness = false - -[features] -default = ["metrics"] -distribution = ["base64", "bitcoin"] -local = ["sn_client/local", "sn_peers_acquisition/local"] -metrics = ["sn_logging/process-metrics"] -network-contacts = ["sn_peers_acquisition/network-contacts"] -nightly = [] -open-metrics = ["sn_client/open-metrics"] - -[dependencies] -aes = "0.7.5" -base64 = { version = "0.22.0", optional = true } -bitcoin = { version = "0.31.0", optional = true } -block-modes = "0.8.1" -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -custom_debug = "~0.6.1" -chrono = "~0.4.19" -clap = { version = "4.2.1", features = ["derive"] } -color-eyre = "~0.6" -dialoguer = "~0.11.0" -dirs-next = "~2.0.0" -futures = "~0.3.13" -hex = "~0.4.3" -indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.54.1", features = ["identify", "kad"] } -rand = "0.8.5" -rayon = "1.8.0" -reqwest = { version = "0.12.2", default-features = false, features = [ - "rustls-tls-manual-roots", -] } -rmp-serde = "1.1.1" -rpassword = "7.3.1" -serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -tempfile = "3.6.0" -tiny-keccak = "~2.0.2" -tokio = { version = "1.32.0", features = [ - "io-util", - "macros", - "parking_lot", - "rt", - "sync", - "time", - "fs", -] } -tracing = { version = "~0.1.26" } -url = "2.4.0" -walkdir = "~2.5.0" -xor_name = "5.0.0" - -[dev-dependencies] -eyre = "0.6.8" -criterion = "0.5.1" -tempfile = "3.6.0" -rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.110.4", features = [ - "test-utils", 
-] }
-
-[lints]
-workspace = true
diff --git a/sn_cli/README.md b/sn_cli/README.md
deleted file mode 100644
index f1a2f29edf..0000000000
--- a/sn_cli/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# sn_cli
-
-This directory contains the `safe` client binary. It is used to interact with the Safe Network and provides a range of commands for managing data, keys, wallets, and more.
-
-The `safe` binary includes the following subcommands:
-
-- `wallet`: Commands for wallet management. This includes creating wallets, checking balances, and making transactions.
-- `files`: Commands for file management. This includes uploading, downloading, and deleting files.
-- `register`: Commands for register management. This includes creating, reading, and writing to registers.
diff --git a/sn_cli/benches/files.rs b/sn_cli/benches/files.rs
deleted file mode 100644
index cece183f5c..0000000000
--- a/sn_cli/benches/files.rs
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use rand::{thread_rng, Rng};
-use rayon::prelude::{IntoParallelIterator, ParallelIterator};
-use std::{
-    fs::File,
-    io::Write,
-    path::{Path, PathBuf},
-    process::{exit, Command},
-    time::Duration,
-};
-use tempfile::tempdir;
-
-const SAMPLE_SIZE: usize = 20;
-
-// This procedure includes the client startup, which will be measured by criterion as well.
-// A normal user won't care much about the initial client startup, but will be more alert to
-// communication speed during transmission, so it is better to execute the bench test with
-// `local` to make the measurement results reflect speed improvements or regressions more
-// accurately.
-fn safe_files_upload(dir: &str) {
-    let output = Command::new("./target/release/safe")
-        .arg("files")
-        .arg("upload")
-        .arg(dir)
-        .arg("--retry-strategy") // minimal retries
-        .arg("quick")
-        .output()
-        .expect("Failed to execute command");
-
-    if !output.status.success() {
-        let err = output.stderr;
-        let err_string = String::from_utf8(err).expect("Failed to parse error string");
-        panic!("Upload command executed with failing error code: {err_string:?}");
-    }
-}
-
-fn safe_files_download() {
-    let output = Command::new("./target/release/safe")
-        .arg("files")
-        .arg("download")
-        .output()
-        .expect("Failed to execute command");
-
-    if !output.status.success() {
-        let err = output.stderr;
-        let err_string = String::from_utf8(err).expect("Failed to parse error string");
-        panic!("Download command executed with failing error code: {err_string:?}");
-    }
-}
-
-fn generate_file(path: &PathBuf, file_size_mb: usize) {
-    let mut file = File::create(path).expect("Failed to create file");
-    let mut rng = thread_rng();
-
-    // can create a [u8; 32] at most at a time.
Thus each mb has 1024*32 such small chunks - let n_small_chunks = file_size_mb * 1024 * 32; - for _ in 0..n_small_chunks { - let random_data: [u8; 32] = rng.gen(); - file.write_all(&random_data) - .expect("Failed to write to file"); - } - let size = file.metadata().expect("Failed to get metadata").len() as f64 / (1024 * 1024) as f64; - assert_eq!(file_size_mb as f64, size); -} - -fn fund_cli_wallet() { - let _ = Command::new("./target/release/safe") - .arg("wallet") - .arg("get-faucet") - .arg("127.0.0.1:8000") - .output() - .expect("Failed to execute 'safe wallet get-faucet' command"); -} - -fn criterion_benchmark(c: &mut Criterion) { - // Check if the binary exists - if !Path::new("./target/release/safe").exists() { - eprintln!("Error: Binary ./target/release/safe does not exist. Please make sure to compile your project first"); - exit(1); - } - - let sizes: [u64; 2] = [1, 10]; // File sizes in MB. Add more sizes as needed - - for size in sizes.iter() { - let temp_dir = tempdir().expect("Failed to create temp dir"); - let temp_dir_path = temp_dir.into_path(); - let temp_dir_path_str = temp_dir_path.to_str().expect("Invalid unicode encountered"); - - // create 23 random files. This is to keep the benchmark results consistent with prior runs. The change to make - // use of ChunkManager means that we don't upload the same file twice and the `uploaded_files` file is now read - // as a set and we don't download the same file twice. Hence create 23 files as counted from the logs - // pre ChunkManager change. - (0..23).into_par_iter().for_each(|idx| { - let path = temp_dir_path.join(format!("random_file_{size}_mb_{idx}")); - generate_file(&path, *size as usize); - }); - fund_cli_wallet(); - - // Wait little bit for the fund to be settled. - std::thread::sleep(Duration::from_secs(10)); - - let mut group = c.benchmark_group(format!("Upload Benchmark {size}MB")); - group.sampling_mode(criterion::SamplingMode::Flat); - // One sample may compose of multiple iterations, and this is decided by `measurement_time`. - // Set this to a lower value to ensure each sample only contains one iteration. - // To ensure the download throughput calculation is correct. - group.measurement_time(Duration::from_secs(5)); - group.warm_up_time(Duration::from_secs(5)); - group.sample_size(SAMPLE_SIZE); - - // Set the throughput to be reported in terms of bytes - group.throughput(Throughput::Bytes(size * 1024 * 1024)); - let bench_id = format!("safe files upload {size}mb"); - group.bench_function(bench_id, |b| { - b.iter(|| safe_files_upload(temp_dir_path_str)) - }); - group.finish(); - } - - let mut group = c.benchmark_group("Download Benchmark".to_string()); - group.sampling_mode(criterion::SamplingMode::Flat); - group.measurement_time(Duration::from_secs(10)); - group.warm_up_time(Duration::from_secs(5)); - - // The download will download all uploaded files during bench. - // If the previous bench executed with the default 100 sample size, - // there will then be around 1.1GB in total, and may take around 40s for each iteratioin. - // Hence we have to reduce the number of iterations from the default 100 to 10, - // To avoid the benchmark test taking over one hour to complete. - // - // During `measurement_time` and `warm_up_time`, there will be one upload run for each. - // Which means two additional `uploaded_files` created and for downloading. 
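// Worked example of the arithmetic below, assuming this bench's hard-coded values
// (sizes = [1, 10] MB, SAMPLE_SIZE = 20): one download iteration fetches every
// previously uploaded file, i.e. (20 + 2) * 1 + (20 + 2) * 10 = 242 MB, which is
// the `total_size` (in MB) computed next and reported to criterion via
// Throughput::Bytes(total_size * 1024 * 1024).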
- let total_size: u64 = sizes - .iter() - .map(|size| (SAMPLE_SIZE as u64 + 2) * size) - .sum(); - group.sample_size(SAMPLE_SIZE / 2); - - // Set the throughput to be reported in terms of bytes - group.throughput(Throughput::Bytes(total_size * 1024 * 1024)); - let bench_id = "safe files download".to_string(); - group.bench_function(bench_id, |b| b.iter(safe_files_download)); - group.finish(); -} - -criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); diff --git a/sn_cli/src/acc_packet.rs b/sn_cli/src/acc_packet.rs deleted file mode 100644 index a9430e3449..0000000000 --- a/sn_cli/src/acc_packet.rs +++ /dev/null @@ -1,1603 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod change_tracking; - -use change_tracking::*; - -use super::{ - files::{download_file, FilesUploader}, - ChunkManager, -}; - -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, - protocol::storage::{Chunk, RegisterAddress, RetryStrategy}, - registers::EntryHash, - transfers::{DerivationIndex, MainSecretKey}, - Client, FilesApi, FolderEntry, FoldersApi, Metadata, UploadCfg, WalletClient, -}; - -use bls::PublicKey; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use std::{ - collections::{ - btree_map::{Entry, OccupiedEntry}, - BTreeMap, - }, - ffi::OsString, - fs::{create_dir_all, remove_dir_all, remove_file, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tokio::task::JoinSet; -use tracing::trace; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// Derivation index used to obtain the account packet root folder xorname -// TODO: use eip2333 path for deriving keys -const ACC_PACKET_ADDR_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x0; 32]); - -/// Derivation index used to obtain the owner key of the account packet root folder. -/// The derived key pair is used to: -/// - Sign all data operations sent to the network. -/// - Set it as the owner of all Folders (Registers) created on the network. -/// - Encrypt all the Folders entries metadata chunks. -// TODO: use eip2333 path for deriving keys -const ACC_PACKET_OWNER_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x1; 32]); - -/// An `AccountPacket` object allows users to store and manage files, wallets, etc., with the ability -/// and tools necessary to keep an instance tracking a local storage path, as well as keeping it in sync -/// with its remote version stored on the network. -/// A `Client` and a the location for a funded local hot-wallet are required by this object in order to be able to connect -/// to the network, paying for data storage, and upload/retrieve information to/from the network. -/// -/// TODO: currently only files and folders are supported, wallets, keys, etc., to be added later. -/// -/// TODO: make use of eip2333 paths for deriving keys. Currently keys used for encrypting and signing -/// operations are derived from the root key provided using index derivation. 
-///
-/// The `AccountPacket` keeps a reference to the network address of the root Folder holding the user's
-/// files/folder hierarchy. All tracking information is kept under the `.safe` directory on disk, whose
-/// content is not uploaded to the network, but only kept locally in order to realise which files/dirs
-/// the user has changed compared to their last version retrieved from the network.
-///
-/// A subdirectory called `metadata` is kept under the `.safe` directory with the following files:
-/// - A file named `root_folder.addr` which contains the network address where the root Folder is stored,
-///   which is the one holding the entire hierarchy of the user's files/dirs to be kept in sync with local
-///   changes made by the user.
-/// - For each of the user's files/dirs, a serialised `MetadataTrackingInfo` instance is stored, using the
-///   file/dir metadata chunk xorname as the filename. The information stored in these files is used to
-///   realise if changes were locally made by the user in comparison with the last version of such
-///   files/dirs retrieved from the network.
-///
-/// Example of files generated within an account-packet to keep track of changes made to the user's files/dirs:
-///
-/// ./my-acc-packet
-/// ├── my_dir_1
-/// ├── my_file.txt
-/// ├── my_dir_2
-/// │   ├── other_dir
-/// │   └── my_other_file.txt
-/// └── .safe
-///     ├── chunk_artifacts
-///     │   ├── ...
-///     │   ...
-///     ├── metadata
-///     │   ├── 082cc90c900fa08d36067246a1e6136a828f1aae4926268c4349c200d56e34b9
-///     │   ├── 102c5536a10682bc3cdd4a1915fe2ad5e839cb94d0d3f124d0c18aee1d49ce50
-///     │   ├── 31824937c47a979df64af591f2e43f76190e65af835c4b338cbe7a7ba3f7d3cb
-///     │   ├── 36778e471083140bc111677e2a86e49f4c0c20bc14ff2ad610e22615b72260b8
-///     │   ├── 3edd953cc320449e09b69b7b1b909a53874ee477f602f1a807dfd8057378367e
-///     │   └── root_folder.addr
-///     └── uploaded_files
-///         ├── ...
-///         ...
-///
-/// There are other files stored under the `.safe/chunk_artifacts` and `.safe/uploaded_files` directories
-/// which are managed by the `ChunkManager` in order to locally cache chunked files, and to keep a list of
-/// files already uploaded to the network, to prevent chunking and/or uploading the same files again. For
-/// more details about these files, please refer to the `ChunkManager` module.
-pub struct AccountPacket {
-    client: Client,
-    wallet_dir: PathBuf,
-    files_dir: PathBuf,
-    meta_dir: PathBuf,
-    tracking_info_dir: PathBuf,
-    curr_tracking_info: BTreeMap<PathBuf, MetadataTrackingInfo>,
-    root_folder_addr: RegisterAddress,
-    root_folder_created: bool,
-}
-
-impl AccountPacket {
-    /// Initialise a directory as a fresh new packet.
-    /// All keys used for encrypting the files/folders metadata chunks and signing
-    /// operations are derived from the root key provided, using index derivation.
-    /// The root Folder address and owner are also derived from the root SK.
-    /// A password can optionally be provided to encrypt the root SK before storing it on disk.
-    pub fn init(
-        client: Client,
-        wallet_dir: &Path,
-        path: &Path,
-        root_sk: &MainSecretKey,
-        password: Option<&[u8]>,
-    ) -> Result<Self> {
-        let (_, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?;
-
-        // If there is already some tracking info we bail out, as this is meant to be a fresh new packet.
- if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - bail!( - "The local path {path:?} is already being tracked with Folder address: {}", - addr.to_hex() - ); - } - - let (client, root_folder_addr) = derive_keys_and_address(client, root_sk); - store_root_folder_tracking_info(&meta_dir, root_folder_addr, false)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - Self::from_path(client, wallet_dir, path, password) - } - - /// Create AccountPacket instance from a directory which has been already initialised. - pub fn from_path( - client: Client, - wallet_dir: &Path, - path: &Path, - password: Option<&[u8]>, - ) -> Result { - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?; - let root_sk = read_root_sk(&tracking_info_dir, password)?; - let (client, root_folder_addr) = derive_keys_and_address(client, &root_sk); - - // this will fail if the directory was not previously initialised with 'init'. - let curr_tracking_info = read_tracking_info_from_disk(&meta_dir)?; - let (read_folder_addr, root_folder_created) = read_root_folder_addr(&meta_dir) - .map_err(|_| eyre!("Root Folder address not found, make sure the directory {path:?} is initialised."))?; - if read_folder_addr != root_folder_addr { - bail!( - "The path is already tracking another Folder with address: {}", - read_folder_addr.to_hex() - ); - } - - Ok(Self { - client, - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info, - root_folder_addr, - root_folder_created, - }) - } - - /// Return the address of the root Folder - pub fn root_folder_addr(&self) -> RegisterAddress { - self.root_folder_addr - } - - /// Retrieve and store entire Folders hierarchy from the network, generating tracking info. - pub async fn retrieve_folders( - client: &Client, - wallet_dir: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, - download_path: &Path, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - create_dir_all(download_path)?; - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(download_path)?; - - let (client, root_folder_addr) = derive_keys_and_address(client.clone(), root_sk); - - if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - // bail out if there is already a root folder address different from the passed in - if addr == root_folder_addr { - bail!("The download path is already tracking that Folder, use 'sync' instead."); - } else { - bail!( - "The download path is already tracking another Folder with address: {}", - addr.to_hex() - ); - } - } else { - store_root_folder_tracking_info(&meta_dir, root_folder_addr, true)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - } - - let mut acc_packet = Self { - client: client.clone(), - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info: BTreeMap::default(), - root_folder_addr, - root_folder_created: true, - }; - - let folder_name: OsString = download_path.file_name().unwrap_or_default().into(); - let folders_api = - FoldersApi::retrieve(client.clone(), wallet_dir, root_folder_addr).await?; - let folders_to_download = vec![(folder_name, folders_api, download_path.to_path_buf())]; - - let _ = acc_packet - .download_folders_and_files(folders_to_download, batch_size, retry_strategy) - .await?; - - acc_packet.curr_tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - - Ok(acc_packet) - } - - /// Generate a report with differences found in local files/folders in 
comparison with their versions stored on the network. - pub fn status(&self) -> Result<()> { - println!("Looking for local changes made to files/folders compared to version on network at: {} ...", self.root_folder_addr().to_hex()); - let changes = self.scan_files_and_folders_for_changes(false)?; - - if changes.mutations.is_empty() { - println!("No local changes made to files/folders."); - } else { - println!("Local changes made to files/folders:"); - changes.mutations.iter().for_each(|m| println!("{m}")); - - let num_of_changes = changes.mutations.len(); - println!("\nChanges found to local files/folders: {num_of_changes}"); - } - Ok(()) - } - - /// Sync local changes made to files and folder with their version on the network, - /// both pushing and pulling changes to/form the network. - pub async fn sync(&mut self, upload_cfg: UploadCfg, make_data_public: bool) -> Result<()> { - let ChangesToApply { folders, mutations } = - self.scan_files_and_folders_for_changes(make_data_public)?; - - if mutations.is_empty() { - println!("No local changes made to files/folders to be pushed to network."); - } else { - println!("Local changes made to files/folders to be synced with network:"); - mutations.iter().for_each(|m| println!("{m}")); - } - - println!("Paying for folders hierarchy and uploading..."); - let synced_folders = self - .pay_and_sync_folders(folders, upload_cfg, make_data_public) - .await?; - - // mark root folder as created if it wasn't already - if !self.root_folder_created { - self.root_folder_created = true; - store_root_folder_tracking_info( - &self.meta_dir, - self.root_folder_addr, - self.root_folder_created, - )?; - } - - // update tracking information based on mutations detected locally - for mutation in mutations { - match mutation { - Mutation::NewFile(tracking_info) | Mutation::NewFolder(tracking_info) => { - self.store_tracking_info(tracking_info)?; - } - Mutation::FileRemoved((_, meta_xorname)) - | Mutation::FolderRemoved((_, meta_xorname)) => { - self.remove_tracking_info(meta_xorname); - } - Mutation::FileContentChanged((meta_xorname, tracking_info)) => { - self.store_tracking_info(tracking_info)?; - self.remove_tracking_info(meta_xorname); - } - } - } - - // download files/folders which are new in the synced folders - let folders_to_download: Vec<_> = synced_folders - .iter() - .map(|(path, (folders_api, _))| { - let folder_name: OsString = path.file_name().unwrap_or_default().into(); - (folder_name, folders_api.clone(), path.clone()) - }) - .collect(); - let mut updated_folders = self - .download_folders_and_files( - folders_to_download, - upload_cfg.batch_size, - upload_cfg.retry_strategy, - ) - .await?; - - // Now let's check if any file/folder was removed remotely so we remove them locally from disk. - // We do it in two phases, first we get rid of all dirs that were removed, then we go through - // the files, this is to make sure we remove files which belong to nested folders being removed. 
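// To make the two phases concrete, a sketch using the example tree from the docs
// above: if `my_dir_2` was removed remotely, phase one removes the whole directory
// from disk and drops it from the updated Folders map; when phase two then reaches
// `my_dir_2/my_other_file.txt` the file is already gone, so the failing
// `remove_file` call is expected and only its tracking info gets dropped.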
- let mut curr_tracking_info = read_tracking_info_from_disk(&self.meta_dir)?; - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::Folder(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::File(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - - self.curr_tracking_info = curr_tracking_info; - - Ok(()) - } - - // Private helpers - - // Generate the path relative to the user's root folder - fn get_relative_path(&self, path: &Path) -> Result { - let relative_path = path - .to_path_buf() - .canonicalize()? - .strip_prefix(&self.files_dir)? - .to_path_buf(); - Ok(relative_path) - } - - // Store tracking info in a file to keep track of any changes made to the source file/folder - fn store_tracking_info( - &self, - MetadataTrackingInfo { - file_path, - meta_xorname, - metadata, - entry_hash, - }: MetadataTrackingInfo, - ) -> Result<()> { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - let mut meta_file = File::create(metadata_file_path)?; - - let tracking_info = MetadataTrackingInfo { - // we store the relative path so the root folder can be moved to - // different locations/paths if desired by the user. - file_path: self.get_relative_path(&file_path)?, - meta_xorname, - metadata, - entry_hash, - }; - - meta_file.write_all(&rmp_serde::to_vec(&tracking_info)?)?; - - Ok(()) - } - - // Remove tracking information file for given xorname - fn remove_tracking_info(&self, meta_xorname: XorName) { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - if let Err(err) = remove_file(&metadata_file_path) { - println!("Failed to remove tracking info file {metadata_file_path:?}: {err}"); - } - } - - // If the file/folder referenced by the tracking info provided is not part of the passed Folders - // hierarchy, remove it from local disk along with its tracking information. - // Returns whether the file/folder was removed. - fn remove_tracking_if_not_found_in_folders( - &self, - tracking_info: &MetadataTrackingInfo, - folders: &mut Folders, - ) -> bool { - let mut removed = false; - let abs_path = self.files_dir.join(&tracking_info.file_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_dir_all(&abs_path) { - trace!("Failed to remove directory {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - folders.remove(&abs_path); - removed = true; - } - } - } - FolderEntry::File(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_file(&abs_path) { - // this is expected if parent folder was just removed as part of this syncing flow. 
- trace!("Failed to remove file {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - } - } - - removed - } - - // Scan existing files and folders on disk, generating a report of all the detected - // changes based on the tracking info kept locally. - // If make_data_public is false the metadata chunks are encrypted. - fn scan_files_and_folders_for_changes(&self, make_data_public: bool) -> Result { - // we don't use the local cache in order to realise of any changes made to files content. - let mut chunk_manager = ChunkManager::new(&self.tracking_info_dir); - chunk_manager.chunk_with_iter(self.iter_only_files(), false, false)?; - - let encryption_pk = if make_data_public { - None - } else { - // we pass down the key to encrypt the metadata chunk of any new content detected. - Some(self.client.signer_pk()) - }; - - let mut changes = self.read_folders_hierarchy_from_disk(encryption_pk)?; - - // add chunked files to the corresponding Folders - let folders = &mut changes.folders; - for chunked_file in chunk_manager.iter_chunked_files() { - let file_path = &chunked_file.file_path; - if let Some(Entry::Occupied(mut parent_folder)) = file_path - .parent() - .map(|parent| folders.entry(parent.to_path_buf())) - { - // try to find the tracking info of the file/folder by its name - match self.get_tracking_info(file_path) { - Ok(Some(tracking_info)) => match &tracking_info.metadata.content { - FolderEntry::File(chunk) => { - if chunk.address() != &chunked_file.head_chunk_address { - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - - changes.mutations.push(Mutation::FileContentChanged(( - tracking_info.meta_xorname, - MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - }, - ))); - } - } - FolderEntry::Folder(_) => { - // New file found where there used to be a folder - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - }, - Ok(None) => { - let (entry_hash, meta_xorname, metadata) = - parent_folder.get_mut().0.add_file( - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - parent_folder.get_mut().1.has_new_entries(); - - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - Err(err) => { - println!("Skipping file {file_path:?}: {err:?}"); - } - } - } - } - - // now let's check if any file/folder was removed from disk - for (item_path, tracking_info) in self.curr_tracking_info.iter() { - let abs_path = self.files_dir.join(item_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - if !folders.contains_key(&abs_path) { - remove_from_parent(folders, &abs_path, tracking_info.entry_hash)?; - changes.mutations.push(Mutation::FolderRemoved(( - abs_path, - tracking_info.meta_xorname, - ))); - } - } - FolderEntry::File(_) => { - if chunk_manager - .iter_chunked_files() - .all(|chunked_file| chunked_file.file_path != abs_path) - { - remove_from_parent(folders, 
&abs_path, tracking_info.entry_hash)?;
-                        changes.mutations.push(Mutation::FileRemoved((
-                            abs_path,
-                            tracking_info.meta_xorname,
-                        )));
-                    }
-                }
-            }
-        }
-
-        Ok(changes)
-    }
-
-    // Build the Folders hierarchy from the files dir that has been set. The metadata chunk of
-    // every new folder will be encrypted if an encryption key has been provided.
-    fn read_folders_hierarchy_from_disk(
-        &self,
-        encryption_pk: Option<PublicKey>,
-    ) -> Result<ChangesToApply> {
-        let mut changes = ChangesToApply::default();
-        for (dir_path, depth, parent, dir_name) in self.iter_only_dirs().filter_map(|entry| {
-            entry.path().parent().map(|parent| {
-                (
-                    entry.path().to_path_buf(),
-                    entry.depth(),
-                    parent.to_owned(),
-                    entry.file_name().to_owned(),
-                )
-            })
-        }) {
-            let (folder, folder_change) = changes
-                .folders
-                .entry(dir_path.clone())
-                .or_insert(self.find_folder_in_tracking_info(&dir_path)?)
-                .clone();
-            let curr_folder_addr = *folder.address();
-
-            if depth > 0 {
-                let (parent_folder, parent_folder_change) = changes
-                    .folders
-                    .entry(parent.clone())
-                    .or_insert(self.find_folder_in_tracking_info(&parent)?);
-
-                if folder_change.is_new_folder() {
-                    let (entry_hash, meta_xorname, metadata) =
-                        parent_folder.add_folder(dir_name, curr_folder_addr, encryption_pk)?;
-                    parent_folder_change.has_new_entries();
-
-                    changes
-                        .mutations
-                        .push(Mutation::NewFolder(MetadataTrackingInfo {
-                            file_path: dir_path,
-                            meta_xorname,
-                            metadata,
-                            entry_hash,
-                        }));
-                }
-            }
-        }
-
-        Ok(changes)
-    }
-
-    // Read the local tracking info for a given file/folder item
-    fn get_tracking_info(&self, path: &Path) -> Result<Option<&MetadataTrackingInfo>> {
-        let path = self.get_relative_path(path)?;
-        Ok(self.curr_tracking_info.get(&path))
-    }
-
-    // Instantiate a FoldersApi based on the local tracking info for a given folder item
-    fn find_folder_in_tracking_info(&self, path: &Path) -> Result<(FoldersApi, FolderChange)> {
-        let mut folder_change = FolderChange::NewFolder;
-        let address = if path == self.files_dir {
-            if self.root_folder_created {
-                folder_change = FolderChange::NoChange;
-            }
-            Some(self.root_folder_addr)
-        } else {
-            self.get_tracking_info(path)?.and_then(|tracking_info| {
-                match tracking_info.metadata.content {
-                    FolderEntry::Folder(addr) => {
-                        folder_change = FolderChange::NoChange;
-                        Some(addr)
-                    }
-                    FolderEntry::File(_) => None,
-                }
-            })
-        };
-
-        let folders_api = FoldersApi::new(self.client.clone(), &self.wallet_dir, address)?;
-        Ok((folders_api, folder_change))
-    }
-
-    // Creates an iterator over the user's dir names, excluding the '.safe' tracking dir
-    fn iter_only_dirs(&self) -> impl Iterator<Item = DirEntry> {
-        WalkDir::new(&self.files_dir)
-            .into_iter()
-            .filter_entry(|e| e.file_type().is_dir() && e.file_name() != SAFE_TRACKING_CHANGES_DIR)
-            .flatten()
-    }
-
-    // Creates an iterator over the user's files, excluding the tracking files under the '.safe' dir
-    fn iter_only_files(&self) -> impl Iterator<Item = DirEntry> {
-        WalkDir::new(&self.files_dir)
-            .into_iter()
-            .filter_entry(|e| e.file_type().is_file() || e.file_name() != SAFE_TRACKING_CHANGES_DIR)
-            .flatten()
-            .filter(|e| e.file_type().is_file())
-    }
-
-    // Pay for and upload all the files and folders.
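// An outline of the flow implemented below (a summary added for clarity, not
// original code):
//   1. upload all chunked file content with a `FilesUploader`;
//   2. collect the network addresses to pay for: the Register address of each
//      brand-new Folder, plus each Folder's metadata chunk addresses still to
//      be paid;
//   3. make a single storage payment for all of them through the `WalletClient`;
//   4. sync each Folder with the network concurrently on a `JoinSet`.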
- async fn pay_and_sync_folders( - &self, - folders: Folders, - upload_cfg: UploadCfg, - make_data_public: bool, - ) -> Result { - let files_uploader = FilesUploader::new(self.client.clone(), self.wallet_dir.clone()) - .set_upload_cfg(upload_cfg) - .set_make_data_public(make_data_public) - .insert_entries(self.iter_only_files()); - let _summary = files_uploader.start_upload().await?; - - // Let's make the storage payment for Folders - let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(self.client.clone(), wallet); - let mut net_addresses = vec![]; - let mut new_folders = 0; - // let's collect list of addresses we need to pay for - folders.iter().for_each(|(_, (folder, folder_change))| { - if folder_change.is_new_folder() { - net_addresses.push(folder.as_net_addr()); - new_folders += 1; - } - net_addresses.extend(folder.meta_addrs_to_pay()); - }); - - let payment_result = wallet_client - .pay_for_storage(net_addresses.into_iter()) - .await?; - match payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - { - Some(cost) => { - let balance = wallet_client.balance(); - println!("Made payment of {cost} for {new_folders} Folders. New balance: {balance}",) - } - None => bail!("Failed to calculate total payment cost"), - } - - // Sync Folders concurrently now that payments have been made. - let mut tasks = JoinSet::new(); - for (path, (mut folder, folder_change)) in folders { - let op = if folder_change.is_new_folder() { - "Creation" - } else { - "Syncing" - }; - - tasks.spawn(async move { - match folder.sync(upload_cfg).await { - Ok(()) => { - println!( - "{op} of Folder (for {path:?}) succeeded. Address: {}", - folder.address().to_hex() - ); - } - Err(err) => { - println!("{op} of Folder (for {path:?}) failed: {err}") - } - } - (path, folder, folder_change) - }); - } - - let mut synced_folders = Folders::new(); - while let Some(res) = tasks.join_next().await { - match res { - Ok((path, folder, c)) => { - synced_folders.insert(path, (folder, c)); - } - Err(err) => { - println!("Failed to sync/create a Folder with/on the network: {err:?}"); - } - } - } - - Ok(synced_folders) - } - - // Download a Folders and their files from the network and generate tracking info - async fn download_folders_and_files( - &self, - mut folders_to_download: Vec<(OsString, FoldersApi, PathBuf)>, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - let mut files_to_download = vec![]; - let mut updated_folders = Folders::new(); - while let Some((name, mut folders_api, target_path)) = folders_to_download.pop() { - if updated_folders.contains_key(&target_path) { - // we've already downloaded this Folder - continue; - } - - println!( - "Downloading Folder {name:?} from {}", - folders_api.address().to_hex() - ); - self.download_folder_from_network( - &target_path, - &mut folders_api, - &mut files_to_download, - &mut folders_to_download, - ) - .await?; - updated_folders.insert(target_path, (folders_api, FolderChange::NoChange)); - } - - let files_api: FilesApi = FilesApi::new(self.client.clone(), self.files_dir.clone()); - for (file_name, data_map_chunk, path) in files_to_download { - download_file( - files_api.clone(), - *data_map_chunk.name(), - (file_name, Some(data_map_chunk)), - &path, - false, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(updated_folders) - } - - // Download a Folder from the network and generate tracking info - async fn download_folder_from_network( - &self, - target_path: 
&Path, - folders_api: &mut FoldersApi, - files_to_download: &mut Vec<(OsString, Chunk, PathBuf)>, - folders_to_download: &mut Vec<(OsString, FoldersApi, PathBuf)>, - ) -> Result<()> { - for (entry_hash, (meta_xorname, metadata)) in folders_api.entries().await?.into_iter() { - let name = metadata.name.clone(); - let item_path = target_path.join(name.clone()); - if let Ok(Some(tracking_info)) = self.get_tracking_info(&item_path) { - if tracking_info.meta_xorname == meta_xorname { - // thus we already have this same file/folder locally - continue; - } - } - - match &metadata.content { - FolderEntry::File(data_map_chunk) => { - files_to_download.push(( - name.clone().into(), - data_map_chunk.clone(), - target_path.to_path_buf(), - )); - let _ = File::create(&item_path)?; - } - FolderEntry::Folder(subfolder_addr) => { - let folders_api = FoldersApi::retrieve( - self.client.clone(), - &self.wallet_dir, - *subfolder_addr, - ) - .await?; - - folders_to_download.push((name.clone().into(), folders_api, item_path.clone())); - create_dir_all(&item_path)?; - } - }; - - self.store_tracking_info(MetadataTrackingInfo { - file_path: item_path, - meta_xorname, - metadata, - entry_hash, - })?; - } - - Ok(()) - } -} - -// Given an absolute path, find the Folder containing such item, and remove it from its entries. -fn remove_from_parent(folders: &mut Folders, path: &Path, entry_hash: EntryHash) -> Result<()> { - if let Some((parent_folder, folder_change)) = path.parent().and_then(|p| folders.get_mut(p)) { - folder_change.has_new_entries(); - parent_folder.remove_item(entry_hash)?; - } - Ok(()) -} - -// Replace a file/folder item from a given Folder (passed in as a container's OccupiedEntry'). -// The metadata chunk of the new item (folder/file) will be encrpyted if a key has been provided. -fn replace_item_in_folder( - folder: &mut OccupiedEntry<'_, PathBuf, (FoldersApi, FolderChange)>, - entry_hash: EntryHash, - file_name: OsString, - data_map: Chunk, - encryption_pk: Option, -) -> Result<(EntryHash, XorName, Metadata)> { - let (ref mut folders_api, ref mut folder_change) = folder.get_mut(); - folder_change.has_new_entries(); - let res = folders_api.replace_file( - entry_hash, - file_name.clone(), - data_map.clone(), - encryption_pk, - )?; - Ok(res) -} - -// Search for a file/folder item in its parent Folder by its name, returning its metadata chunk xorname. -fn find_by_name_in_parent_folder(name: &str, path: &Path, folders: &Folders) -> Option { - path.parent() - .and_then(|parent| folders.get(parent)) - .and_then(|(folder, _)| folder.find_by_name(name)) - .map(|(meta_xorname, _)| *meta_xorname) -} - -// Using the provided root SK, derive client signer SK and the root Folder address from it. -// It returns the Client updated with the derived signing key set, along with the derived Register address. -// TODO: use eip2333 path for deriving keys and address. -fn derive_keys_and_address( - mut client: Client, - root_sk: &MainSecretKey, -) -> (Client, RegisterAddress) { - // Set the client signer SK as a derived key from the root key. This will - // be used for signing operations and also for encrypting metadata chunks. - let signer_sk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key(); - client.set_signer_key(signer_sk); - - // Derive a key from the root key to generate the root Folder xorname, and use - // the client signer's corresponding PK as the owner of it. 
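// A sketch of the derivation scheme, using the two hard-coded indices defined at
// the top of this module:
//   signer_sk        = root_sk.derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX).secret_key()
//   derived_pk       = root_sk.derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX).secret_key().public_key()
//   root_folder_addr = RegisterAddress::new(XorName::from_content(&derived_pk.to_bytes()), signer_sk.public_key())
// so the same root SK always yields the same signer key and the same root Folder address.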
- let derived_pk = root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key(); - let root_folder_addr = RegisterAddress::new( - XorName::from_content(&derived_pk.to_bytes()), - client.signer_pk(), - ); - - (client, root_folder_addr) -} - -#[cfg(test)] -mod tests { - // All tests require a network running so Clients can be instantiated. - - use crate::acc_packet::{ - derive_keys_and_address, RECOVERY_SEED_FILENAME, SAFE_TRACKING_CHANGES_DIR, - }; - - use super::{ - read_root_folder_addr, read_tracking_info_from_disk, AccountPacket, Metadata, - MetadataTrackingInfo, Mutation, ACC_PACKET_ADDR_DERIVATION_INDEX, - ACC_PACKET_OWNER_DERIVATION_INDEX, - }; - use rand::{thread_rng, Rng}; - use sn_client::{ - protocol::storage::{Chunk, RetryStrategy}, - registers::{EntryHash, RegisterAddress}, - test_utils::{get_funded_wallet, get_new_client, random_file_chunk}, - transfers::MainSecretKey, - FolderEntry, UploadCfg, BATCH_SIZE, - }; - - use bls::SecretKey; - use bytes::Bytes; - use eyre::{bail, eyre, Result}; - use std::{ - collections::{BTreeMap, BTreeSet}, - fs::{create_dir_all, remove_dir_all, remove_file, File, OpenOptions}, - io::{Read, Write}, - path::{Path, PathBuf}, - }; - use xor_name::XorName; - - const SYNC_OPTS: (UploadCfg, bool) = { - let cfg = UploadCfg { - verify_store: true, - batch_size: BATCH_SIZE, - retry_strategy: RetryStrategy::Quick, - show_holders: false, - max_repayments_for_failed_data: 1, - collect_registers: false, - }; - let make_data_public = false; - (cfg, make_data_public) - }; - - #[tokio::test] - async fn test_acc_packet_private_helpers() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let files_path = tmp_dir.path().join("myfiles"); - create_dir_all(&files_path)?; - - let owner_pk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key() - .public_key(); - let xorname = XorName::from_content( - &root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key() - .to_bytes(), - ); - let expected_folder_addr = RegisterAddress::new(xorname, owner_pk); - - let acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - assert_eq!( - derive_keys_and_address(client, &root_sk).1, - expected_folder_addr - ); - assert_eq!(acc_packet.root_folder_addr(), expected_folder_addr); - - let mut test_files = create_test_files_on_disk(&files_path)?; - let mut rng = rand::thread_rng(); - let dummy_metadata = Metadata { - name: "dummy".to_string(), - content: FolderEntry::File(Chunk::new(Bytes::new())), - }; - for (relative_path, _) in test_files.iter() { - let abs_path = files_path.join(relative_path); - - // test helper which calculates relative paths based on root files dir of acc packet - assert!( - matches!(acc_packet.get_relative_path(&abs_path), Ok(p) if &p == relative_path), - "AccountPacket::get_relative_path helper returned invalid path" - ); - - // let's test helper to store tracking info - // use just dummy/invalid metadata and meta-xorname since we won't verify it - let meta_xorname = XorName::random(&mut rng); - acc_packet.store_tracking_info(MetadataTrackingInfo { - file_path: abs_path, - meta_xorname, - metadata: dummy_metadata.clone(), - entry_hash: EntryHash::default(), - })?; - assert!(acc_packet.meta_dir.join(hex::encode(meta_xorname)).exists()); - } - - // let's test helpers to read and remove tracking info - let 
tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - assert_eq!(tracking_info.len(), test_files.len()); - for (abs_path, info) in tracking_info.iter() { - assert!(test_files.remove(abs_path).is_some()); - acc_packet.remove_tracking_info(info.meta_xorname); - assert!(!acc_packet - .meta_dir - .join(hex::encode(info.meta_xorname)) - .exists()); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_from_empty_dir() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacketempty"); - create_dir_all(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - // let's sync up with the network from the original empty account packet - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpacketempty-clone"); - let cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's verify both the original and cloned packets are empty - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_upload_download() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket"); - let expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - - let downloaded_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - check_files_and_dirs_match(&acc_packet, &downloaded_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &downloaded_acc_packet, expected_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_scan_files_and_folders_changes() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-to-scan"); - let mut test_files = create_test_files_on_disk(&files_path)?; - let files_path = files_path.canonicalize()?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify changes detected - assert_eq!(changes.mutations.len(), 4); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == 
files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir1")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir2")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make some mutations/changes - mutate_test_files_on_disk(&files_path, &mut test_files)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify new changes detected - assert_eq!(changes.mutations.len(), 8); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::FileRemoved((p, _)) if p == &files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::FolderRemoved((p,_)) if p == &files_path.join("dir2")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3").join("dir3_1")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("dir3").join("dir3_1").join("file3.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4").join("dir4_1")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - Ok(()) - } - - #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] - #[tokio::test] - async fn test_acc_packet_sync_mutations() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpackettosync"); - let mut expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpackettosync-clone"); - let mut cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's make mutations to the clone: - mutate_test_files_on_disk(&clone_files_path, &mut expected_files)?; - - // and finally, sync the clone up with the network - cloned_acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's sync up with the network from the original account packet to merge - // changes made earlier from the cloned version - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's verify both the original and cloned packets contain the same content - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, expected_files)?; - - Ok(()) - } - - // Acc-packets can be moved to different locations on local disk without affecting their tracking info. - // We disable this test for Windows since in CI the use of std::fs::rename gives a permissions issue. 
- #[cfg(any(target_os = "linux", target_os = "macos"))] - #[tokio::test] - async fn test_acc_packet_moved_folder() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket-to-move"); - let mut test_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make just one mutation before moving the dir to another disk location - let new_chunk = random_file_chunk(); - let file2modify = Path::new("dir1").join("file1.txt"); - OpenOptions::new() - .write(true) - .open(src_files_path.join(&file2modify))? - .write_all(new_chunk.value())?; - test_files.insert(file2modify, Some(new_chunk)); - - // let's now move it to another disk location - let moved_files_path = tmp_dir.path().join("myaccpacket-moved"); - create_dir_all(&moved_files_path)?; - std::fs::rename(src_files_path, &moved_files_path)?; - let moved_files_path = moved_files_path.canonicalize()?; - - let moved_acc_packet = - AccountPacket::from_path(client.clone(), wallet_dir, &moved_files_path, None)?; - - // verify only one change is still detected after the move to another location on disk - let changes = moved_acc_packet.scan_files_and_folders_for_changes(false)?; - assert_eq!(changes.mutations.len(), 1); - assert_eq!(changes.mutations.first().map(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == moved_files_path.join("dir1").join("file1.txt")) - }), Some(true)); - - check_tracking_info_match(&moved_acc_packet, &moved_acc_packet, test_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_derived_address() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-unencrypted-metadata"); - let _ = create_test_files_on_disk(&files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // trying to download the Folder with a different root SK should fail, since it - // will derive a different address than the one used for creating it - let download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - let other_root_sk = MainSecretKey::random(); - - if AccountPacket::retrieve_folders( - &client, - wallet_dir, - &other_root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await - .is_ok() - { - bail!("acc-packet retrieval succeeded unexpectedly"); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_recovery_seed_encryption() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - - // let's first test with unencrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_unencrypted_seed"); - create_dir_all(&src_files_path)?; - let _ = AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - let _ = 
AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None)?; - - let bytes = std::fs::read( - src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert_eq!(bytes, root_sk.to_bytes()); - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(b"123456789"), - ) - .is_ok() - { - bail!("acc-packet loading with a password succeeded unexpectedly"); - } - - // let's now test with encrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_encrypted_seed"); - create_dir_all(&src_files_path)?; - let mut rng = thread_rng(); - let password: [u8; 32] = rng.gen(); - let incorrect_password: [u8; 32] = rng.gen(); - - let _ = AccountPacket::init( - client.clone(), - wallet_dir, - &src_files_path, - &root_sk, - Some(&password), - )?; - - if AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None).is_ok() { - bail!("acc-packet loading without a password succeeded unexpectedly"); - } - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(&incorrect_password), - ) - .is_ok() - { - bail!("acc-packet loading with an incorrect password succeeded unexpectedly"); - } - - let _ = - AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, Some(&password))?; - - let bytes = std::fs::read( - src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert!(!bytes.is_empty()); - assert_ne!(bytes, root_sk.to_bytes()); - - Ok(()) - } - - // Helper functions to generate and verify test data - - // Create a hard-coded set of test files and dirs on disk - fn create_test_files_on_disk(base_path: &Path) -> Result<BTreeMap<PathBuf, Option<Chunk>>> { - // let's create a hierarchy with dirs and files with random content - let mut files = BTreeMap::new(); - files.insert( - Path::new("file0.txt").to_path_buf(), - Some(random_file_chunk()), - ); - files.insert( - Path::new("dir1").join("file1.txt"), - Some(random_file_chunk()), - ); - files.insert(Path::new("dir2").to_path_buf(), None); - - for (path, chunk) in files.iter() { - let full_path = base_path.join(path); - if let Some(chunk) = chunk { - // it's a file, thus we create it and store its chunk bytes - create_dir_all(full_path.parent().expect("invalid path for test file"))?; - let mut file = File::create(full_path)?; - file.write_all(chunk.value())?; - } else { - // it's a dir, and it shall be empty - create_dir_all(full_path)?; - } - } - Ok(files) - } - - // Apply a hard-coded set of mutations to test files and dirs on disk - fn mutate_test_files_on_disk( - path: &Path, - test_files: &mut BTreeMap<PathBuf, Option<Chunk>>, - ) -> Result<()> { - // - modify the content of a file - let new_chunk = random_file_chunk(); - let file2modify = Path::new("file0.txt"); - OpenOptions::new() - .write(true) - .open(path.join(file2modify))? 
- .write_all(new_chunk.value())?; - test_files.insert(file2modify.to_path_buf(), Some(new_chunk)); - // - remove one of the files - let file2remove = Path::new("dir1").join("file1.txt"); - remove_file(path.join(&file2remove))?; - test_files.remove(&file2remove); - // we need to keep the empty dir within the list of expected files though - test_files.insert(Path::new("dir1").to_path_buf(), None); - // - remove one of the dirs - let dir2remove = Path::new("dir2"); - remove_dir_all(path.join(dir2remove))?; - test_files.remove(dir2remove); - // - create new file within subdirs - create_dir_all(path.join("dir3").join("dir3_1"))?; - let file2create = Path::new("dir3").join("dir3_1").join("file3.txt"); - let mut file = File::create(path.join(&file2create))?; - let new_chunk = random_file_chunk(); - file.write_all(new_chunk.value())?; - test_files.insert(file2create, Some(new_chunk)); - // - create new subdirs - let dir2create = Path::new("dir4").join("dir4_1"); - create_dir_all(path.join(&dir2create))?; - test_files.insert(dir2create.to_path_buf(), None); - - Ok(()) - } - - // Helper to check if a dir is empty - fn is_empty_dir(path: &Path) -> bool { - path.read_dir() - .map(|mut i| i.next().is_none()) - .unwrap_or(false) - } - - // Collect list of files and empty dirs, to be used for comparing in tests - fn list_of_files_and_empty_dirs(acc_packet: &AccountPacket) -> BTreeSet { - acc_packet - .iter_only_files() - .chain(acc_packet.iter_only_dirs()) - .flat_map(|file_entry| { - let path = file_entry.path(); - if path.is_dir() && !is_empty_dir(path) { - bail!("we skip non empty dirs"); - } - - acc_packet.get_relative_path(path) - }) - .collect() - } - - // Check both acc packets kept the same set of tracking information locally - fn check_tracking_info_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - let root_addr = src_packet.root_folder_addr(); - assert_eq!( - read_root_folder_addr(&src_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in source directory tracking info." - ); - assert_eq!( - read_root_folder_addr(&target_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in target directory tracking info." 
- ); - - let src_tracking_info = read_tracking_info_from_disk(&src_packet.meta_dir)?; - let mut target_tracking_info = read_tracking_info_from_disk(&target_packet.meta_dir)?; - - for (path, src_tracking_info) in src_tracking_info { - match target_tracking_info.remove(&path) { - None => { - bail!("Tracking info found in source is missing in target directory for file/dir: {path:?}") - } - Some(info) => { - if info != src_tracking_info { - bail!("Different tracking info kept in source and target for file/dir: {path:?}"); - } - } - } - - let abs_path = src_packet.files_dir.join(&path); - if abs_path.is_dir() { - assert_eq!(src_tracking_info.file_path, path, - "Incorrect path in tracking info found in source and target directories for dir: {path:?}"); - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Incorrect tracking info found in source and target directories for dir: {path:?}"); - // if it's an empty dir we shall find it in the list of expected files - if is_empty_dir(&abs_path) { - let _ = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for dir: {path:?}" - ) - })?; - } - } else { - let chunk = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for file: {path:?}" - ) - })?; - - if chunk.is_some() { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::File(_)), - "Tracking info found in source and target directories don't match the file: {path:?}"); - } else { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Tracking info found in source and target directories don't match the dir: {path:?}"); - } - } - } - - if !target_tracking_info.is_empty() { - bail!("Tracking info found in target directory but missing in source directory: {target_tracking_info:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s are lacking their tracking info in source or target directories: {expected_files:?}"); - } - - Ok(()) - } - - // Check both dirs have the same set of files and folders and no more - fn check_files_and_dirs_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - // let's collect all paths in target acc packet, i.e. 
files and empty dirs paths - let mut target_packet_files: BTreeSet<PathBuf> = - list_of_files_and_empty_dirs(target_packet); - - // let's now compare those paths in target acc packet with those in source acc packet - for relative_path in list_of_files_and_empty_dirs(src_packet) { - if !target_packet_files.remove(&relative_path) { - bail!("File/dir found in source is missing in target directory: {relative_path:?}"); - } - - let src_path = src_packet.files_dir.join(&relative_path); - let target_path = target_packet.files_dir.join(&relative_path); - - let chunk = expected_files.remove(&relative_path).ok_or_else(|| { - eyre!("Unexpected file/dir found on source and target directories: {src_path:?}") - })?; - - if let Some(chunk) = chunk { - // it's a file, let's compare their content - let mut src_file = File::open(&src_path) - .map_err(|err| eyre!("couldn't open source file {src_path:?}: {err:?}"))?; - let mut target_file = File::open(&target_path) - .map_err(|err| eyre!("couldn't open target file {target_path:?}: {err:?}"))?; - - let mut src_content = Vec::new(); - src_file - .read_to_end(&mut src_content) - .expect("couldn't read source file"); - let mut target_content = Vec::new(); - target_file - .read_to_end(&mut target_content) - .expect("couldn't read target file"); - - assert_eq!( - src_content, - chunk.value().slice(..), - "source file content doesn't match with expected" - ); - assert_eq!( - target_content, - chunk.value().slice(..), - "target file content doesn't match with expected" - ); - } else { - // it's a dir, let's check they exist as dirs - assert!(src_path.is_dir(), "source path is not a dir {src_path:?}"); - assert!( - target_path.is_dir(), - "target path is not a dir {target_path:?}" - ); - } - } - - if !target_packet_files.is_empty() { - bail!("File/dir/s found in target directory but missing in source directory: {target_packet_files:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s were not found in source or target directories: {expected_files:?}"); - } - - Ok(()) - } -} diff --git a/sn_cli/src/acc_packet/change_tracking.rs b/sn_cli/src/acc_packet/change_tracking.rs deleted file mode 100644 index a2eba85270..0000000000 --- a/sn_cli/src/acc_packet/change_tracking.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use sn_client::{ - protocol::storage::RegisterAddress, registers::EntryHash, transfers::MainSecretKey, FoldersApi, - Metadata, -}; - -use aes::Aes256; -use block_modes::{block_padding::Pkcs7, BlockMode, Cbc}; -use bls::{SecretKey, SK_SIZE}; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tiny_keccak::{Hasher, Sha3}; -use walkdir::WalkDir; -use xor_name::XorName; - -// AES used to encrypt/decrypt the cached recovery seed. -type Aes256Cbc = Cbc<Aes256, Pkcs7>; - -// AES Initialisation Vector length used. 
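
The `Aes256Cbc` alias just defined drives the recovery-seed caching that `store_root_sk`/`read_root_sk` implement further down: the key is a SHA3-256 hash of the password, and a random IV is prefixed to the ciphertext. A minimal roundtrip sketch of that scheme, using the same `aes`, `block_modes`, `rand`, and `tiny_keccak` crates imported above (the seed value and password here are illustrative):

```rust
use aes::Aes256;
use block_modes::{block_padding::Pkcs7, BlockMode, Cbc};
use rand::Rng;
use tiny_keccak::{Hasher, Sha3};

type Aes256Cbc = Cbc<Aes256, Pkcs7>;

const IV_LENGTH: usize = 16;
const AES_BUFFER_LENGTH: usize = 48; // 32-byte seed + one full block of PKCS#7 padding

// Same derivation as `encryption_key_from_hashed_password` below.
fn key_from_password(password: &[u8]) -> [u8; 32] {
    let mut key = [0; 32];
    let mut hasher = Sha3::v256();
    hasher.update(password);
    hasher.finalize(&mut key);
    key
}

fn main() {
    let seed = [7u8; 32]; // stand-in for the 32-byte recovery seed
    let key = key_from_password(b"correct horse battery staple");

    // Encrypt: a random IV is chosen per encryption, as in `store_root_sk`.
    let mut buffer = [0u8; AES_BUFFER_LENGTH];
    buffer[..seed.len()].copy_from_slice(&seed);
    let iv: [u8; IV_LENGTH] = rand::thread_rng().gen();
    let cipher = Aes256Cbc::new_from_slices(&key, &iv).expect("valid key/IV lengths");
    let ciphertext = cipher.encrypt(&mut buffer, seed.len()).expect("encryption failed");

    // Decrypt with the same key and the (normally prefixed) IV, as in `read_root_sk`.
    let cipher = Aes256Cbc::new_from_slices(&key, &iv).expect("valid key/IV lengths");
    let plaintext = cipher.decrypt_vec(ciphertext).expect("decryption failed");
    assert_eq!(plaintext, seed);
    println!("recovery seed roundtripped through AES-256-CBC");
}
```
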
-const IV_LENGTH: usize = 16; - -// Length of buffers used for AES encryption/decryption. -const AES_BUFFER_LENGTH: usize = 48; - -// Name of hidden folder where tracking information and metadata is locally stored. -pub(super) const SAFE_TRACKING_CHANGES_DIR: &str = ".safe"; - -// Subfolder where files metadata will be cached -pub(super) const METADATA_CACHE_DIR: &str = "metadata"; - -// Name of the file where metadata about root folder is locally cached. -pub(super) const ROOT_FOLDER_METADATA_FILENAME: &str = "root_folder.addr"; - -// Name of the file where the recovery secret/seed is locally cached. -pub(crate) const RECOVERY_SEED_FILENAME: &str = "recovery_seed"; - -// Container to keep track in memory what changes are detected in local Folders hierarchy and files. -pub(super) type Folders = BTreeMap<PathBuf, (FoldersApi, FolderChange)>; - -// Type of local changes detected to a Folder -#[derive(Clone, Debug, PartialEq)] -pub(super) enum FolderChange { - NoChange, - NewFolder, - NewEntries, -} - -impl FolderChange { - /// Returns true if it's currently set to NewFolder. - pub fn is_new_folder(&self) -> bool { - self == &Self::NewFolder - } - - /// If it's currently set to NoChange then switch it to NewEntries. - /// Otherwise we don't need to change it as the entire Folder will need to be uploaded. - pub fn has_new_entries(&mut self) { - if self == &Self::NoChange { - *self = Self::NewEntries; - } - } -} - -// Changes detected locally which eventually can be applied and uploaded to the network. -#[derive(Default)] -pub(super) struct ChangesToApply { - pub folders: Folders, - pub mutations: Vec<Mutation>, -} - -// Type of mutation detected locally. -#[derive(Debug)] -pub(super) enum Mutation { - NewFile(MetadataTrackingInfo), - FileRemoved((PathBuf, XorName)), - FileContentChanged((XorName, MetadataTrackingInfo)), - NewFolder(MetadataTrackingInfo), - FolderRemoved((PathBuf, XorName)), -} - -impl fmt::Display for Mutation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::NewFile(tracking_info) => { - write!(f, "New file: {:?}", tracking_info.file_path) - } - Self::FileRemoved((path, _)) => write!(f, "File removed: {path:?}"), - Self::FileContentChanged((_, tracking_info)) => { - write!(f, "File content changed: {:?}", tracking_info.file_path) - } - Self::NewFolder(tracking_info) => { - write!(f, "New folder: {:?}", tracking_info.file_path) - } - Self::FolderRemoved((path, _)) => write!(f, "Folder removed: {path:?}"), - } - } -} - -// Information stored locally to keep track of local changes to files/folders. -// TODO: to make file changes discovery more efficient, and prevent chunking for -// such purposes, add more info like file size and last modified timestamp. 
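
The tracking entries described by the struct that follows are cached under `.safe/metadata`, one file per entry, serialized with `rmp_serde` (MessagePack) and named by the hex of the entry's meta xorname. A hedged sketch of that write/read-back cycle, with a reduced stand-in for `MetadataTrackingInfo` and using the `serde`, `rmp_serde`, `hex`, and `tempfile` crates this file and its tests already depend on:

```rust
use serde::{Deserialize, Serialize};
use std::{fs::File, io::Write, path::PathBuf};

// Reduced stand-in for the crate's `MetadataTrackingInfo`, for illustration only.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
struct TrackingInfo {
    file_path: PathBuf,
    meta_xorname: [u8; 32],
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let info = TrackingInfo {
        file_path: PathBuf::from("dir1/file1.txt"),
        meta_xorname: [3u8; 32],
    };

    // Write MessagePack bytes to a file named after the hex-encoded xorname,
    // mirroring how tracking info is cached on disk.
    let dir = tempfile::tempdir()?;
    let path = dir.path().join(hex::encode(info.meta_xorname));
    File::create(&path)?.write_all(&rmp_serde::to_vec(&info)?)?;

    // Read it back, as `read_tracking_info_from_disk` does per entry.
    let bytes = std::fs::read(&path)?;
    let read_back: TrackingInfo = rmp_serde::from_slice(&bytes)?;
    assert_eq!(read_back, info);
    Ok(())
}
```
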
-#[derive(Debug, Serialize, Deserialize, PartialEq)] -pub(super) struct MetadataTrackingInfo { - pub file_path: PathBuf, - pub meta_xorname: XorName, - pub metadata: Metadata, - pub entry_hash: EntryHash, -} - -// Build absolute paths for the different dirs to be used for locally tracking changes -pub(super) fn build_tracking_info_paths(path: &Path) -> Result<(PathBuf, PathBuf, PathBuf)> { - let files_dir = path.to_path_buf().canonicalize()?; - let tracking_info_dir = files_dir.join(SAFE_TRACKING_CHANGES_DIR); - let meta_dir = tracking_info_dir.join(METADATA_CACHE_DIR); - create_dir_all(&meta_dir) - .map_err(|err| eyre!("The path provided needs to be a directory: {err}"))?; - - Ok((files_dir, tracking_info_dir, meta_dir)) -} - -pub(super) fn read_tracking_info_from_disk( - meta_dir: &Path, -) -> Result> { - let mut curr_tracking_info = BTreeMap::new(); - for entry in WalkDir::new(meta_dir) - .into_iter() - .flatten() - .filter(|e| e.file_type().is_file() && e.file_name() != ROOT_FOLDER_METADATA_FILENAME) - { - let path = entry.path(); - let bytes = std::fs::read(path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err}"))?; - let tracking_info: MetadataTrackingInfo = rmp_serde::from_slice(&bytes) - .map_err(|err| eyre!("Error while deserializing tracking info from {path:?}: {err}"))?; - - curr_tracking_info.insert(tracking_info.file_path.clone(), tracking_info); - } - - Ok(curr_tracking_info) -} - -// Store tracking info about the root folder in a file to keep track of any changes made -pub(super) fn store_root_folder_tracking_info( - meta_dir: &Path, - root_folder_addr: RegisterAddress, - created: bool, -) -> Result<()> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let mut meta_file = File::create(path)?; - meta_file.write_all(&rmp_serde::to_vec(&(root_folder_addr, created))?)?; - - Ok(()) -} - -// Store the given root seed/SK on disk, (optionally) encrypted with a password -pub(super) fn store_root_sk( - dir: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, -) -> Result<()> { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut secret_file = File::create(path)?; - let seed_bytes = root_sk.to_bytes(); - - if let Some(pwd) = password { - // encrypt the SK with the (hashed) password - let key = encryption_key_from_hashed_password(pwd); - - let pos = seed_bytes.len(); - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..pos].copy_from_slice(&seed_bytes); - - // IV is randomly chosen and prefixed it to cipher - let mut rng = rand::thread_rng(); - let random_iv: [u8; IV_LENGTH] = rng.gen(); - let mut iv_with_cipher = vec![]; - iv_with_cipher.extend(random_iv); - - let cipher = Aes256Cbc::new_from_slices(&key, &random_iv)?; - let ciphertext = cipher.encrypt(&mut buffer, pos)?; - iv_with_cipher.extend(ciphertext); - - secret_file.write_all(&iv_with_cipher)?; - } else { - secret_file.write_all(&seed_bytes)?; - } - - Ok(()) -} - -// Read the root seed/SK from disk, (optionally) decrypting it with a password -pub(super) fn read_root_sk(dir: &Path, password: Option<&[u8]>) -> Result { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut bytes = std::fs::read(&path).map_err(|err| { - eyre!("Error while reading the recovery seed/secret from {path:?}: {err:?}") - })?; - - if let Some(pwd) = password { - // decrypt the SK with the (hashed) password - if bytes.len() < IV_LENGTH + AES_BUFFER_LENGTH { - bail!( - "Not enough bytes found on disk ({}) to decrypt the recovery seed", - bytes.len() - ); - } - - // the IV is prefixed - let mut 
iv = [0u8; IV_LENGTH]; - iv[..IV_LENGTH].copy_from_slice(&bytes[..IV_LENGTH]); - - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..48].copy_from_slice(&bytes[IV_LENGTH..]); - - let key = encryption_key_from_hashed_password(pwd); - let cipher = Aes256Cbc::new_from_slices(&key, &iv)?; - bytes = cipher - .decrypt_vec(&buffer) - .map_err(|_| eyre!("Failed to decrypt the recovery seed with the provided password"))?; - } - - if bytes.len() != SK_SIZE { - bail!( - "The length of bytes read from disk ({}) doesn't match a recovery seed's length ({SK_SIZE})", bytes.len() - ); - } - let mut seed_bytes = [0u8; SK_SIZE]; - seed_bytes[..SK_SIZE].copy_from_slice(&bytes); - let sk = MainSecretKey::new(SecretKey::from_bytes(seed_bytes)?); - - Ok(sk) -} - -fn encryption_key_from_hashed_password(password: &[u8]) -> [u8; 32] { - let mut key = [0; 32]; - let mut hasher = Sha3::v256(); - hasher.update(password); - hasher.finalize(&mut key); - key -} - -// Read the tracking info about the root folder -pub(super) fn read_root_folder_addr(meta_dir: &Path) -> Result<(RegisterAddress, bool)> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let bytes = std::fs::read(&path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err:?}"))?; - - Ok(rmp_serde::from_slice(&bytes)?) -} diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs deleted file mode 100644 index 2fa931f217..0000000000 --- a/sn_cli/src/bin/main.rs +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[macro_use] -extern crate tracing; - -mod subcommands; - -use subcommands::{ - files::files_cmds, - folders::folders_cmds, - register::register_cmds, - wallet::{ - hot_wallet::{wallet_cmds, wallet_cmds_without_client, WalletCmds}, - wo_wallet::{wo_wallet_cmds, wo_wallet_cmds_without_client, WatchOnlyWalletCmds}, - }, - Opt, SubCmd, -}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::Result; -use indicatif::ProgressBar; -use sn_client::transfers::bls_secret_from_hex; -use sn_client::{Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}; -#[cfg(feature = "metrics")] -use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat}; -use sn_protocol::version::IDENTIFY_PROTOCOL_STR; -use std::{io, path::PathBuf, time::Duration}; -use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; - -const CLIENT_KEY: &str = "clientkey"; - -#[tokio::main] -async fn main() -> Result<()> { - color_eyre::install()?; - let opt = Opt::parse(); - - if opt.version { - println!( - "{}", - sn_build_info::version_string( - "Autonomi CLI", - env!("CARGO_PKG_VERSION"), - Some(&IDENTIFY_PROTOCOL_STR) - ) - ); - return Ok(()); - } - - if opt.crate_version { - println!("{}", env!("CARGO_PKG_VERSION")); - return Ok(()); - } - - if opt.protocol_version { - println!("{}", *IDENTIFY_PROTOCOL_STR); - return Ok(()); - } - - #[cfg(not(feature = "nightly"))] - if opt.package_version { - println!("{}", sn_build_info::package_version()); - return Ok(()); - } - - let logging_targets = vec![ - // TODO: Reset to nice and clean defaults once we have a better idea of what we want - ("sn_networking".to_string(), Level::INFO), - ("safe".to_string(), Level::TRACE), - ("sn_build_info".to_string(), Level::TRACE), - ("autonomi".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_logging".to_string(), Level::TRACE), - ("sn_peers_acquisition".to_string(), Level::TRACE), - ("sn_protocol".to_string(), Level::TRACE), - ("sn_registers".to_string(), Level::TRACE), - ("sn_transfers".to_string(), Level::TRACE), - ]; - let mut log_builder = LogBuilder::new(logging_targets); - log_builder.output_dest(opt.log_output_dest); - log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); - let _log_handles = log_builder.initialize()?; - - #[cfg(feature = "metrics")] - tokio::spawn(init_metrics(std::process::id())); - - // Log the full command that was run - info!("\"{}\"", std::env::args().collect::>().join(" ")); - - debug!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - println!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - - let client_data_dir_path = get_client_data_dir_path()?; - // Perform actions that do not require us connecting to the network and return early - if let Some(SubCmd::Wallet(cmds)) = &opt.cmd { - if let WalletCmds::Address { .. } - | WalletCmds::Balance { .. } - | WalletCmds::Create { .. } - | WalletCmds::Sign { .. } - | WalletCmds::Status { .. } - | WalletCmds::Encrypt { .. } = cmds - { - wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - if let Some(SubCmd::WatchOnlyWallet(cmds)) = &opt.cmd { - if let WatchOnlyWalletCmds::Addresses - | WatchOnlyWalletCmds::Balance { .. } - | WatchOnlyWalletCmds::Deposit { .. } - | WatchOnlyWalletCmds::Create { .. } - | WatchOnlyWalletCmds::Transaction { .. 
} = cmds - { - wo_wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - println!("Instantiating a SAFE client..."); - let secret_key = get_client_secret_key(&client_data_dir_path)?; - - let bootstrap_peers = opt.peers.get_peers().await?; - - println!( - "Connecting to the network with {} peers", - bootstrap_peers.len(), - ); - - let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local` flag is provided - None - } else { - Some(bootstrap_peers) - }; - - // get the broadcaster as we want to have our own progress bar. - let broadcaster = ClientEventsBroadcaster::default(); - let (progress_bar, progress_bar_handler) = - spawn_connection_progress_bar(broadcaster.subscribe()); - - let result = Client::new( - secret_key, - bootstrap_peers, - opt.connection_timeout, - Some(broadcaster), - ) - .await; - let client = match result { - Ok(client) => client, - Err(err) => { - // clean up progress bar - progress_bar.finish_with_message("Could not connect to the network"); - return Err(err.into()); - } - }; - progress_bar_handler.await?; - - let should_verify_store = !opt.no_verify; - - // PowerShell seems to have an issue showing the unwrapped error, - // hence we capture the result and print it out explicitly. - let result = match opt.cmd { - Some(SubCmd::Wallet(cmds)) => { - wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::WatchOnlyWallet(cmds)) => { - wo_wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Files(cmds)) => { - files_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Folders(cmds)) => { - folders_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Register(cmds)) => { - register_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - None => { - println!("Use --help to see available commands"); - return Ok(()); - } - }; - println!("Completed with {result:?}"); - - Ok(()) -} - -/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the -/// client successfully connects to the network or if it errors out. -fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) { - // Network connection progress bar - let progress_bar = ProgressBar::new_spinner(); - let progress_bar_clone = progress_bar.clone(); - progress_bar.enable_steady_tick(Duration::from_millis(120)); - progress_bar.set_message("Connecting to The SAFE Network..."); - let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); - progress_bar.set_style(new_style); - - progress_bar.set_message("Connecting to The SAFE Network..."); - - let handle = tokio::spawn(async move { - let mut peers_connected = 0; - loop { - match rx.recv().await { - Ok(ClientEvent::ConnectedToNetwork) => { - progress_bar.finish_with_message("Connected to the Network"); - break; - } - Ok(ClientEvent::PeerAdded { - max_peers_to_connect, - }) => { - peers_connected += 1; - progress_bar.set_message(format!( - "{peers_connected}/{max_peers_to_connect} initial peers found.", - )); - } - Err(RecvError::Lagged(_)) => { - // Even if the receiver is lagged, we would still get the ConnectedToNetwork event during each new - // connection. Thus it would be okay to skip this error. 
- } - Err(RecvError::Closed) => { - progress_bar.finish_with_message("Could not connect to the network"); - break; - } - _ => {} - } - } - }); - (progress_bar_clone, handle) -} - -fn get_client_secret_key(root_dir: &PathBuf) -> Result { - // create the root directory if it doesn't exist - std::fs::create_dir_all(root_dir)?; - let key_path = root_dir.join(CLIENT_KEY); - let secret_key = if key_path.is_file() { - info!("Client key found. Loading from file..."); - let secret_hex_bytes = std::fs::read(key_path)?; - bls_secret_from_hex(secret_hex_bytes)? - } else { - info!("No key found. Generating a new client key..."); - let secret_key = SecretKey::random(); - std::fs::write(key_path, hex::encode(secret_key.to_bytes()))?; - secret_key - }; - Ok(secret_key) -} - -fn get_client_data_dir_path() -> Result { - let mut home_dirs = dirs_next::data_dir().expect("Data directory is obtainable"); - home_dirs.push("safe"); - home_dirs.push("client"); - std::fs::create_dir_all(home_dirs.as_path())?; - Ok(home_dirs) -} - -fn get_stdin_response(prompt: &str) -> String { - println!("{prompt}"); - let mut buffer = String::new(); - let stdin = io::stdin(); - if stdin.read_line(&mut buffer).is_err() { - // consider if error should process::exit(1) here - return "".to_string(); - }; - // Remove leading and trailing whitespace - buffer.trim().to_owned() -} - -fn get_stdin_password_response(prompt: &str) -> String { - rpassword::prompt_password(prompt) - .map(|v| v.trim().to_owned()) - .unwrap_or("".to_string()) -} - -#[cfg(test)] -mod tests { - use crate::subcommands::wallet::hot_wallet::{wallet_cmds_without_client, WalletCmds}; - use crate::subcommands::wallet::WalletApiHelper; - use bls::SecretKey; - use color_eyre::Result; - use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; - use sn_client::transfers::HotWallet; - use std::path::Path; - - fn create_wallet(root_dir: &Path, derivation_passphrase: Option) -> Result { - let mnemonic = load_or_create_mnemonic(root_dir)?; - let secret_key = secret_key_from_mnemonic(mnemonic, derivation_passphrase)?; - let wallet = HotWallet::create_from_key(root_dir, secret_key, None)?; - Ok(wallet) - } - - #[tokio::test] - async fn test_wallet_address_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - // Create wallet - let _wallet = create_wallet(&root_dir, None).expect("Could not create wallet"); - - let cmds = WalletCmds::Address; - - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_address_command_should_fail_with_no_existing_wallet() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let client_data_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Address; - - // Runs command without a wallet being present, thus should fail - let result = wallet_cmds_without_client(&cmds, &client_data_dir).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_wallet_create_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: None, - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = 
WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_create_command_with_hex_key() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let secret_key = SecretKey::random(); - let secret_key_hex = secret_key.to_hex(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: Some(secret_key_hex), - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - - if let WalletApiHelper::HotWallet(wallet) = result.expect("No valid wallet found") { - // Compare public addresses (secret keys are the same if the public addresses are) - assert_eq!(wallet.address().to_hex(), secret_key.public_key().to_hex()); - } else { - panic!("Did not expect a watch only wallet"); - } - } -} diff --git a/sn_cli/src/bin/subcommands/files.rs b/sn_cli/src/bin/subcommands/files.rs deleted file mode 100644 index 2bc3a26fed..0000000000 --- a/sn_cli/src/bin/subcommands/files.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Help, Result, -}; -use sn_cli::{ - download_file, download_files, ChunkManager, Estimator, FilesUploader, UploadedFile, - UPLOADED_FILES, -}; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - UploadCfg, -}; -use sn_client::{Client, FilesApi, BATCH_SIZE}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, -}; -use walkdir::WalkDir; -use xor_name::XorName; - -#[derive(Parser, Debug)] -pub enum FilesCmds { - Estimate { - /// The location of the file(s) to upload. Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - path: PathBuf, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - }, - Upload { - /// The location of the file(s) to upload. - /// - /// Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - file_path: PathBuf, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. - #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). 
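
The `default_value_t = RetryStrategy::Quick` attribute on the flag that follows works because the type implements both `Display` (to render the default in help output) and `FromStr` (to parse the user's value). A minimal sketch of the same wiring with a stand-in enum, assuming the clap derive API used throughout this crate:

```rust
use clap::Parser;
use std::{fmt, str::FromStr};

// Stand-in for `RetryStrategy`, for illustration only.
#[derive(Clone, Copy, Debug)]
enum Retry {
    Quick,
    Balanced,
    Persistent,
}

// `Display` lets clap render the default value in `--help`.
impl fmt::Display for Retry {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Retry::Quick => "quick",
            Retry::Balanced => "balanced",
            Retry::Persistent => "persistent",
        };
        write!(f, "{s}")
    }
}

// `FromStr` lets clap parse the user-supplied value.
impl FromStr for Retry {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "quick" => Ok(Retry::Quick),
            "balanced" => Ok(Retry::Balanced),
            "persistent" => Ok(Retry::Persistent),
            other => Err(format!("unknown retry strategy: {other}")),
        }
    }
}

#[derive(Parser)]
struct Cli {
    /// Retry strategy to use on failure.
    #[clap(long, default_value_t = Retry::Quick, short = 'r')]
    retry_strategy: Retry,
}

fn main() {
    println!("using retry strategy: {}", Cli::parse().retry_strategy);
}
```
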
- #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - Download { - /// The name to apply to the downloaded file. - /// - /// If the name argument is used, the address argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "name")] - file_name: Option, - /// The hex address of a file. - /// - /// If the address argument is used, the name argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "address")] - file_addr: Option, - /// Flagging whether to show the holders of the uploaded chunks. - /// Default to be not showing. - #[clap(long, name = "show_holders", default_value = "false")] - show_holders: bool, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn files_cmds( - cmds: FilesCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FilesCmds::Estimate { - path, - make_data_public, - } => { - let files_api = FilesApi::build(client.clone(), root_dir.to_path_buf())?; - let chunk_manager = ChunkManager::new(root_dir); - Estimator::new(chunk_manager, files_api) - .estimate_cost(path, make_data_public, root_dir) - .await? - } - FilesCmds::Upload { - file_path, - batch_size, - retry_strategy, - make_data_public, - } => { - let files_count = count_files_in_path_recursively(&file_path); - - if files_count == 0 { - if file_path.is_dir() { - bail!( - "The directory specified for upload is empty. \ - Please verify the provided path." - ); - } else { - bail!("The provided file path is invalid. Please verify the path."); - } - } - let upload_cfg = UploadCfg { - batch_size, - verify_store, - retry_strategy, - ..Default::default() - }; - let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf()) - .set_make_data_public(make_data_public) - .set_upload_cfg(upload_cfg) - .insert_path(&file_path); - - let _summary = files_uploader.start_upload().await?; - } - FilesCmds::Download { - file_name, - file_addr, - show_holders, - batch_size, - retry_strategy, - } => { - if (file_name.is_some() && file_addr.is_none()) - || (file_addr.is_some() && file_name.is_none()) - { - return Err( - eyre!("Both the name and address must be supplied if either are used") - .suggestion( - "Please run the command again in the form 'files download
'", - ), - ); - } - - let mut download_dir = root_dir.to_path_buf(); - let mut download_file_name = file_name.clone(); - if let Some(file_name) = file_name { - // file_name may direct the downloaded data to: - // - // the current directory (just a filename) - // eg safe files download myfile.txt ADDRESS - // - // a directory relative to the current directory (relative filename) - // eg safe files download my/relative/path/myfile.txt ADDRESS - // - // a directory relative to root of the filesystem (absolute filename) - // eg safe files download /home/me/mydir/myfile.txt ADDRESS - let file_name_path = Path::new(&file_name); - if file_name_path.is_dir() { - return Err(eyre!("Cannot download file to path: {:?}", file_name)); - } - let file_name_dir = file_name_path.parent(); - if file_name_dir.is_none() { - // just a filename, use the current_dir - download_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - } else if file_name_path.is_relative() { - // relative to the current directory. Make the relative path - // into an absolute path by joining it to current_dir - if let Some(relative_dir) = file_name_dir { - let current_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - download_dir = current_dir.join(relative_dir); - if !download_dir.exists() { - return Err(eyre!("Directory does not exist: {:?}", download_dir)); - } - if let Some(path_file_name) = file_name_path.file_name() { - download_file_name = Some(OsString::from(path_file_name)); - } - } - } else { - // absolute dir - download_dir = file_name_dir.unwrap_or(root_dir).to_path_buf(); - } - } - let files_api: FilesApi = FilesApi::new(client.clone(), download_dir.clone()); - - match (download_file_name, file_addr) { - (Some(download_file_name), Some(address_provided)) => { - let bytes = - hex::decode(&address_provided).expect("Input address is not a hex string"); - let xor_name_provided = XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - // try to read the data_map if it exists locally. - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let expected_data_map_location = uploaded_files_path.join(address_provided); - let local_data_map = { - if expected_data_map_location.exists() { - let uploaded_file_metadata = - UploadedFile::read(&expected_data_map_location)?; - - uploaded_file_metadata.data_map.map(|bytes| Chunk { - address: ChunkAddress::new(xor_name_provided), - value: bytes, - }) - } else { - None - } - }; - - download_file( - files_api, - xor_name_provided, - (download_file_name, local_data_map), - &download_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await - } - _ => { - println!("Attempting to download all files uploaded by the current user..."); - download_files( - &files_api, - root_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await? - } - } - } - } - Ok(()) -} - -fn count_files_in_path_recursively(file_path: &PathBuf) -> u32 { - let entries_iterator = WalkDir::new(file_path).into_iter().flatten(); - let mut count = 0; - - entries_iterator.for_each(|entry| { - if entry.file_type().is_file() { - count += 1; - } - }); - count -} diff --git a/sn_cli/src/bin/subcommands/folders.rs b/sn_cli/src/bin/subcommands/folders.rs deleted file mode 100644 index 705b746459..0000000000 --- a/sn_cli/src/bin/subcommands/folders.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use sn_cli::AccountPacket; - -use sn_client::{ - protocol::storage::RetryStrategy, transfers::MainSecretKey, Client, UploadCfg, BATCH_SIZE, -}; - -use bls::{SecretKey, SK_SIZE}; -use clap::Parser; -use color_eyre::{eyre::bail, Result}; -use dialoguer::Password; -use std::{ - env::current_dir, - path::{Path, PathBuf}, -}; - -#[derive(Parser, Debug)] -pub enum FoldersCmds { - Init { - /// The directory to initialise as a root folder, which can then be stored on the network (and kept in sync with). - /// By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - }, - Download { - /// The full local path where to download the folder. By default the current path is assumed, - /// and the main Folder's network address will be used as the folder name. - #[clap(name = "target folder path")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - /// Report any changes made to local version of files/folders (this doesn't compare it with their versions stored on the network). - Status { - /// Path to check changes made on. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - }, - /// Sync up local files/folders changes with their versions stored on the network. - Sync { - /// Path to sync with its remote version on the network. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. - #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the files be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Balanced, short = 'r', help = "Sets the retry strategy on upload failure. 
Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn folders_cmds( - cmds: FoldersCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FoldersCmds::Init { path, root_sk } => { - let path = get_path(path, None)?; - // initialise path as a fresh new Folder with a network address derived from the root SK - let root_sk = get_recovery_secret_sk(root_sk, true)?; - let acc_packet = AccountPacket::init(client.clone(), root_dir, &path, &root_sk, None)?; - println!("Directory at {path:?} initialised as a root Folder, ready to track and sync changes with the network at address: {}", acc_packet.root_folder_addr().to_hex()) - } - FoldersCmds::Download { - path, - root_sk, - batch_size, - retry_strategy, - } => { - let root_sk = get_recovery_secret_sk(root_sk, false)?; - let root_sk_hex = root_sk.main_pubkey().to_hex(); - let download_folder_name = format!( - "folder_{}_{}", - &root_sk_hex[..6], - &root_sk_hex[root_sk_hex.len() - 6..] - ); - let download_folder_path = get_path(path, Some(&download_folder_name))?; - println!("Downloading onto {download_folder_path:?}, with batch-size {batch_size}"); - debug!("Downloading onto {download_folder_path:?}"); - - let _ = AccountPacket::retrieve_folders( - client, - root_dir, - &root_sk, - None, - &download_folder_path, - batch_size, - retry_strategy, - ) - .await?; - } - FoldersCmds::Status { path } => { - let path = get_path(path, None)?; - let acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - acc_packet.status()?; - } - FoldersCmds::Sync { - path, - batch_size, - make_data_public, - retry_strategy, - } => { - let path = get_path(path, None)?; - let mut acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - - let options = UploadCfg { - verify_store, - batch_size, - retry_strategy, - ..Default::default() - }; - acc_packet.sync(options, make_data_public).await?; - } - } - Ok(()) -} - -// Unwrap provided path, or return the current path if none was provided. -// It can optionally be provided a string to adjoin when the current dir is returned. -fn get_path(path: Option, to_join: Option<&str>) -> Result { - let path = if let Some(path) = path { - path - } else { - let current_dir = current_dir()?; - to_join.map_or_else(|| current_dir.clone(), |str| current_dir.join(str)) - }; - Ok(path) -} - -// Either get a hex-encoded SK entered by the user, or generate a new one -// TODO: get/generate a mnemonic instead -fn get_recovery_secret_sk( - root_sk: Option, - gen_new_recovery_secret: bool, -) -> Result { - let result = if let Some(str) = root_sk { - SecretKey::from_hex(&str) - } else { - let prompt_msg = if gen_new_recovery_secret { - println!( - "\n\nA recovery secret is required to derive signing/encryption keys, and network addresses, \ - used by an Account Packet." 
- ); - println!( - "The recovery secret used to initialise an Account Packet can be used to retrieve and restore \ - a new replica/clone from the network, onto any local path and even onto another device.\n" - ); - - "Please enter your recovery secret for this new Account Packet,\nif you don't have one, \ - press [Enter] to generate one" - } else { - "Please enter your recovery secret" - }; - - let err_msg = format!("Hex-encoded recovery secret must be {} characters long", 2 * SK_SIZE); - let sk_hex = Password::new() - .with_prompt(prompt_msg) - .allow_empty_password(gen_new_recovery_secret) - .validate_with(|input: &String| -> Result<(), &str> { - let len = input.chars().count(); - if len == 0 || len == 2 * SK_SIZE { - Ok(()) - } else { - Err(&err_msg) - } - }) - .interact()?; - - println!(); - if sk_hex.is_empty() { - println!("Generating your recovery secret..."); - let sk = SecretKey::random(); - println!("\n*** Recovery secret generated ***\n{}", sk.to_hex()); - println!(); - println!( - "Please *MAKE SURE YOU DON'T LOSE YOUR RECOVERY SECRET*, and always sync up local changes \ - made to your Account Packet with the remote replica on the network so you don't lose them either.\n" - ); - - Ok(sk) - } else { - SecretKey::from_hex(&sk_hex) - } - }; - - match result { - Ok(sk) => Ok(MainSecretKey::new(sk)), - Err(err) => bail!("Failed to decode the recovery secret: {err:?}"), - } -} diff --git a/sn_cli/src/bin/subcommands/mod.rs b/sn_cli/src/bin/subcommands/mod.rs deleted file mode 100644 index 575e90b3d3..0000000000 --- a/sn_cli/src/bin/subcommands/mod.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -pub(crate) mod files; -pub(crate) mod folders; -pub(crate) mod register; -pub(crate) mod wallet; - -use clap::Parser; -use clap::Subcommand; -use color_eyre::Result; -use sn_logging::{LogFormat, LogOutputDest}; -use sn_peers_acquisition::PeersArgs; -use std::time::Duration; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser)] -#[command(disable_version_flag = true)] -pub(crate) struct Opt { - /// Specify the logging output destination. - /// - /// Valid values are "stdout", "data-dir", or a custom path. - /// - /// `data-dir` is the default value. - /// - /// The data directory location is platform specific: - /// - Linux: $HOME/.local/share/safe/client/logs - /// - macOS: $HOME/Library/Application Support/safe/client/logs - /// - Windows: C:\Users\<username>\AppData\Roaming\safe\client\logs - #[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")] - pub log_output_dest: LogOutputDest, - - /// Specify the logging format. - /// - /// Valid values are "default" or "json". - /// - /// If the argument is not used, the default format will be applied. - #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)] - pub log_format: Option<LogFormat>, - - #[command(flatten)] - pub(crate) peers: PeersArgs, - - /// Available sub commands. 
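
As the field defined next shows, the subcommand is optional: `Opt::parse()` yields `None` when the binary is run bare, which `main` uses to print the help hint. A self-contained sketch of that optional-subcommand setup with stand-in names, assuming the same clap derive API:

```rust
use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Opt {
    /// Available sub commands.
    #[clap(subcommand)]
    cmd: Option<Cmd>,
}

#[derive(Subcommand, Debug)]
enum Cmd {
    /// Commands for file management.
    Files,
    /// Commands for register management.
    Register,
}

fn main() {
    match Opt::parse().cmd {
        // A subcommand was given: dispatch it, as `main` does per `SubCmd` variant.
        Some(cmd) => println!("dispatching {cmd:?}"),
        // No subcommand: fall back to a help hint.
        None => println!("Use --help to see available commands"),
    }
}
```
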
- #[clap(subcommand)] - pub cmd: Option, - - /// The maximum duration to wait for a connection to the network before timing out. - #[clap(long = "timeout", global = true, value_parser = |t: &str| -> Result { Ok(t.parse().map(Duration::from_secs)?) })] - pub connection_timeout: Option, - - /// Prevent verification of data storage on the network. - /// - /// This may increase operation speed, but offers no guarantees that operations were successful. - #[clap(global = true, long = "no-verify", short = 'x')] - pub no_verify: bool, - - /// Print the crate version. - #[clap(long)] - pub crate_version: bool, - - /// Print the network protocol version. - #[clap(long)] - pub protocol_version: bool, - - /// Print the package version. - #[clap(long)] - #[cfg(not(feature = "nightly"))] - pub package_version: bool, - - /// Print version information. - #[clap(long)] - pub version: bool, -} - -#[derive(Subcommand, Debug)] -pub(super) enum SubCmd { - #[clap(name = "wallet", subcommand)] - /// Commands for a hot-wallet management. - /// A hot-wallet holds the secret key, thus it can be used for signing transfers/transactions. - Wallet(wallet::hot_wallet::WalletCmds), - #[clap(name = "wowallet", subcommand)] - /// Commands for watch-only wallet management - /// A watch-only wallet holds only the public key, thus it cannot be used for signing - /// transfers/transactions, but only to query balances and broadcast offline signed transactions. - WatchOnlyWallet(wallet::wo_wallet::WatchOnlyWalletCmds), - #[clap(name = "files", subcommand)] - /// Commands for file management - Files(files::FilesCmds), - #[clap(name = "folders", subcommand)] - /// Commands for folders management - Folders(folders::FoldersCmds), - #[clap(name = "register", subcommand)] - /// Commands for register management - Register(register::RegisterCmds), -} diff --git a/sn_cli/src/bin/subcommands/register.rs b/sn_cli/src/bin/subcommands/register.rs deleted file mode 100644 index 675e1ae6c5..0000000000 --- a/sn_cli/src/bin/subcommands/register.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::PublicKey; -use clap::Subcommand; -use color_eyre::{eyre::WrapErr, Result, Section}; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::protocol::storage::RegisterAddress; -use sn_client::registers::Permissions; - -use sn_client::{Client, Error as ClientError, WalletClient}; -use std::path::Path; -use xor_name::XorName; - -#[derive(Subcommand, Debug)] -pub enum RegisterCmds { - /// Create a new register with a name. - Create { - /// The name of the register to create. This could be the app's name. - /// This is used along with your public key to derive the address of the register - #[clap(name = "name", short = 'n')] - name: String, - - /// Create the register with public write access. - /// By default only the owner can write to the register. - #[clap(name = "public", short = 'p')] - public: bool, - }, - Edit { - /// The address of the register to edit. 
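
Both `create_register` and `parse_addr` further down derive the register's metadata key by hashing the user-supplied name, so the same name combined with the same public key always resolves to the same address. A small sketch of that name-to-XorName step, using the `xor_name` and `hex` crates already in use in this file (the register name is illustrative):

```rust
use xor_name::XorName;

fn main() {
    // Hash the register name into the 32-byte metadata key, matching the
    // `XorName::from_content(name.as_bytes())` calls below.
    let name = "my-app-register";
    let meta = XorName::from_content(name.as_bytes());
    println!("register '{name}' -> meta xorname {}", hex::encode(meta));

    // The derivation is deterministic: the same name always yields the same xorname.
    assert_eq!(meta, XorName::from_content(name.as_bytes()));
}
```
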
- #[clap(name = "address")] - address: String, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register name instead of the address - #[clap(name = "name", short = 'n')] - use_name: bool, - /// The entry to add to the register. - #[clap(name = "entry")] - entry: String, - }, - Get { - /// The register addresses to get. - #[clap(name = "addresses")] - addresses: Vec, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register names instead of the addresses - #[clap(name = "name", short = 'n')] - use_name: bool, - }, -} - -pub(crate) async fn register_cmds( - cmds: RegisterCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - RegisterCmds::Create { name, public } => { - create_register(name, public, client, root_dir, verify_store).await? - } - RegisterCmds::Edit { - address, - use_name, - entry, - } => edit_register(address, use_name, entry, client, verify_store).await?, - RegisterCmds::Get { - addresses, - use_name, - } => get_registers(addresses, use_name, client).await?, - } - Ok(()) -} - -async fn create_register( - name: String, - public: bool, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - trace!("Starting to pay for Register storage"); - - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None) - .wrap_err("Unable to read wallet file in {path:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - let meta = XorName::from_content(name.as_bytes()); - let perms = match public { - true => Permissions::new_anyone_can_write(), - false => Permissions::default(), - }; - let (register, storage_cost, royalties_fees) = client - .create_and_pay_for_register(meta, &mut wallet_client, verify_store, perms) - .await?; - - if storage_cost.is_zero() { - println!("Register '{name}' already exists!",); - } else { - println!( - "Successfully created register '{name}' for {storage_cost:?} (royalties fees: {royalties_fees:?})!", - ); - } - - println!("REGISTER_ADDRESS={}", register.address().to_hex()); - - Ok(()) -} - -async fn edit_register( - address_str: String, - use_name: bool, - entry: String, - client: &Client, - verify_store: bool, -) -> Result<()> { - let (address, printing_name) = parse_addr(&address_str, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register from {address}"); - - match client.get_register(address).await { - Ok(mut register) => { - println!("Successfully retrieved Register {printing_name}",); - println!("Editing Register {printing_name} with: {entry}"); - match register.write_online(entry.as_bytes(), verify_store).await { - Ok(()) => {} - Err(ref err @ ClientError::ContentBranchDetected(ref branches)) => { - println!( - "We need to merge {} branches in Register entries: {err}", - branches.len() - ); - register - .write_merging_branches_online(entry.as_bytes(), verify_store) - .await?; - } - Err(err) => return Err(err.into()), - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! 
{error}" - ); - return Err(error.into()); - } - } - - Ok(()) -} - -async fn get_registers(addresses: Vec, use_name: bool, client: &Client) -> Result<()> { - for addr in addresses { - let (address, printing_name) = parse_addr(&addr, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register {printing_name}"); - - match client.get_register(address).await { - Ok(register) => { - println!("Successfully retrieved Register {printing_name}"); - let entries = register.read(); - println!("Register entries:"); - for (hash, bytes) in entries { - let data_str = match String::from_utf8(bytes.clone()) { - Ok(data_str) => data_str, - Err(_) => format!("{bytes:?}"), - }; - println!("{hash:?}: {data_str}"); - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! {error}" - ); - return Err(error.into()); - } - } - } - - Ok(()) -} - -/// Parse str and return the address and the register info for printing -fn parse_addr( - address_str: &str, - use_name: bool, - pk: PublicKey, -) -> Result<(RegisterAddress, String)> { - if use_name { - debug!("Parsing address as name"); - let user_metadata = XorName::from_content(address_str.as_bytes()); - let addr = RegisterAddress::new(user_metadata, pk); - Ok((addr, format!("'{address_str}' at {addr}"))) - } else { - debug!("Parsing address as hex"); - let addr = RegisterAddress::from_hex(address_str) - .wrap_err("Could not parse hex string") - .suggestion( - "If getting a register by name, use the `-n` flag eg:\n - safe register get -n ", - )?; - Ok((addr, format!("at {address_str}"))) - } -} diff --git a/sn_cli/src/bin/subcommands/wallet.rs b/sn_cli/src/bin/subcommands/wallet.rs deleted file mode 100644 index 0392c81874..0000000000 --- a/sn_cli/src/bin/subcommands/wallet.rs +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod audit; -pub(crate) mod helpers; -pub(crate) mod hot_wallet; -pub(crate) mod wo_wallet; - -use sn_client::transfers::{CashNote, HotWallet, MainPubkey, NanoTokens, WatchOnlyWallet}; -use sn_protocol::storage::SpendAddress; - -use crate::get_stdin_password_response; -use color_eyre::Result; -use std::{collections::BTreeSet, io::Read, path::Path}; - -// TODO: convert this into a Trait part of the wallet APIs. -pub(crate) enum WalletApiHelper { - WatchOnlyWallet(WatchOnlyWallet), - HotWallet(HotWallet), -} - -impl WalletApiHelper { - pub fn watch_only_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - Ok(Self::WatchOnlyWallet(wallet)) - } - - pub fn load_from(root_dir: &Path) -> Result { - let wallet = if HotWallet::is_encrypted(root_dir) { - println!("Wallet is encrypted. 
It needs a password to unlock."); - let password = get_stdin_password_response("Enter password: "); - let mut wallet = HotWallet::load_encrypted_from_path(root_dir, password.to_owned())?; - // Authenticate so that a user doesn't have to immediately provide the password again - wallet.authenticate_with_password(password)?; - wallet - } else { - HotWallet::load_from(root_dir)? - }; - - Ok(Self::HotWallet(wallet)) - } - - pub fn encrypt(root_dir: &Path, password: &str) -> Result<()> { - HotWallet::encrypt(root_dir, password)?; - Ok(()) - } - - pub fn balance(&self) -> NanoTokens { - match self { - Self::WatchOnlyWallet(w) => w.balance(), - Self::HotWallet(w) => w.balance(), - } - } - - pub fn status(&mut self) -> Result<()> { - self.authenticate()?; - - match self { - Self::WatchOnlyWallet(_) => Ok(()), - Self::HotWallet(w) => { - println!("Unconfirmed spends are:"); - for spend in w.unconfirmed_spend_requests().iter() { - let address = SpendAddress::from_unique_pubkey(&spend.spend.unique_pubkey); - println!( - "Unconfirmed spend {address:?} - {:?}, hex_str: {:?}", - spend.spend.unique_pubkey, - address.to_hex() - ); - println!( - "reason {:?}, amount {}, inputs: {}, outputs: {}", - spend.spend.reason, - spend.spend.amount(), - spend.spend.ancestors.len(), - spend.spend.descendants.len() - ); - println!("Inputs in hex str:"); - for input in spend.spend.ancestors.iter() { - let address = SpendAddress::from_unique_pubkey(input); - println!("Input spend {}", address.to_hex()); - } - println!("Outputs in hex str:"); - for (output, amount) in spend.spend.descendants.iter() { - let address = SpendAddress::from_unique_pubkey(output); - println!("Output {} with {amount}", address.to_hex()); - } - } - println!("Available cash notes are:"); - if let Ok(available_cnrs) = w.available_cash_notes() { - for cnr in available_cnrs.0.iter() { - println!("{cnr:?}"); - } - } - - Ok(()) - } - } - } - - pub fn read_cash_note_from_stdin(&mut self) -> Result<()> { - println!("Please paste your CashNote below:"); - let mut input = String::new(); - std::io::stdin().read_to_string(&mut input)?; - self.deposit_from_cash_note_hex(&input) - } - - pub fn deposit_from_cash_note_hex(&mut self, input: &str) -> Result<()> { - let cash_note = CashNote::from_hex(input.trim())?; - - let old_balance = self.balance(); - let cash_notes = vec![cash_note.clone()]; - - let spent_unique_pubkeys: BTreeSet<_> = cash_note - .parent_spends - .iter() - .map(|spend| spend.unique_pubkey()) - .collect(); - - match self { - Self::WatchOnlyWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - Self::HotWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - } - let new_balance = self.balance(); - println!("Successfully stored cash_note to wallet dir. 
\nOld balance: {old_balance}\nNew balance: {new_balance}"); - - Ok(()) - } - - pub fn deposit(&mut self, read_from_stdin: bool, cash_note: Option<&str>) -> Result<()> { - if read_from_stdin { - return self.read_cash_note_from_stdin(); - } - - if let Some(cash_note_hex) = cash_note { - return self.deposit_from_cash_note_hex(cash_note_hex); - } - - let previous_balance = self.balance(); - - self.try_load_cash_notes()?; - - let deposited = NanoTokens::from(self.balance().as_nano() - previous_balance.as_nano()); - if deposited.is_zero() { - println!("Nothing deposited."); - } else if let Err(err) = self.deposit_and_store_to_disk(&vec![]) { - println!("Failed to store deposited ({deposited}) amount: {err:?}"); - } else { - println!("Deposited {deposited}."); - } - - Ok(()) - } - - fn deposit_and_store_to_disk(&mut self, cash_notes: &Vec) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - Self::HotWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - } - Ok(()) - } - - fn try_load_cash_notes(&mut self) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.try_load_cash_notes()?, - Self::HotWallet(w) => w.try_load_cash_notes()?, - } - Ok(()) - } - - /// Authenticate with password for encrypted wallet. - fn authenticate(&mut self) -> Result<()> { - match self { - WalletApiHelper::WatchOnlyWallet(_) => Ok(()), - WalletApiHelper::HotWallet(w) => { - if w.authenticate().is_err() { - let password = get_stdin_password_response("Wallet password: "); - w.authenticate_with_password(password)?; - Ok(()) - } else { - Ok(()) - } - } - } - } -} - -fn watch_only_wallet_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let pk_hex = main_pk.to_hex(); - let folder_name = format!("pk_{}_{}", &pk_hex[..6], &pk_hex[pk_hex.len() - 6..]); - let wallet_dir = root_dir.join(folder_name); - println!( - "Loading watch-only local wallet from: {}", - wallet_dir.display() - ); - let wallet = WatchOnlyWallet::load_from(&wallet_dir, main_pk)?; - Ok(wallet) -} diff --git a/sn_cli/src/bin/subcommands/wallet/audit.rs b/sn_cli/src/bin/subcommands/wallet/audit.rs deleted file mode 100644 index c0e3833d50..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/audit.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
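The `WalletApiHelper` deleted above is a plain enum-dispatch wrapper: every call is matched once and forwarded to the concrete hot or watch-only wallet. A minimal, self-contained sketch of that shape (the `Mock*` types and `WalletKind` are invented for illustration, not sn_client types):

```rust
// Enum dispatch: one wrapper type, one match per API call.
struct MockHotWallet {
    balance: u64,
}
struct MockWatchOnlyWallet {
    balance: u64,
}

enum WalletKind {
    Hot(MockHotWallet),
    WatchOnly(MockWatchOnlyWallet),
}

impl WalletKind {
    // Shared read-only API: both variants can answer.
    fn balance(&self) -> u64 {
        match self {
            WalletKind::Hot(w) => w.balance,
            WalletKind::WatchOnly(w) => w.balance,
        }
    }

    // Hot-only API: the watch-only variant refuses, mirroring how the real
    // helper keeps signing out of watch-only wallets.
    fn sign(&self, msg: &[u8]) -> Result<Vec<u8>, &'static str> {
        match self {
            WalletKind::Hot(_) => Ok(msg.to_vec()), // stand-in for a real signature
            WalletKind::WatchOnly(_) => Err("watch-only wallets cannot sign"),
        }
    }
}

fn main() {
    let wallet = WalletKind::WatchOnly(MockWatchOnlyWallet { balance: 42 });
    println!("balance: {}", wallet.balance());
    assert!(wallet.sign(b"tx").is_err());
}
```

The TODO in the original ("convert this into a Trait part of the wallet APIs") would replace the repeated `match` arms with a trait implemented by both wallet types.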
- -use std::path::Path; -use std::str::FromStr; - -use bls::SecretKey; -use color_eyre::eyre::bail; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::{CashNoteRedemption, SpendAddress, Transfer, GENESIS_SPEND_UNIQUE_KEY}; -use sn_client::{Client, SpendDag}; - -const SPEND_DAG_FILENAME: &str = "spend_dag"; -const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; - -async fn step_by_step_spend_dag_gathering(client: &Client, mut dag: SpendDag) -> Result { - let start_time = std::time::Instant::now(); - println!("Gathering the Spend DAG, note that this might take a very long time..."); - let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE); - tokio::spawn(async move { - let mut spend_count = 0; - let mut exponential = 64; - while let Some(_spend) = rx.recv().await { - spend_count += 1; - if spend_count % exponential == 0 { - println!("Collected {spend_count} spends..."); - exponential *= 2; - } - } - }); - - client - .spend_dag_continue_from_utxos(&mut dag, Some(tx), false) - .await; - println!("Done gathering the Spend DAG in {:?}", start_time.elapsed()); - - // verify the DAG - if let Err(e) = dag.record_faults(&dag.source()) { - println!("DAG verification failed: {e}"); - } else { - let faults_len = dag.faults().len(); - println!("DAG verification successful, identified {faults_len} faults.",); - if faults_len > 0 { - println!("Logging identified faults: {:#?}", dag.faults()); - } - } - Ok(dag) -} - -/// Gather the Spend DAG from the Network and store it on disk -/// If a DAG is found on disk, it will continue from it -/// If fast_mode is true, gathers in a silent and fast way -/// else enjoy a step by step slow narrated gathering -async fn gather_spend_dag(client: &Client, root_dir: &Path, fast_mode: bool) -> Result { - let dag_path = root_dir.join(SPEND_DAG_FILENAME); - let inital_dag = match SpendDag::load_from_file(&dag_path) { - Ok(mut dag) => { - println!("Found a local spend dag on disk, continuing from it..."); - if fast_mode { - client - .spend_dag_continue_from_utxos(&mut dag, None, false) - .await; - } - dag - } - Err(err) => { - println!("Starting from Genesis as found no local spend dag on disk..."); - info!("Starting from Genesis as failed to load spend dag from disk: {err}"); - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - if fast_mode { - client - .spend_dag_build_from(genesis_addr, None, true) - .await? - } else { - client.new_dag_with_genesis_only().await? 
- }
- }
- };
-
- let dag = match fast_mode {
- true => inital_dag,
- false => step_by_step_spend_dag_gathering(client, inital_dag).await?,
- };
-
- println!("Saving DAG to disk at: {dag_path:?}");
- dag.dump_to_file(dag_path)?;
-
- Ok(dag)
-}
-
-pub async fn audit(
- client: &Client,
- to_dot: bool,
- royalties: bool,
- root_dir: &Path,
- foundation_sk: Option<SecretKey>,
-) -> Result<()> {
- let fast_mode = to_dot || royalties || foundation_sk.is_some();
- let dag = gather_spend_dag(client, root_dir, fast_mode).await?;
-
- if to_dot {
- println!("========================== spends DAG digraph ==========================");
- println!("{}", dag.dump_dot_format());
- }
- if let Some(sk) = foundation_sk {
- println!(
- "========================== payment forward statistics =========================="
- );
- println!("{}", dag.dump_payment_forward_statistics(&sk));
- }
- if royalties {
- let royalties = dag.all_royalties()?;
- redeem_royalties(royalties, client, root_dir).await?;
- }
-
- println!("Audit completed successfully.");
- Ok(())
-}
-
-/// Redeem royalties from the Network and deposit them into the wallet
-/// Only works if the wallet has the private key for the royalties
-async fn redeem_royalties(
- royalties: Vec<CashNoteRedemption>,
- client: &Client,
- root_dir: &Path,
-) -> Result<()> {
- if royalties.is_empty() {
- println!("No royalties found to redeem.");
- return Ok(());
- } else {
- println!("Found {} royalties.", royalties.len());
- }
-
- let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?;
-
- // batch royalties per 100
- let mut batch = Vec::new();
- for (i, royalty) in royalties.iter().enumerate() {
- batch.push(royalty.clone());
- if i % 100 == 0 {
- println!(
- "Attempting to redeem {} royalties from the Network...",
- batch.len()
- );
- let transfer = Transfer::NetworkRoyalties(batch.clone());
- batch.clear();
- println!("Current balance: {}", wallet.balance());
- let cashnotes = client.receive(&transfer, &wallet).await?;
- wallet.deposit_and_store_to_disk(&cashnotes)?;
- println!("Successfully redeemed royalties from the Network.");
- println!("Current balance: {}", wallet.balance());
- }
- }
- Ok(())
-}
-
-/// Verify a spend's existence on the Network.
-/// If genesis is true, verify all the way to Genesis, note that this might take A VERY LONG TIME
-pub async fn verify_spend_at(
- spend_address: String,
- genesis: bool,
- client: &Client,
- root_dir: &Path,
-) -> Result<()> {
- // get spend
- println!("Verifying spend's existence at: {spend_address}");
- let addr = SpendAddress::from_str(&spend_address)?;
- let spend = match client.get_spend_from_network(addr).await {
- Ok(s) => {
- println!("Confirmed spend's existence on the Network at {addr:?}");
- s
- }
- Err(err) => {
- bail!("Could not confirm spend's validity, be careful: {err}")
- }
- };
-
- // stop here if we don't go all the way to Genesis
- if !genesis {
- return Ok(());
- }
- println!("Verifying spend all the way to Genesis, note that this might take a while...");
-
- // extend DAG until spend
- let dag_path = root_dir.join(SPEND_DAG_FILENAME);
- let mut dag = match SpendDag::load_from_file(&dag_path) {
- Ok(d) => {
- println!("Found a local spend dag on disk, continuing from it, this might make things faster...");
- d
- }
- Err(err) => {
- info!("Starting verification from an empty DAG as failed to load spend dag from disk: {err}");
- let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY);
- SpendDag::new(genesis_addr)
- }
- };
- info!("Extending DAG with {spend_address} {addr:?}");
- client.spend_dag_extend_until(&mut dag, addr, spend).await?;
- info!("Saving DAG locally at: {dag_path:?}");
- dag.dump_to_file(dag_path)?;
-
- // verify spend is not faulty
- let faults = dag.get_spend_faults(&addr);
- if faults.is_empty() {
- println!(
- "Successfully confirmed spend at {spend_address} is valid, and comes from Genesis!"
- );
- } else {
- println!("Spend at {spend_address} has {} faults", faults.len());
- println!("{faults:#?}");
- }
-
- Ok(())
-}
diff --git a/sn_cli/src/bin/subcommands/wallet/helpers.rs b/sn_cli/src/bin/subcommands/wallet/helpers.rs
deleted file mode 100644
index e3ef2d6687..0000000000
--- a/sn_cli/src/bin/subcommands/wallet/helpers.rs
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
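Both `gather_spend_dag` and `verify_spend_at` above follow the same resume-from-disk pattern: try to load previous progress, fall back to a fresh start, do the work, then persist for the next run. A stripped-down, std-only sketch of that pattern (the line-per-entry file format here is an invented stand-in for the serialized DAG):

```rust
use std::{fs, path::Path};

// Load previous progress if a file exists, otherwise start fresh. This is the
// same branch structure as `SpendDag::load_from_file` falling back to
// `SpendDag::new(genesis_addr)` above.
fn load_or_init(path: &Path) -> Vec<String> {
    match fs::read_to_string(path) {
        Ok(contents) => {
            println!("Found previous progress on disk, continuing from it...");
            contents.lines().map(String::from).collect()
        }
        Err(err) => {
            println!("Starting fresh; no progress found on disk: {err}");
            Vec::new()
        }
    }
}

// Persist the (possibly extended) state so the next run can resume.
fn save(path: &Path, items: &[String]) -> std::io::Result<()> {
    fs::write(path, items.join("\n"))
}

fn main() -> std::io::Result<()> {
    let path = Path::new("progress.txt");
    let mut items = load_or_init(path);
    items.push(format!("entry-{}", items.len())); // one unit of new work
    save(path, &items)
}
```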
- -#[cfg(feature = "distribution")] -use base64::Engine; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::Transfer; -use sn_client::Client; -use std::path::Path; -use url::Url; - -#[cfg(feature = "distribution")] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - address: Option, - signature: Option, -) -> Result<()> { - if address.is_some() ^ signature.is_some() { - println!("Address and signature must both be specified."); - return Ok(()); - } - if address.is_none() && signature.is_none() { - get_faucet_fixed_amount(root_dir, client, url).await?; - } else if let Some(addr) = address { - if let Some(sig) = signature { - get_faucet_distribution(root_dir, client, url, addr, sig).await?; - } - } - Ok(()) -} - -#[cfg(not(feature = "distribution"))] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - _address: Option, - _signature: Option, -) -> Result<()> { - get_faucet_fixed_amount(root_dir, client, url).await -} - -pub async fn get_faucet_fixed_amount(root_dir: &Path, client: &Client, url: String) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let address_hex = wallet.address().to_hex(); - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - let req_url = Url::parse(&format!("{url}/{address_hex}"))?; - println!("Requesting token for wallet address: {address_hex}"); - - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let body = response.text().await?; - if is_ok { - receive(body, false, client, root_dir).await?; - println!("Successfully got tokens from faucet."); - } else { - println!("Failed to get tokens from faucet, server responded with: {body:?}"); - } - Ok(()) -} - -#[cfg(feature = "distribution")] -pub async fn get_faucet_distribution( - root_dir: &Path, - client: &Client, - url: String, - address: String, - signature: String, -) -> Result<()> { - // submit the details to the faucet to get the distribution - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - // receive to the current local wallet - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)? - .address() - .to_hex(); - println!("Requesting distribution for maid address {address} to local wallet {wallet}"); - // base64 uses + and / as the delimiters which doesn't go well in the query - // string, so the signature is encoded using url safe characters. 
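The comment above describes the re-encoding step that follows: the signature arrives as standard base64, whose `+` and `/` clash with URL query syntax, so it is decoded and re-encoded with the URL-safe alphabet. A sketch of just that step, assuming the `base64` crate's `Engine` API (0.21+), which the deleted code already imports:

```rust
use base64::Engine;

// Decode standard base64, then re-encode with the URL-safe alphabet
// ('+' becomes '-', '/' becomes '_'), so the value can sit in a query string.
fn to_url_safe(sig_b64: &str) -> Result<String, base64::DecodeError> {
    let bytes = base64::engine::general_purpose::STANDARD.decode(sig_b64)?;
    Ok(base64::engine::general_purpose::URL_SAFE.encode(bytes))
}

fn main() {
    let standard = base64::engine::general_purpose::STANDARD.encode([0xfb, 0xff]);
    assert_eq!(standard, "+/8=");
    assert_eq!(to_url_safe(&standard).unwrap(), "-_8=");
    println!("ok");
}
```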
- let sig_bytes = base64::engine::general_purpose::STANDARD.decode(signature)?; - let sig_url = base64::engine::general_purpose::URL_SAFE.encode(sig_bytes); - let req_url = Url::parse(&format!( - "{url}/distribution?address={address}&wallet={wallet}&signature={sig_url}" - ))?; - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let transfer_hex = response.text().await?; - if !is_ok { - println!( - "Failed to get distribution from faucet, server responded with:\n{transfer_hex:?}" - ); - return Ok(()); - } - println!("Receiving transfer for maid address {address}:\n{transfer_hex}"); - receive(transfer_hex, false, client, root_dir).await?; - Ok(()) -} - -pub async fn receive( - transfer: String, - is_file: bool, - client: &Client, - root_dir: &Path, -) -> Result<()> { - let transfer = if is_file { - std::fs::read_to_string(transfer)?.trim().to_string() - } else { - transfer - }; - - let transfer = match Transfer::from_hex(&transfer) { - Ok(transfer) => transfer, - Err(err) => { - println!("Failed to parse transfer: {err:?}"); - println!("Transfer: \"{transfer}\""); - return Err(err.into()); - } - }; - println!("Successfully parsed transfer. "); - - println!("Verifying transfer with the Network..."); - let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let cashnotes = match client.receive(&transfer, &wallet).await { - Ok(cashnotes) => cashnotes, - Err(err) => { - println!("Failed to verify and redeem transfer: {err:?}"); - return Err(err.into()); - } - }; - println!("Successfully verified transfer."); - - let old_balance = wallet.balance(); - wallet.deposit_and_store_to_disk(&cashnotes)?; - let new_balance = wallet.balance(); - - println!("Successfully stored cash_note to wallet dir."); - println!("Old balance: {old_balance}"); - println!("New balance: {new_balance}"); - - Ok(()) -} diff --git a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs b/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs deleted file mode 100644 index 6b209a9625..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - audit::{audit, verify_spend_at}, - helpers::{get_faucet, receive}, - WalletApiHelper, -}; -use crate::{get_stdin_password_response, get_stdin_response}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{eyre::eyre, Result}; -use dialoguer::Confirm; -use sn_cli::utils::is_valid_key_hex; -use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; -use sn_client::transfers::{ - HotWallet, MainPubkey, MainSecretKey, NanoTokens, Transfer, TransferError, UnsignedTransaction, - WalletError, -}; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error as ClientError, -}; -use std::{path::Path, str::FromStr}; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. 
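The "please do not remove the blank lines" note above exists because clap's derive turns these `///` doc comments into CLI help text: the first line becomes the short one-line summary, and a blank doc line separates it from the longer description shown only by `--help`. A minimal sketch of that behavior (assuming clap 4 with the `derive` feature; `DemoCmds` is invented for illustration):

```rust
use clap::Parser;

/// Demo wallet commands.
#[derive(Parser, Debug)]
enum DemoCmds {
    /// Print the wallet address.
    Address,
    /// Send a transfer.
    ///
    /// The blank doc line above keeps this paragraph out of the one-line
    /// summary; it only appears in the long `--help` output.
    Send {
        /// The number of tokens to send.
        amount: String,
    },
}

fn main() {
    let cmd = DemoCmds::parse();
    println!("{cmd:?}");
}
```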
-#[derive(Parser, Debug)]
-pub enum WalletCmds {
- /// Print the wallet address.
- Address,
- /// Print the wallet balance.
- Balance {
- /// Instead of checking CLI local wallet balance, the PeerId of a node can be used
- /// to check the balance of its rewards local wallet. Multiple ids can be provided
- /// in order to read the balance of multiple nodes at once.
- #[clap(long)]
- peer_id: Vec<String>,
- },
- /// Create a hot wallet.
- Create {
- /// Optional flag to not replace existing wallet.
- #[clap(long, action)]
- no_replace: bool,
- /// Optional flag to not add a password.
- #[clap(long, action)]
- no_password: bool,
- /// Optional hex-encoded main secret key.
- #[clap(long, short, name = "key")]
- key: Option<String>,
- /// Optional derivation passphrase to protect the mnemonic,
- /// it's not the source of the entropy for the mnemonic generation.
- /// The mnemonic+passphrase will be the seed. See detail at
- /// ``
- #[clap(long, short, name = "derivation")]
- derivation_passphrase: Option<String>,
- /// Optional password to encrypt the wallet with.
- #[clap(long, short)]
- password: Option<String>,
- },
- /// Get tokens from a faucet.
- GetFaucet {
- /// The http url of the faucet to get tokens from.
- #[clap(name = "url")]
- url: String,
- /// The maidsafecoin address to claim. Leave blank to receive a fixed
- /// amount of tokens.
- maid_address: Option<String>,
- /// A signature of the safe wallet address, made by the maidsafecoin
- /// address.
- signature: Option<String>,
- },
- /// Send a transfer.
- ///
- /// This command will create a new transfer and encrypt it for the recipient.
- /// This encrypted transfer can then be shared with the recipient, who can then
- /// use the 'receive' command to claim the funds.
- Send {
- /// The number of SafeNetworkTokens to send.
- #[clap(name = "amount")]
- amount: String,
- /// Hex-encoded public address of the recipient.
- #[clap(name = "to")]
- to: String,
- },
- /// Signs a transaction that can then be broadcast to the network.
- Sign {
- /// Hex-encoded unsigned transaction. It requires that a hot-wallet has been created for the CLI.
- #[clap(name = "tx")]
- tx: String,
- /// Avoid prompts by assuming `yes` as the answer.
- #[clap(long, name = "force", default_value = "false")]
- force: bool,
- },
- /// Receive a transfer created by the 'send' or 'broadcast' command.
- Receive {
- /// Read the encrypted transfer from a file.
- #[clap(long, default_value = "false")]
- file: bool,
- /// Encrypted transfer.
- #[clap(name = "transfer")]
- transfer: String,
- },
- /// Verify a spend on the Network.
- Verify {
- /// The Network address or hex encoded UniquePubkey of the Spend to verify
- #[clap(name = "spend")]
- spend_address: String,
- /// Verify all the way to Genesis
- ///
- /// Used for auditing, note that this might take a very long time
- /// Analogous to verifying a UTXO through the entire blockchain in Bitcoin
- #[clap(long, default_value = "false")]
- genesis: bool,
- },
- /// Audit the Currency
- /// Note that this might take a very long time
- /// Analogous to verifying the entire blockchain in Bitcoin
- ///
- /// When run without any flags, runs in verbose mode,
- /// a slower but more informative mode where DAG collection progress is displayed
- Audit {
- /// EXPERIMENTAL Dump Audit DAG in dot format on stdout
- #[clap(long, default_value = "false")]
- dot: bool,
- /// EXPERIMENTAL redeem all royalties
- #[clap(long, default_value = "false")]
- royalties: bool,
- /// Hex string of the Foundation SK.
- /// Providing this key allows displaying rewards statistics gathered from the DAG.
- #[clap(long, name = "sk_str")] - sk_str: Option, - }, - Status, - /// Encrypt wallet with a password. - Encrypt, -} - -pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Path) -> Result<()> { - match cmds { - WalletCmds::Address => { - let wallet = WalletApiHelper::load_from(root_dir)?; - match wallet { - WalletApiHelper::WatchOnlyWallet(w) => println!("{:?}", w.address()), - WalletApiHelper::HotWallet(w) => println!("{:?}", w.address()), - } - Ok(()) - } - WalletCmds::Balance { peer_id } => { - if peer_id.is_empty() { - let wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - } else { - let default_node_dir_path = dirs_next::data_dir() - .ok_or_else(|| eyre!("Failed to obtain data directory path"))? - .join("safe") - .join("node"); - - for id in peer_id { - let path = default_node_dir_path.join(id); - let rewards = WalletApiHelper::load_from(&path)?.balance(); - println!("Node's rewards wallet balance (PeerId: {id}): {rewards}"); - } - } - Ok(()) - } - WalletCmds::Create { - no_replace, - no_password, - key, - derivation_passphrase, - password, - } => { - let mut wallet_already_exists = false; - if key.is_some() && derivation_passphrase.is_some() { - return Err(eyre!( - "Only one of `--key` or `--derivation` may be specified" - )); - } - if *no_password && password.is_some() { - return Err(eyre!( - "Only one of `--no-password` or `--password` may be specified" - )); - } - if let Some(key) = key { - // Check if key is valid - // Doing this early to avoid stashing an existing wallet while the provided key is invalid - if !is_valid_key_hex(key) { - return Err(eyre!("Please provide a valid secret key in hex format. It must be 64 characters long.")); - } - } - // Check for existing wallet - if HotWallet::is_encrypted(root_dir) { - wallet_already_exists = true; - println!("Existing encrypted wallet found."); - } else if let Ok(existing_wallet) = WalletApiHelper::load_from(root_dir) { - wallet_already_exists = true; - let balance = existing_wallet.balance(); - println!("Existing wallet found with balance of {balance}"); - } - // If a wallet already exists, ask the user if they want to replace it - if wallet_already_exists { - let response = if *no_replace { - "n".to_string() - } else { - get_stdin_response("Replace existing wallet with new wallet? [y/N]") - }; - if response != "y" { - // Do nothing, return ok and prevent any further operations - println!("Exiting without creating new wallet"); - return Ok(()); - } - // remove existing wallet - let new_location = HotWallet::stash(root_dir)?; - println!("Old wallet stored at {}", new_location.display()); - } - let main_sk = if let Some(key) = key { - let sk = SecretKey::from_hex(key) - .map_err(|err| eyre!("Failed to parse hex-encoded SK: {err:?}"))?; - MainSecretKey::new(sk) - } else { - // If no key is specified, use the mnemonic - let mnemonic = load_or_create_mnemonic(root_dir)?; - secret_key_from_mnemonic(mnemonic, derivation_passphrase.to_owned())? - }; - // Ask user if they want to encrypt the wallet with a password - let password = if *no_password { - None - } else if let Some(password) = password { - Some(password.to_owned()) - } else { - request_password(false) - }; - // Create the new wallet with the new key - let main_pubkey = main_sk.main_pubkey(); - let local_wallet = HotWallet::create_from_key(root_dir, main_sk, password)?; - let balance = local_wallet.balance(); - println!( - "Hot Wallet created (balance {balance}) for main public key: {main_pubkey:?}." 
- ); - Ok(()) - } - WalletCmds::Sign { tx, force } => sign_transaction(tx, root_dir, *force), - WalletCmds::Status => { - let mut wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - wallet.status()?; - Ok(()) - } - WalletCmds::Encrypt => { - println!("Encrypt your wallet with a password. WARNING: If you forget your password, you will lose access to your wallet!"); - // Ask user for a new password to encrypt the wallet with - if let Some(password) = request_password(true) { - WalletApiHelper::encrypt(root_dir, &password)?; - } - println!("Wallet successfully encrypted."); - Ok(()) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wallet_cmds( - cmds: WalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WalletCmds::Send { amount, to } => send(amount, to, client, root_dir, verify_store).await, - WalletCmds::Receive { file, transfer } => receive(transfer, file, client, root_dir).await, - WalletCmds::GetFaucet { - url, - maid_address, - signature, - } => get_faucet(root_dir, client, url.clone(), maid_address, signature).await, - WalletCmds::Audit { - dot, - royalties, - sk_str, - } => { - let sk_key = if let Some(s) = sk_str { - match SecretKey::from_hex(&s) { - Ok(sk_key) => Some(sk_key), - Err(err) => { - return Err(eyre!( - "Cann't parse Foundation SK from input string: {s} {err:?}" - )) - } - } - } else { - None - }; - audit(client, dot, royalties, root_dir, sk_key).await - } - WalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -async fn send( - amount: String, - to: String, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - let from = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let amount = match NanoTokens::from_str(&amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let cash_note = match sn_client::send(from, amount, to, client, verify_store).await { - Ok(cash_note) => { - let wallet = HotWallet::load_from(root_dir)?; - println!("Sent {amount:?} to {to:?}"); - println!("New wallet balance is {}.", wallet.balance()); - cash_note - } - Err(err) => { - match err { - ClientError::AmountIsZero => { - println!("Zero amount passed in. 
Nothing sent."); - } - ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - ))) => { - println!("Could not send due to low balance.\nBalance: {available:?}\nRequired: {required:?}"); - } - _ => { - println!("Failed to send {amount:?} to {to:?} due to {err:?}."); - } - } - return Err(err.into()); - } - }; - - let transfer = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?; - println!("The encrypted transfer has been successfully created."); - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the 'receive' command to claim the funds."); - - Ok(()) -} - -fn sign_transaction(tx: &str, root_dir: &Path, force: bool) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let unsigned_tx = UnsignedTransaction::from_hex(tx)?; - - println!("The unsigned transaction has been successfully decoded:"); - for (i, (unique_pk, amount)) in unsigned_tx.spent_unique_keys().iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", unique_pk.to_hex()); - println!("\tAmount: {amount}"); - - for (descendant, amount) in unsigned_tx.output_unique_keys().iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!("\n** Please make sure the above information is correct before signing it. **\n"); - let confirmation = Confirm::new() - .with_prompt("Do you want to sign the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction not signed."); - return Ok(()); - } - } - - println!("Signing the transaction with local hot-wallet..."); - let signed_tx = wallet.sign(unsigned_tx)?; - - println!( - "The transaction has been successfully signed:\n\n{}\n", - signed_tx.to_hex()? - ); - println!( - "Please copy the above text, and broadcast it to the network with 'wallet broadcast' cmd." - ); - - Ok(()) -} - -fn request_password(required: bool) -> Option { - 'outer: loop { - let prompt = if required { - "Enter password: " - } else { - "Enter password (leave empty for none): " - }; - - let password_response = get_stdin_password_response(prompt); - - if required && password_response.is_empty() { - println!("Password is required."); - continue 'outer; - } - - // If a password is set, request user to repeat it - if !password_response.is_empty() { - const MAX_RETRIES: u8 = 2; - let mut retries = 0u8; - - loop { - let repeat_password = get_stdin_password_response("Repeat password: "); - - if repeat_password == password_response { - break; - } else if retries >= MAX_RETRIES { - // User forgot the password, let them reset it again - println!("You might have forgotten the password. Please set a new one."); - continue 'outer; - } else { - println!("Passwords do not match."); - retries += 1; - } - } - - break Some(password_response); - } - - break None; - } -} diff --git a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs b/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs deleted file mode 100644 index c4513754ba..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{audit::verify_spend_at, watch_only_wallet_from_pk, WalletApiHelper}; - -use bls::PublicKey; -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use dialoguer::Confirm; -use sn_client::transfers::{MainPubkey, NanoTokens, SignedTransaction, Transfer, WatchOnlyWallet}; -use sn_client::Client; -use std::{path::Path, str::FromStr}; -use walkdir::WalkDir; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser, Debug)] -pub enum WatchOnlyWalletCmds { - /// Print the watch-only wallets addresses. - Addresses, - /// Print the wallet balance. - Balance { - /// The hex-encoded public key of an existing watch-only wallet. - #[clap(name = "public key")] - pk: Option, - }, - /// Deposit CashNotes from the received directory to the chosen watch-only wallet. - /// Or Read a hex encoded CashNote from stdin. - /// - /// The default received directory is platform specific: - /// - Linux: $HOME/.local/share/safe/client/\/cash_notes - /// - macOS: $HOME/Library/Application Support/safe/client/\/cash_notes - /// - Windows: C:\Users\{username}\AppData\Roaming\safe\client\\\cash_notes - /// - /// If you find the default path unwieldy, you can also set the RECEIVED_CASHNOTES_PATH environment - /// variable to a path you would prefer to work with. - #[clap(verbatim_doc_comment)] - Deposit { - /// Read a hex encoded CashNote from stdin. - #[clap(long, default_value = "false")] - stdin: bool, - /// The hex encoded CashNote. - #[clap(long)] - cash_note: Option, - /// The hex-encoded public key of an existing watch-only wallet to deposit into it. - #[clap(name = "public key")] - pk: String, - }, - /// Create a watch-only wallet from the given (hex-encoded) key. - Create { - /// Hex-encoded main public key. - #[clap(name = "public key")] - pk: String, - }, - /// Builds an unsigned transaction to be signed offline. It requires an existing watch-only wallet. - Transaction { - /// Hex-encoded public key of the source watch-only wallet. - #[clap(name = "from")] - from: String, - /// The number of SafeNetworkTokens to transfer. - #[clap(name = "amount")] - amount: String, - /// Hex-encoded public address of the recipient. - #[clap(name = "to")] - to: String, - }, - /// This command turns an offline signed transaction into a valid sendable Transfer - /// The signed transaction's SignedSpends are broadcasted to the Network and the recipient's Transfer is returned - /// This Transfer can then be sent and redeemed by the recipient using the 'receive' command - Broadcast { - /// Hex-encoded signed transaction. - #[clap(name = "signed Tx")] - signed_tx: String, - /// Avoid prompts by assuming `yes` as the answer. - #[clap(long, name = "force", default_value = "false")] - force: bool, - }, - /// Verify a spend on the Network. 
- Verify { - /// The Network address or hex encoded UniquePubkey of the Spend to verify - #[clap(name = "spend")] - spend_address: String, - /// Verify all the way to Genesis - /// - /// Used for auditing, note that this might take a very long time - /// Analogous to verifying an UTXO through the entire blockchain in Bitcoin - #[clap(long, default_value = "false")] - genesis: bool, - }, -} - -pub(crate) async fn wo_wallet_cmds_without_client( - cmds: &WatchOnlyWalletCmds, - root_dir: &Path, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Addresses => { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Addresses of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - for (wo_wallet, _) in wallets { - println!("- {:?}", wo_wallet.address()); - } - Ok(()) - } - WatchOnlyWalletCmds::Balance { pk } => { - if let Some(pk) = pk { - let main_pk = MainPubkey::from_hex(pk)?; - let watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - println!("{}", watch_only_wallet.balance()); - } else { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Balances of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - let mut total = NanoTokens::zero(); - for (wo_wallet, folder_name) in wallets { - let balance = wo_wallet.balance(); - println!("{folder_name}: {balance}"); - total = total - .checked_add(balance) - .ok_or(eyre!("Failed to add to total balance"))?; - } - println!("Total: {total}"); - } - Ok(()) - } - WatchOnlyWalletCmds::Deposit { - stdin, - cash_note, - pk, - } => { - let main_pk = MainPubkey::from_hex(pk)?; - let mut wallet = WalletApiHelper::watch_only_from_pk(main_pk, root_dir)?; - wallet.deposit(*stdin, cash_note.as_deref()) - } - WatchOnlyWalletCmds::Create { pk } => { - let pk = PublicKey::from_hex(pk) - .map_err(|err| eyre!("Failed to parse hex-encoded PK: {err:?}"))?; - let main_pk = MainPubkey::new(pk); - let main_pubkey = main_pk.public_key(); - let watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let balance = watch_only_wallet.balance(); - println!("Watch-only wallet created (balance {balance}) for main public key: {main_pubkey:?}."); - Ok(()) - } - WatchOnlyWalletCmds::Transaction { from, amount, to } => { - build_unsigned_transaction(from, amount, to, root_dir) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wo_wallet_cmds( - cmds: WatchOnlyWalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Broadcast { signed_tx, force } => { - broadcast_signed_tx(signed_tx, client, verify_store, force).await - } - WatchOnlyWalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -fn get_watch_only_wallets(root_dir: &Path) -> Result> { - let mut wallets = vec![]; - for entry in WalkDir::new(root_dir.display().to_string()) - .into_iter() - .flatten() - { - if let Some(file_name) = entry.path().file_name().and_then(|name| name.to_str()) { - if file_name.starts_with("pk_") { - let wallet_dir = root_dir.join(file_name); - if let Ok(wo_wallet) = WatchOnlyWallet::load_from_path(&wallet_dir) { - wallets.push((wo_wallet, file_name.to_string())); - } - } - } - } - if wallets.is_empty() { - bail!("No watch-only wallets found at {}", root_dir.display()); - } - - Ok(wallets) -} - -fn 
build_unsigned_transaction(from: &str, amount: &str, to: &str, root_dir: &Path) -> Result<()> { - let main_pk = MainPubkey::from_hex(from)?; - let mut wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let amount = match NanoTokens::from_str(amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let unsigned_transfer = wallet.build_unsigned_transaction(vec![(amount, to)], None)?; - - println!( - "The unsigned transaction has been successfully created:\n\n{}\n", - hex::encode(rmp_serde::to_vec(&unsigned_transfer)?) - ); - println!("Please copy the above text, sign it offline with 'wallet sign' cmd, and then use the signed transaction to broadcast it with 'wallet broadcast' cmd."); - - Ok(()) -} - -async fn broadcast_signed_tx( - signed_tx: String, - client: &Client, - verify_store: bool, - force: bool, -) -> Result<()> { - let signed_tx = match SignedTransaction::from_hex(&signed_tx) { - Ok(signed_tx) => signed_tx, - Err(err) => { - bail!("Failed to decode the signed transaction: {err:?}"); - } - }; - println!("The signed transaction has been successfully decoded:"); - - for (i, signed_spend) in signed_tx.spends.iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", signed_spend.unique_pubkey().to_hex()); - println!("\tAmount: {}", signed_spend.amount()); - - if let Err(err) = signed_spend.verify() { - bail!("Transaction is invalid: {err:?}"); - } - - for (descendant, amount) in signed_spend.spend.descendants.iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!( - "\n** Please make sure the above information is correct before broadcasting it. **\n" - ); - let confirmation = Confirm::new() - .with_prompt("Do you want to broadcast the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction was not broadcasted."); - return Ok(()); - } - } - - println!("Broadcasting the transaction to the network..."); - // return the first CashNote (assuming there is only one because we only sent to one recipient) - let cash_note = match &signed_tx.output_cashnotes[..] { - [cashnote] => cashnote, - [_multiple, ..] => bail!("Multiple CashNotes were returned from the transaction when only one was expected. 
This is a BUG."), - [] =>bail!("No CashNotes were built from the Tx.") - }; - - // send to network - client - .send_spends(signed_tx.spends.iter(), verify_store) - .await - .map_err(|err| { - eyre!("The transfer was not successfully registered in the network: {err:?}") - })?; - - println!("Transaction broadcasted!."); - - let transfer = Transfer::transfer_from_cash_note(cash_note)?.to_hex()?; - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the wallet 'receive' command to claim the funds.\n"); - - if let Some(change_cn) = signed_tx.change_cashnote { - let change_transfer = Transfer::transfer_from_cash_note(&change_cn)?.to_hex()?; - println!("Please redeem the change from this Transaction:\n\n{change_transfer}\n"); - println!("You should use the wallet 'deposit' command to be able to use these funds.\n"); - } - - Ok(()) -} diff --git a/sn_cli/src/files.rs b/sn_cli/src/files.rs deleted file mode 100644 index 66341f4865..0000000000 --- a/sn_cli/src/files.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod chunk_manager; -mod download; -mod estimate; -mod files_uploader; -mod upload; - -pub use chunk_manager::ChunkManager; -pub use download::{download_file, download_files}; -pub use estimate::Estimator; -pub use files_uploader::{FilesUploadStatusNotifier, FilesUploadSummary, FilesUploader}; -pub use upload::{UploadedFile, UPLOADED_FILES}; - -use color_eyre::Result; -use indicatif::{ProgressBar, ProgressStyle}; -use std::time::Duration; - -pub fn get_progress_bar(length: u64) -> Result { - let progress_bar = ProgressBar::new(length); - progress_bar.set_style( - ProgressStyle::default_bar() - .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")? - .progress_chars("#>-"), - ); - progress_bar.enable_steady_tick(Duration::from_millis(100)); - Ok(progress_bar) -} diff --git a/sn_cli/src/files/chunk_manager.rs b/sn_cli/src/files/chunk_manager.rs deleted file mode 100644 index 577ff0e111..0000000000 --- a/sn_cli/src/files/chunk_manager.rs +++ /dev/null @@ -1,1045 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
-
-use super::get_progress_bar;
-use super::upload::UploadedFile;
-use bytes::Bytes;
-use color_eyre::{
- eyre::{bail, eyre},
- Result,
-};
-use rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
-use sn_client::{
- protocol::storage::{Chunk, ChunkAddress},
- FilesApi,
-};
-use std::{
- collections::{BTreeMap, BTreeSet},
- ffi::OsString,
- fs::{self, File},
- io::Write,
- path::{Path, PathBuf},
- time::Instant,
-};
-use tracing::{debug, error, info, trace};
-use walkdir::{DirEntry, WalkDir};
-use xor_name::XorName;
-
-const CHUNK_ARTIFACTS_DIR: &str = "chunk_artifacts";
-const METADATA_FILE: &str = "metadata";
-
-// The unique hex-encoded hash(path).
-// This allows us to uniquely identify if a file has been chunked or not.
-// An alternative to using the filename, which might not be unique.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
-struct PathXorName(String);
-
-impl PathXorName {
- fn new(path: &Path) -> PathXorName {
- // we just need a unique value per path, thus we don't have to worry about the
- // [u8]/[u16] differences
- let path_as_lossy_str = path.as_os_str().to_string_lossy();
- let path_xor = XorName::from_content(path_as_lossy_str.as_bytes());
- PathXorName(hex::encode(path_xor))
- }
-}
-
-/// Info about a file that has been chunked
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct ChunkedFile {
- pub file_path: PathBuf,
- pub file_name: OsString,
- pub head_chunk_address: ChunkAddress,
- pub chunks: BTreeSet<(XorName, PathBuf)>,
- pub data_map: Chunk,
-}
-
-/// Manages the chunking process by resuming pre-chunked files and chunking any
-/// file that has not been chunked yet.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
-pub struct ChunkManager {
- /// Whole client root dir
- root_dir: PathBuf,
- /// Dir for chunk artifacts
- artifacts_dir: PathBuf,
- files_to_chunk: Vec<(OsString, PathXorName, PathBuf)>,
- chunks: BTreeMap<PathXorName, ChunkedFile>,
- completed_files: Vec<(PathBuf, OsString, ChunkAddress)>,
- resumed_chunk_count: usize,
- resumed_files_count: usize,
-}
-
-impl ChunkManager {
- // Provide the root_dir. The function creates a sub-directory to store the SE chunks
- pub fn new(root_dir: &Path) -> Self {
- let artifacts_dir = root_dir.join(CHUNK_ARTIFACTS_DIR);
- Self {
- root_dir: root_dir.to_path_buf(),
- artifacts_dir,
- files_to_chunk: Default::default(),
- chunks: Default::default(),
- completed_files: Default::default(),
- resumed_files_count: 0,
- resumed_chunk_count: 0,
- }
- }
-
- /// Chunk all the files in the provided `files_path`
- /// These are stored to the CHUNK_ARTIFACTS_DIR
- /// if read_cache is true, will take cache from previous runs into account
- ///
- /// # Arguments
- /// * files_path - &[Path]
- /// * read_cache - Boolean. Set to true to resume the chunks from the artifacts dir.
- /// * include_data_maps - Boolean.
If set to true, will append all the ChunkedFile.data_map chunks - pub fn chunk_path( - &mut self, - files_path: &Path, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - self.chunk_with_iter( - WalkDir::new(files_path).into_iter().flatten(), - read_cache, - include_data_maps, - ) - } - - /// Return the filename and the file's Xor address if all their chunks has been marked as - /// verified - pub(crate) fn already_put_chunks( - &mut self, - entries_iter: impl Iterator, - make_files_public: bool, - ) -> Result> { - self.chunk_with_iter(entries_iter, false, make_files_public)?; - Ok(self.get_chunks()) - } - - /// Chunk all the files in the provided iterator - /// These are stored to the CHUNK_ARTIFACTS_DIR - /// if read_cache is true, will take cache from previous runs into account - pub fn chunk_with_iter( - &mut self, - entries_iter: impl Iterator, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - let now = Instant::now(); - // clean up - self.files_to_chunk = Default::default(); - self.chunks = Default::default(); - self.completed_files = Default::default(); - self.resumed_chunk_count = 0; - self.resumed_files_count = 0; - - // collect the files to chunk - entries_iter.for_each(|entry| { - if entry.file_type().is_file() { - let path_xor = PathXorName::new(entry.path()); - info!( - "Added file {:?} with path_xor: {path_xor:?} to be chunked/resumed", - entry.path() - ); - self.files_to_chunk.push(( - entry.file_name().to_owned(), - path_xor, - entry.into_path(), - )); - } - }); - let total_files = self.files_to_chunk.len(); - - if total_files == 0 { - return Ok(()); - }; - - // resume the chunks from the artifacts dir - if read_cache { - self.resume_path(); - } - - // note the number of chunks that we've resumed - self.resumed_chunk_count = self - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - // note the number of files that we've resumed - self.resumed_files_count = self.chunks.keys().collect::>().len(); - - // Filter out files_to_chunk; Any PathXorName in chunks_to_upload is considered to be resumed. - { - let path_xors = self.chunks.keys().collect::>(); - self.files_to_chunk - .retain(|(_, path_xor, _)| !path_xors.contains(path_xor)); - } - - // Get the list of completed files - { - let completed_files = self.chunks.iter().filter_map(|(_, chunked_file)| { - if chunked_file.chunks.is_empty() { - Some(( - chunked_file.file_path.clone(), - chunked_file.file_name.clone(), - chunked_file.head_chunk_address, - )) - } else { - None - } - }); - - self.completed_files.extend(completed_files); - } - - // Return early if no more files to chunk - if self.files_to_chunk.is_empty() { - debug!( - "All files_to_chunk ({total_files:?}) were resumed. 
Returning the resumed chunks.", - ); - debug!("It took {:?} to resume all the files", now.elapsed()); - return Ok(()); - } - - let progress_bar = get_progress_bar(total_files as u64)?; - progress_bar.println(format!("Chunking {total_files} files...")); - - let artifacts_dir = &self.artifacts_dir.clone(); - let chunked_files = self.files_to_chunk - .par_iter() - .map(|(original_file_name, path_xor, path)| { - let file_chunks_dir = { - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - fs::create_dir_all(&file_chunks_dir).map_err(|err| { - error!("Failed to create folder {file_chunks_dir:?} for SE chunks with error {err:?}!"); - eyre!("Failed to create dir {file_chunks_dir:?} for SE chunks with error {err:?}") - })?; - file_chunks_dir - }; - - match FilesApi::chunk_file(path, &file_chunks_dir, include_data_maps) { - Ok((head_chunk_address, data_map, size, chunks)) => { - progress_bar.clone().inc(1); - debug!("Chunked {original_file_name:?} with {path_xor:?} into file's XorName: {head_chunk_address:?} of size {size}, and chunks len: {}", chunks.len()); - - let chunked_file = ChunkedFile { - head_chunk_address, - file_path: path.to_owned(), - file_name: original_file_name.clone(), - chunks: chunks.into_iter().collect(), - data_map - }; - Ok((path_xor.clone(), chunked_file)) - } - Err(err) => { - println!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - error!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - Err(eyre!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}")) - } - } - }) - .collect::>>()?; - debug!( - "Out of total files_to_chunk {total_files}, we have resumed {} files and chunked {} files", - self.resumed_files_count, - chunked_files.len() - ); - - // Self::resume_path would create an empty self.chunks entry if a file that was fully - // completed was resumed. Thus if it is empty, the user did not provide any valid file - // path. - if chunked_files.is_empty() && self.chunks.is_empty() { - bail!( - "The provided path does not contain any file. Please check your path!\nExiting..." 
- ); - } - - // write metadata and data_map - chunked_files - .par_iter() - .map(|(path_xor, chunked_file)| { - let metadata_path = artifacts_dir.join(&path_xor.0).join(METADATA_FILE); - - info!("Metadata path is: {metadata_path:?}"); - let metadata = rmp_serde::to_vec(&( - chunked_file.head_chunk_address, - chunked_file.data_map.clone(), - )) - .map_err(|_| { - error!("Failed to serialize file_xor_addr for writing metadata"); - eyre!("Failed to serialize file_xor_addr for writing metadata") - })?; - - let mut metadata_file = File::create(&metadata_path).map_err(|_| { - error!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}") - })?; - - metadata_file.write_all(&metadata).map_err(|_| { - error!("Failed to write metadata to {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to write metadata to {metadata_path:?} for {path_xor:?}") - })?; - - debug!("Wrote metadata for {path_xor:?}"); - Ok(()) - }) - .collect::>()?; - - progress_bar.finish_and_clear(); - debug!("It took {:?} to chunk {} files", now.elapsed(), total_files); - self.chunks.extend(chunked_files); - - Ok(()) - } - - // Try to resume the chunks - fn resume_path(&mut self) { - let artifacts_dir = self.artifacts_dir.clone(); - let resumed = self - .files_to_chunk - .par_iter() - .filter_map(|(original_file_name, path_xor, original_file_path)| { - // if this folder exists, and if we find chunks under this, we upload them. - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - if !file_chunks_dir.exists() { - return None; - } - Self::read_file_chunks_dir( - file_chunks_dir, - path_xor, - original_file_path.clone(), - original_file_name.clone(), - ) - }) - .collect::>(); - - self.chunks.extend(resumed); - } - - /// Get all the chunk name and their path. - /// If include_data_maps is true, append all the ChunkedFile.data_map chunks to the vec - pub fn get_chunks(&self) -> Vec<(XorName, PathBuf)> { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .cloned() - .collect::>() - } - - pub fn is_chunks_empty(&self) -> bool { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .next() - .is_none() - } - - /// Mark all the chunks as completed. This removes the chunks from the CHUNK_ARTIFACTS_DIR. - /// But keeps the folder and metadata file that denotes that the file has been already completed. 
-    pub fn mark_completed_all(&mut self) -> Result<()> {
-        let all_chunks = self
-            .chunks
-            .values()
-            .flat_map(|chunked_file| &chunked_file.chunks)
-            .map(|(chunk, _)| *chunk)
-            .collect::<Vec<_>>();
-        self.mark_completed(all_chunks.into_iter())
-    }
-
-    /// Mark a set of chunks as completed and remove them from CHUNK_ARTIFACTS_DIR
-    /// If the entire file is completed, keep the folder and metadata file
-    pub fn mark_completed(&mut self, chunks: impl Iterator<Item = XorName>) -> Result<()> {
-        let set_of_completed_chunks = chunks.collect::<BTreeSet<_>>();
-        trace!("marking as completed: {set_of_completed_chunks:?}");
-
-        // remove those files
-        self.chunks
-            .par_iter()
-            .flat_map(|(_, chunked_file)| &chunked_file.chunks)
-            .map(|(chunk_xor, chunk_path)| {
-                if set_of_completed_chunks.contains(chunk_xor) {
-                    debug!("removing {chunk_xor:?} at {chunk_path:?} as it is marked as completed");
-                    fs::remove_file(chunk_path).map_err(|_err| {
-                        error!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}");
-                        eyre!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}")
-                    })?;
-                }
-                Ok(())
-            })
-            .collect::<Result<()>>()?;
-
-        let mut entire_file_is_done = BTreeSet::new();
-        // remove the entries from the struct
-        self.chunks.iter_mut().for_each(|(path_xor, chunked_file)| {
-            chunked_file
-                .chunks
-                // if chunk is part of completed_chunks, return false to remove it
-                .retain(|(chunk_xor, _)| !set_of_completed_chunks.contains(chunk_xor));
-            if chunked_file.chunks.is_empty() {
-                entire_file_is_done.insert(path_xor.clone());
-            }
-        });
-
-        for path_xor in &entire_file_is_done {
-            // TODO: should we remove the entry? It seems so.
-            if let Some(chunked_file) = self.chunks.remove(path_xor) {
-                trace!("removed {path_xor:?} from chunks list");
-
-                self.completed_files.push((
-                    chunked_file.file_path.clone(),
-                    chunked_file.file_name.clone(),
-                    chunked_file.head_chunk_address,
-                ));
-
-                let uploaded_file_metadata = UploadedFile {
-                    filename: chunked_file.file_name,
-                    data_map: Some(chunked_file.data_map.value),
-                };
-                // errors are logged by write()
-                let _result =
-                    uploaded_file_metadata.write(&self.root_dir, &chunked_file.head_chunk_address);
-            }
-        }
-        Ok(())
-    }
-
-    /// Return the filename and the file's Xor address if all their chunks have been marked as
-    /// completed
-    pub(crate) fn completed_files(&self) -> &Vec<(PathBuf, OsString, ChunkAddress)> {
-        &self.completed_files
-    }
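-
-    // A minimal usage sketch (illustrative; `uploaded` stands in for whatever set of
-    // XorNames the caller has verified as stored on the network):
-    //
-    //   let uploaded: Vec<XorName> = verified_chunks();
-    //   manager.mark_completed(uploaded.into_iter())?;
-    //   for (_path, name, head) in manager.completed_files() {
-    //       println!("{name:?} fully uploaded, head chunk {head:?}");
-    //   }
-
-    /// Return the list of filenames that still have chunks yet to be marked as completed.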
-    pub(crate) fn incomplete_files(&self) -> Vec<(&PathBuf, &OsString, &ChunkAddress)> {
-        self.chunks
-            .values()
-            .map(|chunked_file| {
-                (
-                    &chunked_file.file_path,
-                    &chunked_file.file_name,
-                    &chunked_file.head_chunk_address,
-                )
-            })
-            .collect()
-    }
-
-    /// Returns an iterator over the list of chunked files
-    pub(crate) fn iter_chunked_files(&mut self) -> impl Iterator<Item = &ChunkedFile> {
-        self.chunks.values()
-    }
-
-    // Try to read the chunks from `file_chunks_dir`
-    // Returns the ChunkedFile if the metadata file exists
-    // file_chunks_dir: artifacts_dir/path_xor
-    // path_xor: Used during logging and is returned
-    // original_file_name: Used to create ChunkedFile
-    fn read_file_chunks_dir(
-        file_chunks_dir: PathBuf,
-        path_xor: &PathXorName,
-        original_file_path: PathBuf,
-        original_file_name: OsString,
-    ) -> Option<(PathXorName, ChunkedFile)> {
-        let mut file_chunk_address: Option<ChunkAddress> = None;
-        let mut data_map = Chunk::new(Bytes::new());
-        debug!("Trying to resume {path_xor:?} as the file_chunks_dir exists");
-
-        let chunks = WalkDir::new(file_chunks_dir.clone())
-            .into_iter()
-            .flatten()
-            .filter_map(|entry| {
-                if !entry.file_type().is_file() {
-                    return None;
-                }
-                if entry.file_name() == METADATA_FILE {
-                    if let Some((address, optional_data_map)) =
-                        Self::try_read_metadata(entry.path())
-                    {
-                        file_chunk_address = Some(address);
-                        data_map = optional_data_map;
-                        debug!("Obtained metadata for {path_xor:?}");
-                    } else {
-                        error!("Could not read metadata for {path_xor:?}");
-                    }
-                    // not a chunk, so don't return
-                    return None;
-                }
-
-                // try to get the chunk's xorname from its filename
-                if let Some(file_name) = entry.file_name().to_str() {
-                    Self::hex_decode_xorname(file_name)
-                        .map(|chunk_xorname| (chunk_xorname, entry.into_path()))
-                } else {
-                    error!(
-                        "Failed to convert OsString to str for {:?}",
-                        entry.file_name()
-                    );
-                    None
-                }
-            })
-            .collect::<BTreeSet<_>>();
-
-        match file_chunk_address {
-            Some(head_chunk_address) => {
-                debug!("Resuming {} chunks for file {original_file_name:?} and with file_xor_addr {head_chunk_address:?}/{path_xor:?}", chunks.len());
-
-                Some((
-                    path_xor.clone(),
-                    ChunkedFile {
-                        file_path: original_file_path,
-                        file_name: original_file_name,
-                        head_chunk_address,
-                        chunks,
-                        data_map,
-                    },
-                ))
-            }
-            _ => {
-                error!("Metadata file or data map was not present for {path_xor:?}");
-                // metadata file or data map was not present/was not read
-                None
-            }
-        }
-    }
-
-    /// Try to read the metadata file
-    /// Returning (head_chunk_address, datamap Chunk)
-    fn try_read_metadata(path: &Path) -> Option<(ChunkAddress, Chunk)> {
-        let metadata = fs::read(path)
-            .map_err(|err| error!("Failed to read metadata with err {err:?}"))
-            .ok()?;
-        // head chunk address and the final datamap contents if a datamap exists for this file
-        let metadata: (ChunkAddress, Chunk) = rmp_serde::from_slice(&metadata)
-            .map_err(|err| error!("Failed to deserialize metadata with err {err:?}"))
-            .ok()?;
-
-        Some(metadata)
-    }
-
-    // Decode the hex encoded xorname
-    fn hex_decode_xorname(string: &str) -> Option<XorName> {
-        let hex_decoded = hex::decode(string)
-            .map_err(|err| error!("Failed to decode {string} into bytes with err {err:?}"))
-            .ok()?;
-        let decoded_xorname: [u8; xor_name::XOR_NAME_LEN] = hex_decoded
-            .try_into()
-            .map_err(|_| error!("Failed to convert hex_decoded xorname into a [u8; 32]"))
-            .ok()?;
-        Some(XorName(decoded_xorname))
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use color_eyre::{eyre::eyre, Result};
-    use rand::{thread_rng, Rng};
-    use rayon::prelude::IntoParallelIterator;
-    use sn_logging::LogBuilder;
-    use tempfile::TempDir;
-
-    /// Assert any collection/iterator even if their orders do not match.
-    pub fn assert_list_eq<K, I, J>(a: I, b: J)
-    where
-        K: Eq + Clone,
-        I: IntoIterator<Item = K>,
-        J: IntoIterator<Item = K>,
-    {
-        let vec1: Vec<_> = a.into_iter().collect::<Vec<_>>();
-        let mut vec2: Vec<_> = b.into_iter().collect();
-
-        assert_eq!(vec1.len(), vec2.len());
-
-        for item1 in &vec1 {
-            let idx2 = vec2
-                .iter()
-                .position(|item2| item1 == item2)
-                .expect("Item not found in second list");
-
-            vec2.swap_remove(idx2);
-        }
-
-        assert_eq!(vec2.len(), 0);
-    }
-
-    #[test]
-    fn chunked_files_should_be_written_to_artifacts_dir() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?;
-        let artifacts_dir = manager.artifacts_dir.clone();
-        let _ = create_random_files(&random_files_dir, 1, 1)?;
-        manager.chunk_path(&random_files_dir, true, true)?;
-
-        let chunks = manager.get_chunks();
-        // 1. a 1mb file produces 4 chunks
-        assert_eq!(chunks.len(), 4);
-
-        // 2. make sure we have 1 folder == 1 file
-        let n_folders = WalkDir::new(&artifacts_dir)
-            .into_iter()
-            .flatten()
-            .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir)
-            .count();
-        assert_eq!(n_folders, 1);
-
-        // 3. make sure we have 1 file per chunk, + 1 datamap + 1 metadata file
-        let n_files = WalkDir::new(&artifacts_dir)
-            .into_iter()
-            .flatten()
-            .filter(|entry| {
-                info!("direntry {entry:?}");
-                entry.file_type().is_file()
-            })
-            .count();
-        assert_eq!(n_files, chunks.len() + 1);
-
-        // 4. make sure metadata file holds the correct file_xor_addr
-        let mut file_xor_addr_from_metadata = None;
-        for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() {
-            if entry.file_type().is_file() && entry.file_name() == METADATA_FILE {
-                let metadata = ChunkManager::try_read_metadata(entry.path());
-
-                if let Some((head_chunk_addr, _datamap)) = metadata {
-                    file_xor_addr_from_metadata = Some(head_chunk_addr);
-                }
-            }
-        }
-        let file_xor_addr_from_metadata =
-            file_xor_addr_from_metadata.expect("The metadata file should be present");
-        let file_xor_addr = manager
-            .chunks
-            .values()
-            .next()
-            .expect("1 file should be present")
-            .head_chunk_address;
-        assert_eq!(file_xor_addr_from_metadata, file_xor_addr);
-
-        // 5.
make sure the chunked file's name is the XorName of that chunk
-        let chunk_xornames = manager
-            .chunks
-            .values()
-            .next()
-            .expect("We must have 1 file here")
-            .chunks
-            .iter()
-            .map(|(xor_name, _)| *xor_name)
-            .collect::<BTreeSet<_>>();
-        for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() {
-            let file_name = entry.file_name();
-            if entry.file_type().is_file() && file_name != METADATA_FILE {
-                let chunk_xorname_from_filename =
-                    ChunkManager::hex_decode_xorname(file_name.to_str().unwrap())
-                        .expect("Failed to get xorname from hex encoded file_name");
-                assert!(chunk_xornames.contains(&chunk_xorname_from_filename));
-            }
-        }
-
-        Ok(())
-    }
-
-    #[test]
-    fn no_datamap_chunked_files_should_be_written_to_artifacts_dir_when_not_public() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?;
-        let artifacts_dir = manager.artifacts_dir.clone();
-        let _ = create_random_files(&random_files_dir, 1, 1)?;
-
-        // we do NOT want to include or write the data_map chunk here
-        manager.chunk_path(&random_files_dir, true, false)?;
-
-        let chunks = manager.get_chunks();
-        // 1. a 1mb file produces 3 chunks without the datamap
-        assert_eq!(chunks.len(), 3);
-
-        // 2. make sure we have 1 folder == 1 file
-        let n_folders = WalkDir::new(&artifacts_dir)
-            .into_iter()
-            .flatten()
-            .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir)
-            .count();
-        assert_eq!(n_folders, 1);
-
-        // 3. make sure we have 1 file per chunk, + 1 metadata file
-        let n_files = WalkDir::new(&artifacts_dir)
-            .into_iter()
-            .flatten()
-            .filter(|entry| {
-                info!("direntry {entry:?}");
-                entry.file_type().is_file()
-            })
-            .count();
-        assert_eq!(n_files, chunks.len() + 1);
-
-        // 4. make sure metadata file holds the correct file_xor_addr
-        let mut file_xor_addr_from_metadata = None;
-        for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() {
-            if entry.file_type().is_file() && entry.file_name() == METADATA_FILE {
-                let metadata = ChunkManager::try_read_metadata(entry.path());
-
-                if let Some((head_chunk_addr, _datamap)) = metadata {
-                    file_xor_addr_from_metadata = Some(head_chunk_addr);
-                }
-            }
-        }
-        let file_xor_addr_from_metadata =
-            file_xor_addr_from_metadata.expect("The metadata file should be present");
-        let file_xor_addr = manager
-            .chunks
-            .values()
-            .next()
-            .expect("1 file should be present")
-            .head_chunk_address;
-        assert_eq!(file_xor_addr_from_metadata, file_xor_addr);
-
-        // 5.
make sure the chunked file's name is the XorName of that chunk
-        let chunk_xornames = manager
-            .chunks
-            .values()
-            .next()
-            .expect("We must have 1 file here")
-            .chunks
-            .iter()
-            .map(|(xor_name, _)| *xor_name)
-            .collect::<BTreeSet<_>>();
-        for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() {
-            let file_name = entry.file_name();
-            if entry.file_type().is_file() && file_name != METADATA_FILE {
-                let chunk_xorname_from_filename =
-                    ChunkManager::hex_decode_xorname(file_name.to_str().unwrap())
-                        .expect("Failed to get xorname from hex encoded file_name");
-                assert!(chunk_xornames.contains(&chunk_xorname_from_filename));
-            }
-        }
-
-        Ok(())
-    }
-
-    #[test]
-    fn chunks_should_be_removed_from_artifacts_dir_if_marked_as_completed() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?;
-
-        let _ = create_random_files(&random_files_dir, 1, 1)?;
-        manager.chunk_path(&random_files_dir, true, true)?;
-
-        let path_xor = manager.chunks.keys().next().unwrap().clone();
-        let chunked_file = manager.chunks.values().next().unwrap().clone();
-        let file_xor_addr = chunked_file.head_chunk_address;
-        let (chunk, _) = chunked_file
-            .chunks
-            .first()
-            .expect("Must contain 1 chunk")
-            .clone();
-        let total_chunks = manager.chunks.values().next().unwrap().chunks.len();
-        manager.mark_completed(vec![chunk].into_iter())?;
-
-        // 1. chunk should be removed from the struct
-        assert_eq!(
-            manager
-                .chunks
-                .values()
-                .next()
-                .expect("Since the file was not fully completed, it should be present")
-                .chunks
-                .len(),
-            total_chunks - 1,
-        );
-
-        // 2. the folder should exist, but with the chunk removed
-        let file_chunks_dir = manager.artifacts_dir.join(&path_xor.0);
-        let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir(
-            file_chunks_dir,
-            &path_xor,
-            chunked_file.file_path,
-            chunked_file.file_name,
-        )
-        .expect("Folder and metadata should be present");
-        assert_eq!(chunked_file_from_dir.chunks.len(), total_chunks - 1);
-        assert_eq!(chunked_file_from_dir.head_chunk_address, file_xor_addr);
-        assert_eq!(path_xor_from_dir, path_xor);
-
-        // 3.
file should not be marked as completed
-        assert!(manager.completed_files.is_empty());
-
-        Ok(())
-    }
-
-    #[test]
-    fn marking_all_chunks_as_completed_should_not_remove_the_dir() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?;
-
-        let _ = create_random_files(&random_files_dir, 5, 5)?;
-        manager.chunk_path(&random_files_dir, true, true)?;
-        // cloned after chunking
-        let manager_clone = manager.clone();
-
-        let n_folders = WalkDir::new(&manager.artifacts_dir)
-            .into_iter()
-            .flatten()
-            .filter(|entry| entry.file_type().is_dir() && entry.path() != manager.artifacts_dir)
-            .count();
-        assert_eq!(n_folders, 5);
-
-        manager.mark_completed_all()?;
-
-        // all 5 files should be marked as completed
-        assert_eq!(manager.completed_files.len(), 5);
-
-        // all 5 folders should exist
-        for (path_xor, chunked_file) in manager_clone.chunks.iter() {
-            let file_chunks_dir = manager_clone.artifacts_dir.join(path_xor.0.clone());
-            let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir(
-                file_chunks_dir,
-                path_xor,
-                chunked_file.file_path.clone(),
-                chunked_file.file_name.to_owned(),
-            )
-            .expect("Folder and metadata should be present");
-            assert_eq!(chunked_file_from_dir.chunks.len(), 0);
-            assert_eq!(
-                chunked_file_from_dir.head_chunk_address,
-                chunked_file.head_chunk_address
-            );
-            assert_eq!(&path_xor_from_dir, path_xor);
-        }
-
-        Ok(())
-    }
-
-    #[test]
-    fn mark_none_and_resume() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?;
-
-        let _ = create_random_files(&random_files_dir, 5, 5)?;
-        manager.chunk_path(&random_files_dir, true, true)?;
-
-        let mut new_manager = ChunkManager::new(&root_dir);
-        new_manager.chunk_path(&random_files_dir, true, true)?;
-
-        // 1. make sure the chunk counts match
-        let total_chunk_count = manager
-            .chunks
-            .values()
-            .flat_map(|chunked_file| &chunked_file.chunks)
-            .count();
-        assert_eq!(manager.resumed_chunk_count, 0);
-        assert_eq!(new_manager.resumed_chunk_count, total_chunk_count);
-
-        // 2. assert the two managers
-        assert_eq!(manager.chunks, new_manager.chunks);
-        assert_eq!(manager.completed_files, new_manager.completed_files);
-
-        Ok(())
-    }
-
-    #[test]
-    fn mark_one_chunk_and_resume() -> Result<()> {
-        let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true);
-        let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?;
-
-        let _ = create_random_files(&random_files_dir, 5, 5)?;
-        manager.chunk_path(&random_files_dir, true, true)?;
-
-        let total_chunks_count = manager
-            .chunks
-            .values()
-            .flat_map(|chunked_file| &chunked_file.chunks)
-            .count();
-
-        // mark a chunk as completed
-        let removed_chunk = manager
-            .chunks
-            .values()
-            .next()
-            .expect("At least 1 file should be present")
-            .chunks
-            .iter()
-            .next()
-            .expect("Chunk should be present")
-            .0;
-        manager.mark_completed([removed_chunk].into_iter())?;
-        let mut new_manager = ChunkManager::new(&root_dir);
-        new_manager.chunk_path(&random_files_dir, true, true)?;
-
-        // 1.
we should have 1 completed chunk and (total_chunks_count-1) incomplete chunks - assert_eq!(manager.resumed_chunk_count, 0); - assert_eq!(new_manager.resumed_chunk_count, total_chunks_count - 1); - // also check the structs - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - total_chunks_count - 1 - ); - - // 2. files should not be added to completed files - assert_eq!(new_manager.completed_files.len(), 0); - - Ok(()) - } - - #[test] - fn mark_all_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - manager.mark_completed_all()?; - - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. we should have chunk entries, but 0 chunks inside them - assert_eq!(new_manager.chunks.len(), 5); - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - 0 - ); - // 2. the resumed stats should be 0 - assert_eq!(new_manager.resumed_chunk_count, 0); - - // 3. make sure the files are added to completed list - assert_eq!(new_manager.completed_files.len(), 5); - - Ok(()) - } - - #[test] - fn absence_of_metadata_file_should_re_chunk_the_entire_file() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _root_dir, random_files_dir) = init_manager()?; - - let mut random_files = create_random_files(&random_files_dir, 1, 1)?; - let random_file = random_files.remove(0); - manager.chunk_path(&random_files_dir, true, true)?; - - let mut old_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - old_chunks_list.insert(chunk_xorname_from_filename); - } - } - - // remove metadata file from artifacts_dir - let path_xor = PathXorName::new(&random_file); - let metadata_path = manager.artifacts_dir.join(path_xor.0).join(METADATA_FILE); - fs::remove_file(&metadata_path)?; - - // use the same manager to chunk the path - manager.chunk_path(&random_files_dir, true, true)?; - // nothing should be resumed - assert_eq!(manager.resumed_chunk_count, 0); - // but it should be re-chunked - assert_eq!( - manager.get_chunks().len(), - 4, - "we have correct chunk len including data_map" - ); - // metadata file should be created - assert!(metadata_path.exists()); - - let mut new_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - new_chunks_list.insert(chunk_xorname_from_filename); - } - } - assert_list_eq(new_chunks_list, old_chunks_list); - - Ok(()) - } - - fn init_manager() -> Result<(TempDir, ChunkManager, PathBuf, PathBuf)> { - let tmp_dir = tempfile::tempdir()?; - let random_files_dir = 
tmp_dir.path().join("random_files"); - let root_dir = tmp_dir.path().join("root_dir"); - fs::create_dir_all(&random_files_dir)?; - fs::create_dir_all(&root_dir)?; - let manager = ChunkManager::new(&root_dir); - - Ok((tmp_dir, manager, root_dir, random_files_dir)) - } - - fn create_random_files( - at: &Path, - num_files: usize, - mb_per_file: usize, - ) -> Result> { - let files = (0..num_files) - .into_par_iter() - .filter_map(|i| { - let mut path = at.to_path_buf(); - path.push(format!("random_file_{i}")); - match generate_file(&path, mb_per_file) { - Ok(_) => Some(path), - Err(err) => { - error!("Failed to generate random file with {err:?}"); - None - } - } - }) - .collect::>(); - if files.len() < num_files { - return Err(eyre!("Failed to create a Failedkk")); - } - Ok(files) - } - - fn generate_file(path: &PathBuf, file_size_mb: usize) -> Result<()> { - let mut file = File::create(path)?; - let mut rng = thread_rng(); - - // can create [u8; 32] max at time. Thus each mb has 1024*32 such small chunks - let n_small_chunks = file_size_mb * 1024 * 32; - for _ in 0..n_small_chunks { - let random_data: [u8; 32] = rng.gen(); - file.write_all(&random_data)?; - } - let size = file.metadata()?.len() as f64 / (1024 * 1024) as f64; - assert_eq!(file_size_mb as f64, size); - - Ok(()) - } -} diff --git a/sn_cli/src/files/download.rs b/sn_cli/src/files/download.rs deleted file mode 100644 index d95f0a0646..0000000000 --- a/sn_cli/src/files/download.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - get_progress_bar, - upload::{UploadedFile, UPLOADED_FILES}, -}; - -use std::collections::BTreeSet; -use std::ffi::OsString; -use std::path::Path; - -use color_eyre::Result; -use indicatif::ProgressBar; -use walkdir::WalkDir; -use xor_name::XorName; - -use crate::utils::duration_to_minute_seconds_miliseconds_string; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - FilesApi, FilesDownload, FilesDownloadEvent, -}; -use tracing::{debug, error, info}; - -/// The default folder to download files to. 
-const DOWNLOAD_FOLDER: &str = "safe_files"; - -pub async fn download_files( - files_api: &FilesApi, - root_dir: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) -> Result<()> { - info!("Downloading with batch size of {}", batch_size); - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let download_path = dirs_next::download_dir() - .unwrap_or(root_dir.to_path_buf()) - .join(DOWNLOAD_FOLDER); - std::fs::create_dir_all(download_path.as_path())?; - - let mut uploaded_files = BTreeSet::new(); - - for entry in WalkDir::new(uploaded_files_path.clone()) { - let entry = entry?; - let path = entry.path(); - if path.is_file() { - let hex_xorname = path - .file_name() - .expect("Uploaded file to have name") - .to_str() - .expect("Failed to convert path to string"); - let bytes = hex::decode(hex_xorname)?; - let xor_name_bytes: [u8; 32] = bytes - .try_into() - .expect("Failed to parse XorName from hex string"); - let xor_name = XorName(xor_name_bytes); - let address = ChunkAddress::new(xor_name); - - let uploaded_file_metadata = UploadedFile::read(path)?; - let datamap_chunk = uploaded_file_metadata.data_map.map(|bytes| Chunk { - address, - value: bytes, - }); - uploaded_files.insert((xor_name, (uploaded_file_metadata.filename, datamap_chunk))); - } - } - - for (xorname, file_data) in uploaded_files.into_iter() { - download_file( - files_api.clone(), - xorname, - file_data, - &download_path, - show_holders, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(()) -} - -pub async fn download_file( - files_api: FilesApi, - xor_name: XorName, - // original file name and optional datamap chunk - (file_name, datamap): (OsString, Option), - download_path: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) { - let start_time = std::time::Instant::now(); - - let mut files_download = FilesDownload::new(files_api.clone()) - .set_batch_size(batch_size) - .set_show_holders(show_holders) - .set_retry_strategy(retry_strategy); - - println!("Downloading {file_name:?} from {xor_name:64x} with batch-size {batch_size}"); - debug!("Downloading {file_name:?} from {:64x}", xor_name); - let downloaded_file_path = download_path.join(&file_name); - - let mut download_events_rx = files_download.get_events(); - - let progress_handler = tokio::spawn(async move { - let mut progress_bar: Option = None; - - // The loop is guaranteed to end, as the channel will be closed when the download completes or errors out. - while let Some(event) = download_events_rx.recv().await { - match event { - FilesDownloadEvent::Downloaded(_) => { - if let Some(progress_bar) = &progress_bar { - progress_bar.inc(1); - } - } - FilesDownloadEvent::ChunksCount(count) => { - // terminate the progress bar from datamap download. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::DatamapCount(count) => { - // terminate the progress bar if it was loaded here. This should not happen. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. 
The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::Error => { - error!("Got FilesDownloadEvent::Error"); - } - } - } - - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - }); - - let download_result = files_download - .download_file_to_path( - ChunkAddress::new(xor_name), - datamap, - downloaded_file_path.clone(), - ) - .await; - - let duration = start_time.elapsed(); - - // await on the progress handler first as we want to clear the progress bar before printing things. - let _ = progress_handler.await; - match download_result { - Ok(_) => { - debug!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - println!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - let elapsed_time = duration_to_minute_seconds_miliseconds_string(duration); - println!("File downloaded in {elapsed_time}"); - } - Err(error) => { - error!("Error downloading {file_name:?}: {error}"); - println!("Error downloading {file_name:?}: {error}") - } - } -} diff --git a/sn_cli/src/files/estimate.rs b/sn_cli/src/files/estimate.rs deleted file mode 100644 index a5c16f4a03..0000000000 --- a/sn_cli/src/files/estimate.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::ChunkManager; - -use std::path::{Path, PathBuf}; - -use color_eyre::Result; - -use sn_client::{ - protocol::{storage::ChunkAddress, NetworkAddress}, - transfers::NanoTokens, - FilesApi, -}; - -pub struct Estimator { - chunk_manager: ChunkManager, - files_api: FilesApi, -} - -impl Estimator { - pub fn new(chunk_manager: ChunkManager, files_api: FilesApi) -> Self { - Self { - chunk_manager, - files_api, - } - } - - /// Estimate the upload cost of a chosen file - pub async fn estimate_cost( - mut self, - path: PathBuf, - make_data_public: bool, - root_dir: &Path, - ) -> Result<()> { - self.chunk_manager - .chunk_path(&path, false, make_data_public)?; - - let mut estimate: u64 = 0; - - let balance = FilesApi::new(self.files_api.client().clone(), root_dir.to_path_buf()) - .wallet()? 
- .balance() - .as_nano(); - - for (chunk_address, _location) in self.chunk_manager.get_chunks() { - let c = self.files_api.clone(); - - tokio::spawn(async move { - let (_peer, _cost, quote) = c - .wallet() - .expect("estimate_cost: Wallet error.") - .get_store_cost_at_address(NetworkAddress::from_chunk_address( - ChunkAddress::new(chunk_address), - )) - .await - .expect("estimate_cost: Error with file."); - quote.cost.as_nano() - }) - .await - .map(|nanos| estimate += nanos) - .expect("estimate_cost: Concurrency error."); - } - - let total = balance.saturating_sub(estimate); - - println!("**************************************"); - println!("Your current balance: {}", NanoTokens::from(balance)); - println!("Transfer cost estimate: {}", NanoTokens::from(estimate)); - println!( - "Your balance estimate after transfer: {}", - NanoTokens::from(total) - ); - println!("**************************************"); - - Ok(()) - } -} diff --git a/sn_cli/src/files/files_uploader.rs b/sn_cli/src/files/files_uploader.rs deleted file mode 100644 index 6e20f2e788..0000000000 --- a/sn_cli/src/files/files_uploader.rs +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::get_progress_bar; -use crate::utils::duration_to_minute_seconds_string; -use crate::ChunkManager; -use bytes::Bytes; -use color_eyre::{eyre::eyre, Report, Result}; -use futures::StreamExt; -use rand::prelude::SliceRandom; -use rand::thread_rng; -use sn_client::{ - transfers::{TransferError, WalletError}, - Client, Error as ClientError, UploadCfg, UploadEvent, UploadSummary, Uploader, -}; -use sn_protocol::storage::{Chunk, ChunkAddress}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, - time::{Duration, Instant}, -}; -use tokio::{sync::mpsc::Receiver, task::JoinHandle}; -use tracing::{debug, error, info, warn}; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// The result of a successful files upload. -pub struct FilesUploadSummary { - /// The cost and count summary of the upload. - pub upload_summary: UploadSummary, - /// The list of completed files (FilePath, FileName, HeadChunkAddress) - pub completed_files: Vec<(PathBuf, OsString, ChunkAddress)>, - /// The list of incomplete files (FilePath, FileName, HeadChunkAddress) - pub incomplete_files: Vec<(PathBuf, OsString, ChunkAddress)>, -} - -/// A trait designed to customize the standard output behavior for file upload processes. 
-pub trait FilesUploadStatusNotifier: Send {
-    fn collect_entries(&mut self, entries_iter: Vec<DirEntry>);
-    fn collect_paths(&mut self, path: &Path);
-    fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize);
-    fn on_verifying_uploaded_chunks_success(
-        &self,
-        completed_files: &[(PathBuf, OsString, ChunkAddress)],
-        make_data_public: bool,
-    );
-    fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize);
-    fn on_failed_to_upload_all_files(
-        &self,
-        incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>,
-        completed_files: &[(PathBuf, OsString, ChunkAddress)],
-        make_data_public: bool,
-    );
-    fn on_chunking_complete(
-        &self,
-        upload_cfg: &UploadCfg,
-        make_data_public: bool,
-        chunks_to_upload_len: usize,
-    );
-    fn on_upload_complete(
-        &self,
-        upload_sum: &UploadSummary,
-        elapsed_time: Duration,
-        chunks_to_upload_len: usize,
-    );
-}
-
-/// Combines the `Uploader` along with the `ChunkManager`
-pub struct FilesUploader {
-    client: Client,
-    root_dir: PathBuf,
-    /// entries to upload
-    entries_to_upload: Vec<DirEntry>,
-    /// The status notifier that can be overridden to perform custom actions instead of printing things to stdout.
-    status_notifier: Option<Box<dyn FilesUploadStatusNotifier>>,
-    /// config
-    make_data_public: bool,
-    upload_cfg: UploadCfg,
-}
-
-impl FilesUploader {
-    pub fn new(client: Client, root_dir: PathBuf) -> Self {
-        let status_notifier = Box::new(StdOutPrinter {
-            file_paths_to_print: Default::default(),
-        });
-        Self {
-            client,
-            root_dir,
-            entries_to_upload: Default::default(),
-            status_notifier: Some(status_notifier),
-            make_data_public: false,
-            upload_cfg: Default::default(),
-        }
-    }
-
-    pub fn set_upload_cfg(mut self, cfg: UploadCfg) -> Self {
-        self.upload_cfg = cfg;
-        self
-    }
-
-    pub fn set_make_data_public(mut self, make_data_public: bool) -> Self {
-        self.make_data_public = make_data_public;
-        self
-    }
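-
-    // A minimal usage sketch (illustrative; `client`, `root_dir` and `path` stand in
-    // for values the caller already has):
-    //
-    //   let summary = FilesUploader::new(client, root_dir)
-    //       .set_make_data_public(true)
-    //       .insert_path(&path)
-    //       .start_upload()
-    //       .await?;
-    //   println!("{} file(s) completed", summary.completed_files.len());
-
-    /// Override the default status notifier. By default we print things to stdout.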
-    pub fn set_status_notifier(
-        mut self,
-        status_notifier: Box<dyn FilesUploadStatusNotifier>,
-    ) -> Self {
-        self.status_notifier = Some(status_notifier);
-        self
-    }
-
-    pub fn insert_entries(mut self, entries_iter: impl IntoIterator<Item = DirEntry>) -> Self {
-        self.entries_to_upload.extend(entries_iter);
-        self
-    }
-
-    pub fn insert_path(mut self, path: &Path) -> Self {
-        if let Some(notifier) = &mut self.status_notifier {
-            notifier.collect_paths(path);
-        }
-        let entries = WalkDir::new(path).into_iter().flatten();
-        self.entries_to_upload.extend(entries);
-        self
-    }
-
-    pub async fn start_upload(mut self) -> Result<FilesUploadSummary> {
-        let mut chunk_manager = ChunkManager::new(&self.root_dir);
-        let chunks_to_upload = self.get_chunks_to_upload(&mut chunk_manager).await?;
-        let chunks_to_upload_len = chunks_to_upload.len();
-
-        // Notify on chunking complete
-        if let Some(notifier) = &self.status_notifier {
-            notifier.on_chunking_complete(
-                &self.upload_cfg,
-                self.make_data_public,
-                chunks_to_upload_len,
-            );
-        }
-
-        let now = Instant::now();
-        let mut uploader = Uploader::new(self.client, self.root_dir);
-        uploader.set_upload_cfg(self.upload_cfg);
-        uploader.insert_chunk_paths(chunks_to_upload);
-
-        let events_handle = Self::spawn_upload_events_handler(
-            chunk_manager,
-            self.make_data_public,
-            chunks_to_upload_len,
-            uploader.get_event_receiver(),
-            self.status_notifier.take(),
-        )?;
-
-        let upload_sum = match uploader.start_upload().await {
-            Ok(summary) => summary,
-            Err(ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance(
-                available,
-                required,
-            )))) => {
-                return Err(eyre!(
-                    "Not enough balance in wallet to pay for chunk. \
-                    We have {available:?} but need {required:?} to pay for the chunk"
-                ))
-            }
-            Err(err) => return Err(eyre!("Failed to upload chunk batch: {err}")),
-        };
-        let (chunk_manager, status_notifier) = events_handle.await??;
-        self.status_notifier = status_notifier;
-
-        // Notify on upload complete
-        if let Some(notifier) = &self.status_notifier {
-            notifier.on_upload_complete(&upload_sum, now.elapsed(), chunks_to_upload_len);
-        }
-
-        let summary = FilesUploadSummary {
-            upload_summary: upload_sum,
-            completed_files: chunk_manager.completed_files().clone(),
-            incomplete_files: chunk_manager
-                .incomplete_files()
-                .into_iter()
-                .map(|(path, file_name, head_address)| {
-                    (path.clone(), file_name.clone(), *head_address)
-                })
-                .collect(),
-        };
-        Ok(summary)
-    }
-
-    // This will read from the cache if possible. We only re-verify with the network if the file has been cached but
-    // there are no pending chunks to upload.
-    async fn get_chunks_to_upload(
-        &self,
-        chunk_manager: &mut ChunkManager,
-    ) -> Result<Vec<(XorName, PathBuf)>> {
-        // Initially try reading from the cache
-        chunk_manager.chunk_with_iter(
-            self.entries_to_upload.iter().cloned(),
-            true,
-            self.make_data_public,
-        )?;
-        // We verify if there are no chunks left to upload.
-        let mut chunks_to_upload = if !chunk_manager.is_chunks_empty() {
-            chunk_manager.get_chunks()
-        } else {
-            // re-chunk it again to get back all the chunks
-            let chunks = chunk_manager.already_put_chunks(
-                self.entries_to_upload.iter().cloned(),
-                self.make_data_public,
-            )?;
-
-            // Notify on verification init
-            if let Some(notifier) = &self.status_notifier {
-                notifier.on_verifying_uploaded_chunks_init(chunks.len());
-            }
-
-            let failed_chunks = self.verify_uploaded_chunks(&chunks).await?;
-
-            chunk_manager.mark_completed(
-                chunks
-                    .into_iter()
-                    .filter(|c| !failed_chunks.contains(c))
-                    .map(|(xor, _)| xor),
-            )?;
-
-            if failed_chunks.is_empty() {
-                // Notify on verification success
-                if let Some(notifier) = &self.status_notifier {
-                    notifier.on_verifying_uploaded_chunks_success(
-                        chunk_manager.completed_files(),
-                        self.make_data_public,
-                    );
-                }
-
-                return Ok(vec![]);
-            }
-            // Notify on verification failure
-            if let Some(notifier) = &self.status_notifier {
-                notifier.on_verifying_uploaded_chunks_failure(failed_chunks.len());
-            }
-            failed_chunks
-        };
-        // shuffle the chunks
-        let mut rng = thread_rng();
-        chunks_to_upload.shuffle(&mut rng);
-
-        Ok(chunks_to_upload)
-    }
-
-    async fn verify_uploaded_chunks(
-        &self,
-        chunks_paths: &[(XorName, PathBuf)],
-    ) -> Result<Vec<(XorName, PathBuf)>> {
-        let mut stream = futures::stream::iter(chunks_paths)
-            .map(|(xorname, path)| async move {
-                let chunk = Chunk::new(Bytes::from(std::fs::read(path)?));
-                let res = self.client.verify_chunk_stored(&chunk).await;
-                Ok::<_, Report>((xorname, path.clone(), res.is_err()))
-            })
-            .buffer_unordered(self.upload_cfg.batch_size);
-        let mut failed_chunks = Vec::new();
-
-        while let Some(result) = stream.next().await {
-            let (xorname, path, is_error) = result?;
-            if is_error {
-                warn!("Failed to fetch a chunk {xorname:?}");
-                failed_chunks.push((*xorname, path));
-            }
-        }
-
-        Ok(failed_chunks)
-    }
-
-    #[expect(clippy::type_complexity)]
-    fn spawn_upload_events_handler(
-        mut chunk_manager: ChunkManager,
-        make_data_public: bool,
-        chunks_to_upload_len: usize,
-        mut upload_event_rx: Receiver<UploadEvent>,
-        status_notifier: Option<Box<dyn FilesUploadStatusNotifier>>,
-    ) -> Result<JoinHandle<Result<(ChunkManager, Option<Box<dyn FilesUploadStatusNotifier>>)>>>
-    {
-        let progress_bar = get_progress_bar(chunks_to_upload_len as u64)?;
-        let handle = tokio::spawn(async move {
-            let mut upload_terminated_with_error = false;
-            // The loop is guaranteed to end, as the channel will be
-            // closed when the upload completes or errors out.
-            while let Some(event) = upload_event_rx.recv().await {
-                match event {
-                    UploadEvent::ChunkUploaded(addr)
-                    | UploadEvent::ChunkAlreadyExistsInNetwork(addr) => {
-                        progress_bar.clone().inc(1);
-                        if let Err(err) =
-                            chunk_manager.mark_completed(std::iter::once(*addr.xorname()))
-                        {
-                            error!("Failed to mark chunk {addr:?} as completed: {err:?}");
-                        }
-                    }
-                    UploadEvent::Error => {
-                        upload_terminated_with_error = true;
-                    }
-                    UploadEvent::RegisterUploaded { .. }
-                    | UploadEvent::RegisterUpdated { .. }
-                    | UploadEvent::PaymentMade { .. } => {}
-                }
-            }
-            progress_bar.finish_and_clear();
-
-            // this check is to make sure that we don't partially write to the uploaded_files file if the upload process
-            // terminates with an error. This race condition can happen as we bail on `upload_result` before we await the
-            // handler.
-            if upload_terminated_with_error {
-                error!("Got UploadEvent::Error inside upload event loop");
-            } else {
-                // Notify about any files that remain incomplete
-                if let Some(notifier) = &status_notifier {
-                    notifier.on_failed_to_upload_all_files(
-                        chunk_manager.incomplete_files(),
-                        chunk_manager.completed_files(),
-                        make_data_public,
-                    );
-                }
-            }
-
-            Ok::<_, Report>((chunk_manager, status_notifier))
-        });
-
-        Ok(handle)
-    }
-}
-
-/// The default status notifier; prints to stdout.
-struct StdOutPrinter {
-    file_paths_to_print: Vec<PathBuf>,
-}
-
-impl FilesUploadStatusNotifier for StdOutPrinter {
-    fn collect_entries(&mut self, _entries_iter: Vec<DirEntry>) {}
-
-    fn collect_paths(&mut self, path: &Path) {
-        self.file_paths_to_print.push(path.to_path_buf());
-    }
-
-    fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize) {
-        println!("Files upload attempted previously, verifying {chunks_len} chunks");
-    }
-
-    fn on_verifying_uploaded_chunks_success(
-        &self,
-        completed_files: &[(PathBuf, OsString, ChunkAddress)],
-        make_data_public: bool,
-    ) {
-        println!("All files were already uploaded and verified");
-        Self::print_uploaded_msg(make_data_public);
-
-        if completed_files.is_empty() {
-            println!("chunk_manager doesn't have any verified_files, nor any failed_chunks to re-upload.");
-        }
-        Self::print_completed_file_list(completed_files);
-    }
-
-    fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize) {
-        println!("{failed_chunks_len} chunks were uploaded in the past but failed to verify. Will attempt to upload them again...");
-    }
-
-    fn on_failed_to_upload_all_files(
-        &self,
-        incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>,
-        completed_files: &[(PathBuf, OsString, ChunkAddress)],
-        make_data_public: bool,
-    ) {
-        for (_, file_name, _) in incomplete_files {
-            if let Some(file_name) = file_name.to_str() {
-                println!("Unverified file \"{file_name}\", suggest re-uploading it.");
-                info!("Unverified {file_name}");
-            } else {
-                println!("Unverified file \"{file_name:?}\", suggest re-uploading it.");
-                info!("Unverified file {file_name:?}");
-            }
-        }
-
-        // log uploaded file information
-        Self::print_uploaded_msg(make_data_public);
-        Self::print_completed_file_list(completed_files);
-    }
-
-    fn on_chunking_complete(
-        &self,
-        upload_cfg: &UploadCfg,
-        make_data_public: bool,
-        chunks_to_upload_len: usize,
-    ) {
-        for path in self.file_paths_to_print.iter() {
-            debug!(
-                "Uploading file(s) from {path:?} batch size {:?} will verify?: {}",
-                upload_cfg.batch_size, upload_cfg.verify_store
-            );
-            if make_data_public {
-                info!("{path:?} will be made public and linkable");
-                println!("{path:?} will be made public and linkable");
-            }
-        }
-        if self.file_paths_to_print.len() == 1 {
-            println!(
-                "Splitting and uploading {:?} into {chunks_to_upload_len} chunks",
-                self.file_paths_to_print[0]
-            );
-        } else {
-            println!(
-                "Splitting and uploading {:?} into {chunks_to_upload_len} chunks",
-                self.file_paths_to_print
-            );
-        }
-    }
-
-    fn on_upload_complete(
-        &self,
-        upload_sum: &UploadSummary,
-        elapsed_time: Duration,
-        chunks_to_upload_len: usize,
-    ) {
-        let elapsed = duration_to_minute_seconds_string(elapsed_time);
-
-        println!(
-            "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \
-            the leftover {} chunks in {elapsed}",
-            upload_sum.skipped_count, upload_sum.uploaded_count,
-        );
-        info!(
-            "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \
-            the leftover {} chunks in {elapsed}",
-            upload_sum.skipped_count, upload_sum.uploaded_count,
-        );
-
println!("**************************************"); - println!("* Payment Details *"); - println!("**************************************"); - println!( - "Made payment of {:?} for {} chunks", - upload_sum.storage_cost, upload_sum.uploaded_count - ); - println!( - "Made payment of {:?} for royalties fees", - upload_sum.royalty_fees - ); - println!("New wallet balance: {}", upload_sum.final_balance); - } -} - -impl StdOutPrinter { - fn print_completed_file_list(completed_files: &[(PathBuf, OsString, ChunkAddress)]) { - for (_, file_name, addr) in completed_files { - let hex_addr = addr.to_hex(); - if let Some(file_name) = file_name.to_str() { - println!("Uploaded \"{file_name}\" to address {hex_addr}"); - info!("Uploaded {file_name} to {hex_addr}"); - } else { - println!("Uploaded \"{file_name:?}\" to address {hex_addr}"); - info!("Uploaded {file_name:?} to {hex_addr}"); - } - } - } - - fn print_uploaded_msg(make_data_public: bool) { - println!("**************************************"); - println!("* Uploaded Files *"); - if !make_data_public { - println!("* *"); - println!("* These are not public by default. *"); - println!("* Reupload with `-p` option *"); - println!("* to publish the datamaps. *"); - } - println!("**************************************"); - } -} diff --git a/sn_cli/src/files/upload.rs b/sn_cli/src/files/upload.rs deleted file mode 100644 index 2aa13d7dd8..0000000000 --- a/sn_cli/src/files/upload.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bytes::Bytes; -use color_eyre::Result; -use serde::Deserialize; -use sn_client::protocol::storage::ChunkAddress; -use std::{ffi::OsString, path::Path}; -use tracing::{error, warn}; - -/// Subdir for storing uploaded file into -pub const UPLOADED_FILES: &str = "uploaded_files"; - -/// The metadata related to file that has been uploaded. -/// This is written during upload and read during downloads. -#[derive(Clone, Debug, Deserialize)] -pub struct UploadedFile { - pub filename: OsString, - pub data_map: Option, -} - -impl UploadedFile { - /// Write an UploadedFile to a path identified by the hex of the head ChunkAddress. - /// If you want to update the data_map to None, calling this function will overwrite the previous value. 
-    pub fn write(&self, root_dir: &Path, head_chunk_address: &ChunkAddress) -> Result<()> {
-        let uploaded_files = root_dir.join(UPLOADED_FILES);
-
-        if !uploaded_files.exists() {
-            if let Err(error) = std::fs::create_dir_all(&uploaded_files) {
-                error!("Failed to create {uploaded_files:?} because {error:?}");
-            }
-        }
-
-        let uploaded_file_path = uploaded_files.join(head_chunk_address.to_hex());
-
-        if self.data_map.is_none() {
-            warn!(
-                "No data-map being written for {:?} as it is empty",
-                self.filename
-            );
-        }
-        let serialized =
-            rmp_serde::to_vec(&(&self.filename, &self.data_map)).inspect_err(|_err| {
-                error!("Failed to serialize UploadedFile");
-            })?;
-
-        std::fs::write(&uploaded_file_path, serialized).inspect_err(|_err| {
-            error!(
-                "Could not write UploadedFile of {:?} to {uploaded_file_path:?}",
-                self.filename
-            );
-        })?;
-
-        Ok(())
-    }
-
-    pub fn read(path: &Path) -> Result<Self> {
-        let bytes = std::fs::read(path).inspect_err(|_err| {
-            error!("Error while reading the UploadedFile from {path:?}");
-        })?;
-        let metadata = rmp_serde::from_slice(&bytes).inspect_err(|_err| {
-            error!("Error while deserializing UploadedFile for {path:?}");
-        })?;
-        Ok(metadata)
-    }
-}
diff --git a/sn_cli/src/lib.rs b/sn_cli/src/lib.rs
deleted file mode 100644
index 4d0e77b41e..0000000000
--- a/sn_cli/src/lib.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-mod acc_packet;
-mod files;
-pub mod utils;
-
-pub use acc_packet::AccountPacket;
-pub use files::{
-    download_file, download_files, ChunkManager, Estimator, FilesUploadStatusNotifier,
-    FilesUploadSummary, FilesUploader, UploadedFile, UPLOADED_FILES,
-};
diff --git a/sn_cli/src/utils.rs b/sn_cli/src/utils.rs
deleted file mode 100644
index 093b939960..0000000000
--- a/sn_cli/src/utils.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use std::time::Duration;
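-
-// For example (illustrative):
-//
-//   assert!(is_valid_key_hex(&"a".repeat(64)));
-//   assert!(!is_valid_key_hex("not-a-key"));
-
-/// Returns whether a hex string is a valid secret key in hex format.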
-pub fn is_valid_key_hex(hex: &str) -> bool {
-    hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit())
-}
-
-pub fn duration_to_minute_seconds_string(duration: Duration) -> String {
-    let elapsed_minutes = duration.as_secs() / 60;
-    let elapsed_seconds = duration.as_secs() % 60;
-    if elapsed_minutes > 0 {
-        format!("{elapsed_minutes} minutes {elapsed_seconds} seconds")
-    } else {
-        format!("{elapsed_seconds} seconds")
-    }
-}
-
-pub fn duration_to_minute_seconds_miliseconds_string(duration: Duration) -> String {
-    let elapsed_minutes = duration.as_secs() / 60;
-    let elapsed_seconds = duration.as_secs() % 60;
-    let elapsed_millis = duration.subsec_millis();
-    if elapsed_minutes > 0 {
-        format!("{elapsed_minutes} minutes {elapsed_seconds} seconds {elapsed_millis} milliseconds")
-    } else if elapsed_seconds > 0 {
-        format!("{elapsed_seconds} seconds {elapsed_millis} milliseconds")
-    } else {
-        format!("{elapsed_millis} milliseconds")
-    }
-}
diff --git a/sn_client/CHANGELOG.md b/sn_client/CHANGELOG.md
deleted file mode 100644
index fb045ff82c..0000000000
--- a/sn_client/CHANGELOG.md
+++ /dev/null
@@ -1,2712 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-## [0.107.7](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.6...sn_client-v0.107.7) - 2024-06-04
-
-### Other
-- release
-- release
-- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22
-- *(network)* set metrics server to run on localhost
-
-## [0.107.6](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.5...sn_client-v0.107.6) - 2024-06-04
-
-### Fixed
-- *(transfer)* mismatched key shall result in decryption error
-
-### Other
-- *(transfer)* make discord_name decryption backward compatible
-## [0.107.5](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.4...sn_client-v0.107.5) - 2024-06-04
-
-### Other
-- *(network)* set metrics server to run on localhost
-
-## [0.107.4](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.3...sn_client-v0.107.4) - 2024-06-04
-
-### Fixed
-- *(faucet)* save the transfer not the cashnote for foundation
-
-### Other
-- *(release)* sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20
-
-## [0.107.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.2...sn_client-v0.107.3) - 2024-06-03
-
-### Fixed
-- enable compile time sk setting for faucet/genesis
-
-## [0.107.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.1...sn_client-v0.107.2) - 2024-06-03
-
-### Other
-- bump versions to enable re-release with env vars at compilation
-
-## [0.107.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.3...sn_client-v0.107.0) - 2024-06-03
-
-### Added
-- *(faucet)* write foundation cash note to disk
-- *(client)* read existing mnemonic from disk if available
-- integrate DAG crawling fixes from Josh and Qi
-- *(networking)* add UPnP metrics
-- *(network)* [**breaking**] move network versioning away from sn_protocol
-
- *(keys)* enable compile or runtime override of keys
-- *(launchpad)* use nat detection server to determine the nat status
-
-### Fixed
-- *(networking)* upnp feature gates for metrics
-- *(networking)* conditional upnp metrics
-
-### Other
-- rename DAG building to crawling
-- spend verification error management
-- *(networking)* cargo fmt
-- use secrets during build process
-- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18
-
-## [0.106.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.2...sn_client-v0.106.3) - 2024-05-24
-
-### Added
-- improved spend verification with DAG and fault detection
-- upgrade cli audit to use DAG
-- remove two unneeded env vars
-- pass genesis_cn pub fields separate to hide sk
-- hide genesis keypair
-- pass sk_str via cli opt
-- *(node)* use separate keys of Foundation and Royalty
-- *(wallet)* ensure genesis wallet attempts to load from local on init first
-- *(faucet)* increase initial balance
-- *(faucet)* make gifting server feat dependent
-- *(faucet)* send small amount to faucet, rest to foundation
-- *(faucet)* add feat for gifting-from-genesis
-- *(audit)* intercept sender of the payment forward
-- *(audit)* collect payment forward statistics
-- spend reason enum and sized cipher
-- *(metrics)* expose store cost value
-- keep track of the estimated network size metric
-- record libp2p relay and dcutr metrics
-- *(node)* periodically forward reward to specific address
-- use default keys for genesis, or override
-- use different key for payment forward
-- hide genesis keypair
-- tracking beta rewards from the DAG
-
-### Fixed
-- *(uploader)* do not error out immediately on max repayment errors
-- *(node)* notify fetch completion earlier to avoid being skipped
-- avoid adding mixed type addresses into RT
-- enable libp2p metrics to be captured
-- correct genesis_pk naming
-- genesis_cn public fields generated from hard coded value
-- invalid spend reason in data payments
-
-### Other
-- *(uploader)* return summary when upload fails due to max repayments
-- *(uploader)* return the list of max repayment reached items
-- improve cli DAG collection
-- remove now unused mostly duplicated code
-- improve DAG verification redundancy
-- *(faucet)* devskim ignore
-- *(faucet)* log existing faucet balance if non-zero
-- *(faucet)* add foundation PK as const
-- *(faucet)* clarify logs for verification
-- increase initial faucet balance
-- add temp log
-- *(faucet)* refresh cashnotes on fund
-- devSkim ignore foundation pub temp key
-- update got 'gifting-from-genesis' faucet feat
-- make open metrics feature default but without starting it by default
-- Revert "feat(node): make spend and cash_note reason field configurable"
-- Revert "feat(cli): track spend creation reasons during audit"
-- Revert "chore: refactor CASH_NOTE_REASON strings to consts"
-- Revert "feat(client): dump spends creation_reason statistics"
-- Revert "chore: address review comments"
-- *(node)* tuning the pricing curve
-- *(node)* remove unnecessary is_relayed check inside add_potential_candidates
-- move historic_quoting_metrics out of the record_store dir
-- clippy fixes for open metrics feature
-- *(networking)* update tests for pricing curve tweaks
-- *(refactor)* stabilise node size to 4k records,
-- Revert "chore: rename output reason
to purpose for clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- resolve errors after reverts -- Revert "feat: spend shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.106.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.1...sn_client-v0.106.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.106.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0...sn_client-v0.106.1) - 2024-05-08 - -### Other -- *(release)* sn_registers-v0.3.13 - -## [0.106.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0-alpha.5...sn_client-v0.106.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(cli)* track spend creation reasons during audit -- *(node)* make spend and cash_note reason field configurable -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- [**breaking**] renamings in CashNote -- *(faucet)* log from sn_client -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(network)* add --upnp flag to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* add in autonat server basics -- *(neetworking)* initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs created for -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(transfers)* do not genereate wallet by default -- [**breaking**] rename token to amount in Spend -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- create faucet via account load or generation -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* move acct_packet mnemonic into client layer -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol 
- do not add reported external addresses if we are behind home network
- *(networking)* do not add to dialed peers
- *(network)* do not strip out relay's PeerId
- *(relay)* craft the correctly formatted relay address
- *(network)* do not perform AutoNat for clients
- *(relay_manager)* do not dial with P2PCircuit protocol
- *(test)* quoting metrics might have live_time field changed along time
- *(node)* avoid false alert on FailedLocalRecord
- *(record_store)* prune only one record at a time
- *(node)* notify replication_fetcher of early completion
- *(node)* fetcher completes on_going_fetch entry on record_key only
- *(node)* not send out replication when failed read from local
- *(networking)* increase the local responsible range of nodes to K_VALUE peers away
- *(network)* clients should not perform farthest relevant record check
- *(node)* replication_fetch keep distance_range sync with record_store
- *(node)* replication_list in range filter
- transfer tests for HotWallet creation
- typo
- *(manager)* do not print to stdout on low verbosity level
- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time

### Other
- *(versions)* sync versions with latest crates.io vs
- check DAG crawling performance
- address review comments
- refactor CASH_NOTE_REASON strings to consts
- store owner info inside node instead of network
- small cleanup of dead code
- improve naming and typo fix
- clarify client documentation
- clarify client::new description
- clarify client documentation
- clarify client::new description
- *(deps)* bump dependencies
- cargo fmt
- rename output reason to purpose for clarity
- *(network)* move event handling to its own module
- cleanup network events
- *(network)* remove nat detection via incoming connections check
- enable connection keepalive timeout
- remove non relayed listener id from relay manager
- enable multiple relay connections
- return early if peer is not a node
- *(tryout)* do not add new relay candidates
- add debug lines while adding potential relay candidates
- do not remove old non-relayed listeners
- clippy fix
- *(networking)* remove empty file
- *(networking)* re-add global_only
- use quic again
- log listener id
- *(relay)* add candidate even if we are dialing
- remove quic
- cleanup, add in relay server behaviour, and todo
- *(node)* lower some log levels to reduce log size
- *(node)* optimise record_store farthest record calculation
- *(node)* do not reset farthest_acceptance_distance
- *(node)* remove duplicated record_store fullness check
- *(networking)* notify network event on failed put due to prune
- *(networking)* ensure pruned data is indeed further away than kept
- *(CI)* confirm there is no failed replication fetch
- *(networking)* remove circular vec error
- *(node)* unit test for recover historic quoting metrics
- *(node)* pass entire QuotingMetrics into calculate_cost_for_records
- *(node)* extend distance range
- address review comments
- *(transfers)* reduce error size
- *(transfer)* unit tests for PaymentQuote
- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47
- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0
- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47
- *(release)* sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0
- *(release)* sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0
- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0
- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0

## [0.105.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.105.1...sn_client-v0.105.2) - 2024-03-28

### Fixed
- *(cli)* read from cache during initial chunking process
- *(uploader)* do not error out on quote expiry during get store cost

## [0.105.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.105.0...sn_client-v0.105.1) - 2024-03-28

### Added
- *(uploader)* error out if the quote has expired during get store_cost
- *(uploader)* use WalletApi to prevent loading client wallet during each operation
- *(transfers)* implement WalletApi to expose common methods

### Fixed
- *(uploader)* clarify the use of root and wallet dirs

### Other
- *(uploader)* update docs

## [0.105.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.31...sn_client-v0.105.0) - 2024-03-27

### Added
- svg caching, fault tolerance during DAG collection
- *(uploader)* collect all the uploaded registers
- *(uploader)* repay immediately if the quote has expired
- *(uploader)* allow either chunk or chunk path to be used
- *(uploader)* use ClientRegister instead of Registers
- *(uploader)* register existence should be checked before going with payment flow
- *(client)* use the new Uploader instead of FilesUpload
- *(client)* implement a generic uploader with repay ability
- *(transfers)* enable client to check if a quote has expired
- [**breaking**] remove gossip code
- *(client)* make publish register as an associated function
- *(network)* filter out peers when returning store cost
- *(transfers)* [**breaking**] support multiple payments for the same xorname
- use Arc inside Client, Network to reduce clone cost
- *(networking)* add NodeIssue for tracking bad node shunning
- *(faucet)* rate limit based upon wallet locks

### Fixed
- *(test)* use tempfile lib instead of stdlib to create temp dirs
- *(clippy)* allow too many arguments as it is a private function
- *(uploader)* remove unused error tracking and allow retries for new payee
- *(uploader)* make the internals more clean
- *(uploader)* update force make payment logic
- *(register)* permissions verification was not being made by some Register APIs
- *(node)* fetching new data shall not cause timed_out immediately
- *(test)* generate unique temp dir to avoid read outdated data
- *(register)* shortcut permissions check when anyone can write to Register

### Other
- *(uploader)* remove unused code path when store cost is 0
- *(uploader)* implement tests to test the basic pipeline logic
- *(uploader)* remove FilesApi dependency
- *(uploader)* initial test setup for uploader
- *(uploader)* implement UploaderInterface for easier testing
- *(uploader)* remove failed_to states
- *(register)* minor simplification in Register Permissions implementation
- *(node)* refactor pricing metrics
- lower some networking log levels
- *(node)* loosen bad node detection criteria
- *(node)* optimization to reduce logging

## [0.104.31](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.30...sn_client-v0.104.31) - 2024-03-21

### Added
- improve parallelisation with buffered streams
- refactor DAG, improve error management and security
- dag error recording
- *(folders)* folders APIs to accept an encryption key for metadata chunks
- *(protocol)* add rpc to set node log level on the fly

### Other
- *(cli)* adding automated test for metadata chunk encryption
- *(node)* reduce bad_nodes check resource usage

## [0.104.30](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29...sn_client-v0.104.30) - 2024-03-18

### Other
- updated the following local packages: sn_networking

## [0.104.29-alpha.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.1...sn_client-v0.104.29-alpha.2) - 2024-03-14

### Added
- moved param to outside calc
- refactor spend validation

### Fixed
- don't stop spend verification at spend error, generalise spend serde

### Other
- store test utils under a new crate
- *(acc-packet)* adding automated tests to sn_cli::AccountPacket
- improve code quality
- new `sn_service_management` crate
- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82

## [0.104.29-alpha.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.0...sn_client-v0.104.29-alpha.1) - 2024-03-08

### Other
- *(folders)* adding automated tests to sn_client::FoldersApi

## [0.104.28](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.27...sn_client-v0.104.28) - 2024-03-06

### Added
- *(cli)* pull any Folders changes from network when syncing and merge them to local version
- make sn_cli use sn_client's re-exports
- *(folders)* sync up logic and CLI cmd
- *(register)* when a new entry is written return its hash
- refactor upload with iter
- actionable double spend reporting
- collect royalties through DAG
- *(folders)* store files data-map within Folders metadata chunk
- *(folders)* regenerate tracking info when downloading Folders from the network
- *(folders)* realise local changes made to folders/files
- *(folders)* keep track of local changes to Folders
- expose sn related deps to app builders

### Fixed
- filter out spent cashnotes in received client transfers

### Other
- clean swarm commands errs and spend errors
- also add deps features in sn_client
- *(release)* sn_transfers-v0.16.1
- *(release)* sn_protocol-v0.15.0/sn-node-manager-v0.4.0
- *(cli)* removing some redundant logic from acc-packet codebase
- *(folders)* some simplifications to acc-packet codebase

## [0.104.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.26...sn_client-v0.104.27) - 2024-02-23

### Other
- test docs test
- write online documentation
- push documentation
- sync documentation
- write atop write merge branches
- read and write register docs
- create register docs
- register docs

## [0.104.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.25...sn_client-v0.104.26) - 2024-02-21

### Other
- *(release)* initial alpha test release

## [0.104.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.24...sn_client-v0.104.25) - 2024-02-20

### Other
- updated the following local packages: sn_protocol

## [0.104.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.23...sn_client-v0.104.24) - 2024-02-20

### Added
- estimate feature with ci and balance after with fn docs

## [0.104.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.22...sn_client-v0.104.23) - 2024-02-20

### Other
- updated the following local packages: sn_networking

## [0.104.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.21...sn_client-v0.104.22) - 2024-02-20

### Added
- spend and DAG utilities

### Other
- improve SpendDagGet names

## [0.104.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.20...sn_client-v0.104.21) - 2024-02-20

### Added
- *(folders)* move folders/files metadata out of Folders entries

## [0.104.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.19...sn_client-v0.104.20) - 2024-02-20

### Added
- *(registers)* expose MerkleReg of RegisterCrdt in all Register types

### Fixed
- clippy warnings

### Other
- mark merkle_reg() accessors as unstable (in comment) on Register types

## [0.104.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.18...sn_client-v0.104.19) - 2024-02-20

### Other
- improve DAG crawling performance with better parallelisation

## [0.104.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.17...sn_client-v0.104.18) - 2024-02-19

### Other
- updated the following local packages: sn_networking

## [0.104.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.16...sn_client-v0.104.17) - 2024-02-19

### Other
- updated the following local packages: sn_networking

## [0.104.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.15...sn_client-v0.104.16) - 2024-02-19

### Other
- updated the following local packages: sn_networking

## [0.104.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.14...sn_client-v0.104.15) - 2024-02-15

### Added
- *(client)* keep payee as part of storage payment cache

### Other
- *(client)* remove the payee-map from StoragePaymentResult

## [0.104.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.13...sn_client-v0.104.14) - 2024-02-15

### Other
- updated the following local packages: sn_networking

## [0.104.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.12...sn_client-v0.104.13) - 2024-02-15

### Other
- updated the following local packages: sn_protocol

## [0.104.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.11...sn_client-v0.104.12) - 2024-02-14

### Other
- updated the following local packages: sn_protocol

## [0.104.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.10...sn_client-v0.104.11) - 2024-02-14

### Other
- *(refactor)* move mod.rs files the modern way

## [0.104.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.9...sn_client-v0.104.10) - 2024-02-13

### Other
- updated the following local packages: sn_protocol

## [0.104.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.8...sn_client-v0.104.9) - 2024-02-13

### Added
- filtering dag errors
- identify orphans and inconsistencies in the DAG

### Fixed
- manage the genesis spend case

## [0.104.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.7...sn_client-v0.104.8) - 2024-02-12

### Other
- updated the following local packages: sn_networking

## [0.104.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.6...sn_client-v0.104.7) - 2024-02-12

### Other
- updated the following local packages: sn_networking

## [0.104.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.5...sn_client-v0.104.6) - 2024-02-12

### Added
- *(cli)* single payment for all folders being synced
- *(cli)* adding Folders download CLI cmd
- *(client)* adding Folders sync API and CLI cmd

### Other
- *(cli)* improvements based on peer review

## [0.104.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.4...sn_client-v0.104.5) - 2024-02-09

### Other
- updated the following local packages: sn_networking

## [0.104.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.3...sn_client-v0.104.4) - 2024-02-09

### Other
- updated the following local packages: sn_networking

## [0.104.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.2...sn_client-v0.104.3) - 2024-02-08

### Other
- copyright update to current year

## [0.104.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.1...sn_client-v0.104.2) - 2024-02-08

### Added
- move the RetryStrategy into protocol and use that during cli upload/download
- *(client)* perform more retries if we are verifying a register
- *(network)* impl RetryStrategy to make the reattempts flexible

### Fixed
- *(ci)* update the reattempt flag to retry_strategy flag for the cli

### Other
- *(network)* rename re-attempts to retry strategy

## [0.104.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.0...sn_client-v0.104.1) - 2024-02-08

### Other
- updated the following local packages: sn_networking

## [0.104.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.7...sn_client-v0.104.0) - 2024-02-07

### Added
- *(client)* put register to the peer that we paid to
- *(client)* [**breaking**] make the result of the storage payment into a struct

### Fixed
- rust docs error

## [0.103.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.6...sn_client-v0.103.7) - 2024-02-07

### Added
- extendable local state DAG in cli

## [0.103.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.5...sn_client-v0.103.6) - 2024-02-06

### Other
- updated the following local packages: sn_transfers

## [0.103.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.4...sn_client-v0.103.5) - 2024-02-05

### Other
- updated the following local packages: sn_networking

## [0.103.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.3...sn_client-v0.103.4) - 2024-02-05

### Other
- updated the following local packages: sn_networking

## [0.103.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.2...sn_client-v0.103.3) - 2024-02-05

### Other
- change to hot wallet
- docs formatting
- cargo fmt changes
- example for api verify uploaded chunks
- example for api verify cash note redemptions
- example for api publish on topic
- example for api unsubscribe to topic
- example for api subscribe to topic
- example for api get spend from network
- example for api verify register stored
- example for api get chunk
- example for api store chunk
- example for api create and pay for register
- example for api get register
- example for api get signed reg from network
- example for api signer pk
- example for api signer
- example for api sign
- example for api events channel
- example for api new
- apply format and params to doc templates
- better template set
- mark applicable functions with empty headers

## [0.103.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.1...sn_client-v0.103.2) - 2024-02-05

### Other
- updated the following local packages: sn_protocol

## [0.103.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.0...sn_client-v0.103.1) - 2024-02-02

### Other
- updated the following local packages: sn_networking

## [0.103.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.22...sn_client-v0.103.0) - 2024-02-02

### Other
- [**breaking**] renaming LocalWallet to HotWallet as it holds the secret key for signing tx

## [0.102.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.21...sn_client-v0.102.22) - 2024-02-01

### Other
- updated the following local packages: sn_networking

## [0.102.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.20...sn_client-v0.102.21) - 2024-02-01

### Fixed
- *(client)* error out when fetching large data_map

## [0.102.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.19...sn_client-v0.102.20) - 2024-02-01

### Other
- updated the following local packages: sn_networking

## [0.102.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.18...sn_client-v0.102.19) - 2024-01-31

### Other
- nano tokens to network address
- change to question mark from expect
- test doc changes to remove code and refactor for pr
- broadcast signed spends
- send
- verify cash note
- receive and cargo fmt
- send spends

## [0.102.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.17...sn_client-v0.102.18) - 2024-01-31

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.102.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.16...sn_client-v0.102.17) - 2024-01-30

### Other
- *(client)* log client upload failure error

## [0.102.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.15...sn_client-v0.102.16) - 2024-01-30

### Fixed
- *(client)* error out on verify_chunk_store

## [0.102.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.14...sn_client-v0.102.15) - 2024-01-30

### Other
- updated the following local packages: sn_networking

## [0.102.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.13...sn_client-v0.102.14) - 2024-01-30

### Other
- updated the following local packages: sn_protocol

## [0.102.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.12...sn_client-v0.102.13) - 2024-01-29

### Other
- *(sn_transfers)* making some functions/helpers to be constructor methods of public structs

## [0.102.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.11...sn_client-v0.102.12) - 2024-01-25

### Other
- improved pay for storage
- mut wallet description
- revert to mut wallet
- change to wallet result
- cargo fmt
- into wallet doc
- into wallet doc
- expand abbreviations mutable wallet
- pay for storage clone for test pass
- expand on abbreviation and added detail
- pay for records example
- pay for records and cleanup
- pay for storage once detail
- send unsigned detail
- pay for storage
- get store cost at addr unused

## [0.102.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.10...sn_client-v0.102.11) - 2024-01-25

### Other
- updated the following local packages: sn_networking

## [0.102.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.9...sn_client-v0.102.10) - 2024-01-25

### Added
- client webtransport-websys feat

### Other
- use a single target_arch.rs to simplify imports for wasm32 or not

## [0.102.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.8...sn_client-v0.102.9) - 2024-01-24

### Other
- updated the following local packages: sn_networking, sn_networking

## [0.102.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.7...sn_client-v0.102.8) - 2024-01-24

### Added
- client webtransport-websys feat

### Other
- tidy up wasm32 as target arch rather than a feat

## [0.102.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.6...sn_client-v0.102.7) - 2024-01-23

### Other
- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35

## [0.102.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.5...sn_client-v0.102.6) - 2024-01-22

### Other
- wallet docs

## [0.102.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.4...sn_client-v0.102.5) - 2024-01-22

### Added
- spend dag utils

## [0.102.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.3...sn_client-v0.102.4) - 2024-01-18

### Other
- updated the following local packages: sn_protocol

## [0.102.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.2...sn_client-v0.102.3) - 2024-01-18

### Added
- set quic as default transport

## [0.102.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.1...sn_client-v0.102.2) - 2024-01-18

### Other
- updated the following local packages: sn_transfers

## [0.102.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.0...sn_client-v0.102.1) - 2024-01-17

### Other
- fixed typo
- filled missing arguments
- formatting
- formatting
- new wallet docs

## [0.102.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.13...sn_client-v0.102.0) - 2024-01-17

### Fixed
- *(docs)* update Client signature for doc test
- *(client)* move out the peers added var to event handler loop

### Other
- *(client)* [**breaking**] move out client connection progress bar

## [0.101.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.12...sn_client-v0.101.13) - 2024-01-17

### Other
- new wallet client example

## [0.101.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.11...sn_client-v0.101.12) - 2024-01-16

### Other
- updated the following local packages: sn_transfers

## [0.101.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.10...sn_client-v0.101.11) - 2024-01-15

### Fixed
- *(client)* avoid deadlock during upload in case of error

## [0.101.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.9...sn_client-v0.101.10) - 2024-01-15

### Other
- updated the following local packages: sn_protocol

## [0.101.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.8...sn_client-v0.101.9) - 2024-01-15

### Fixed
- *(client)* cache payments via disk instead of memory map

### Other
- *(client)* collect wallet handling time statistics

## [0.101.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.7...sn_client-v0.101.8) - 2024-01-12

### Other
- updated the following local packages: sn_networking

## [0.101.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.6...sn_client-v0.101.7) - 2024-01-12

### Fixed
- *(client)* avoid deadlock with less chunks

## [0.101.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.5...sn_client-v0.101.6) - 2024-01-11

### Other
- *(client)* refactor client upload flow

## [0.101.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.4...sn_client-v0.101.5) - 2024-01-11

### Added
- error if file size smaller than MIN_ENCRYPTABLE_BYTES

### Other
- update self_encryption dep

## [0.101.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.3...sn_client-v0.101.4) - 2024-01-11

### Other
- updated the following local packages: sn_networking

## [0.101.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.2...sn_client-v0.101.3) - 2024-01-10

### Added
- *(client)* client APIs and CLI cmd to broadcast a transaction signed offline

### Other
- fixup send_spends and use ExcessiveNanoValue error

## [0.101.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.1...sn_client-v0.101.2) - 2024-01-10

### Added
- allow register CLI to create a public register writable to anyone

## [0.101.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.0...sn_client-v0.101.1) - 2024-01-09

### Other
- updated the following local packages: sn_networking, sn_transfers

## [0.101.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.100.1...sn_client-v0.101.0) - 2024-01-09

### Added
- *(client)* use buffered future stream to download chunks

### Fixed
- *(client)* empty out the download cache once the stream exits
- *(ci)* fix clippy error due to Send not being general

### Other
- *(client)* add docs to FilesDownload
- *(client)* [**breaking**] move read_from range into `DownloadFiles`

## [0.100.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.100.0...sn_client-v0.100.1) - 2024-01-09

### Other
- get spend from network only require Majority

## [0.100.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.42...sn_client-v0.100.0) - 2024-01-08

### Added
- *(cli)* integrate FilesDownload with cli
- *(client)* emit events from download process

### Other
- *(client)* [**breaking**] refactor `Files` into `FilesUpload`

## [0.99.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.41...sn_client-v0.99.42) - 2024-01-08

### Other
- updated the following local packages: sn_networking

## [0.99.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.40...sn_client-v0.99.41) - 2024-01-08

### Other
- more doc updates to readme files

## [0.99.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.39...sn_client-v0.99.40) - 2024-01-08

### Fixed
- *(client)* reset sequential_payment_fails on batch upload success

## [0.99.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.38...sn_client-v0.99.39) - 2024-01-05

### Other
- add clippy unwrap lint to workspace

## [0.99.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.37...sn_client-v0.99.38) - 2024-01-05

### Added
- *(network)* move the kad::put_record_to inside PutRecordCfg

## [0.99.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.36...sn_client-v0.99.37) - 2024-01-03

### Added
- *(client)* clients no longer upload data_map by default

### Other
- refactor for clarity around head_chunk_address
- *(cli)* do not write datamap chunk if non-public

## [0.99.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.35...sn_client-v0.99.36) - 2024-01-03

### Other
- updated the following local packages: sn_networking

## [0.99.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.34...sn_client-v0.99.35) - 2024-01-02

### Fixed
- *(client)* wallet not progress with unconfirmed tx

## [0.99.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.33...sn_client-v0.99.34) - 2024-01-02

### Other
- updated the following local packages: sn_networking

## [0.99.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.32...sn_client-v0.99.33) - 2023-12-29

### Other
- updated the following local packages: sn_networking

## [0.99.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.31...sn_client-v0.99.32) - 2023-12-29

### Added
- use put_record_to during upload chunk

## [0.99.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.30...sn_client-v0.99.31) - 2023-12-26

### Other
- updated the following local packages: sn_networking

## [0.99.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.29...sn_client-v0.99.30) - 2023-12-22

### Other
- updated the following local packages: sn_networking

## [0.99.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.28...sn_client-v0.99.29) - 2023-12-21

### Other
- *(client)* emit chunk Uploaded event if a chunk was verified during repayment

## [0.99.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.27...sn_client-v0.99.28) - 2023-12-20

### Other
- reduce default batch size

## [0.99.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.26...sn_client-v0.99.27) - 2023-12-19

### Added
- network royalties through audit POC

## [0.99.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.25...sn_client-v0.99.26) - 2023-12-19

### Other
- updated the following local packages: sn_networking

## [0.99.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.24...sn_client-v0.99.25) - 2023-12-19

### Fixed
- *(test)* tests should try to load just the faucet wallet

## [0.99.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.23...sn_client-v0.99.24) - 2023-12-19

### Other
- updated the following local packages: sn_networking

## [0.99.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.22...sn_client-v0.99.23) - 2023-12-19

### Fixed
- *(cli)* mark chunk completion as soon as we upload each chunk

## [0.99.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.21...sn_client-v0.99.22) - 2023-12-18

### Added
- *(transfers)* add api for cleaning up CashNotes

## [0.99.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.20...sn_client-v0.99.21) - 2023-12-18

### Added
- *(client)* update the Files config via setters
- *(client)* track the upload stats inside Files
- *(client)* move upload retry logic from CLI to client

### Fixed
- *(test)* use the Files struct to upload chunks

### Other
- *(client)* add docs to the Files struct

## [0.99.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.19...sn_client-v0.99.20) - 2023-12-14

### Other
- updated the following local packages: sn_networking, sn_protocol, sn_registers, sn_transfers

## [0.99.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.18...sn_client-v0.99.19) - 2023-12-14

### Added
- *(client)* add backoff to payment retries
- *(networking)* use backoff for get_record

## [0.99.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.17...sn_client-v0.99.18) - 2023-12-14

### Other
- *(test)* fix log messages during churn test

## [0.99.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.16...sn_client-v0.99.17) - 2023-12-14

### Added
- *(cli)* simple retry mechanism for remaining chunks

## [0.99.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.15...sn_client-v0.99.16) - 2023-12-13

### Other
- updated the following local packages: sn_networking

## [0.99.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.14...sn_client-v0.99.15) - 2023-12-13

### Added
- add amounts to edges
- audit DAG collection and visualization
- cli double spends audit from genesis

### Fixed
- docs

### Other
- udeps and gitignore

## [0.99.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.13...sn_client-v0.99.14) - 2023-12-12

### Other
- updated the following local packages: sn_protocol

## [0.99.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.12...sn_client-v0.99.13) - 2023-12-12

### Added
- *(cli)* skip payment and upload for existing chunks

## [0.99.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.11...sn_client-v0.99.12) - 2023-12-12

### Added
- constant uploading across batches

## [0.99.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.10...sn_client-v0.99.11) - 2023-12-11

### Other
- updated the following local packages: sn_networking

## [0.99.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.9...sn_client-v0.99.10) - 2023-12-07

### Other
- updated the following local packages: sn_networking

## [0.99.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.8...sn_client-v0.99.9) - 2023-12-06

### Other
- *(network)* use PUT Quorum::One for chunks
- *(network)* add more docs to the get_record_handlers

## [0.99.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.7...sn_client-v0.99.8) - 2023-12-06

### Other
- updated the following local packages: sn_networking

## [0.99.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.6...sn_client-v0.99.7) - 2023-12-06

### Other
- updated the following local packages: sn_transfers

## [0.99.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.5...sn_client-v0.99.6) - 2023-12-06

### Other
- remove some needless cloning
- remove needless pass by value
- use inline format args
- add boilerplate for workspace lints

## [0.99.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.4...sn_client-v0.99.5) - 2023-12-05

### Added
- *(network)* use custom enum for get_record errors

### Other
- *(network)* avoid losing error info by converting them to a single type

## [0.99.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.3...sn_client-v0.99.4) - 2023-12-05

### Other
- updated the following local packages: sn_transfers

## [0.99.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.2...sn_client-v0.99.3) - 2023-12-05

### Other
- updated the following local packages: sn_networking

## [0.99.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.1...sn_client-v0.99.2) - 2023-12-05

### Added
- allow for cli chunk put retries for unverifiable chunks

### Fixed
- mark chunks as completed when no failures on retry

## [0.99.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.0...sn_client-v0.99.1) - 2023-12-05

### Fixed
- *(client)* don't assume verification is always set w/ VerificationConfig

### Other
- tie node reward test to number of data.
- *(networking)* remove triggered bootstrap slowdown

## [0.99.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.23...sn_client-v0.99.0) - 2023-12-01

### Added
- *(network)* use separate PUT/GET configs

### Other
- *(ci)* fix CI build cache parsing error
- *(network)* [**breaking**] use the Quorum struct provided by libp2p

## [0.98.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.22...sn_client-v0.98.23) - 2023-11-29

### Other
- updated the following local packages: sn_networking

## [0.98.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.21...sn_client-v0.98.22) - 2023-11-29

### Other
- updated the following local packages: sn_networking

## [0.98.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.20...sn_client-v0.98.21) - 2023-11-29

### Added
- add missing quic features

## [0.98.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.19...sn_client-v0.98.20) - 2023-11-29

### Added
- verify all the way to genesis
- verify spends through the cli

### Fixed
- genesis check security flaw

## [0.98.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.18...sn_client-v0.98.19) - 2023-11-28

### Added
- *(chunks)* serialise Chunks with MsgPack instead of bincode

## [0.98.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.17...sn_client-v0.98.18) - 2023-11-28

### Other
- updated the following local packages: sn_protocol

## [0.98.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.16...sn_client-v0.98.17) - 2023-11-27

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.98.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.15...sn_client-v0.98.16) - 2023-11-23

### Added
- *(networking)* reduce batch size to 64
- add centralised retries for all data payment kinds

### Fixed
- previous code assumptions

## [0.98.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.14...sn_client-v0.98.15) - 2023-11-23

### Other
- updated the following local packages: sn_networking

## [0.98.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.13...sn_client-v0.98.14) - 2023-11-23

### Other
- updated the following local packages: sn_transfers

## [0.98.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.12...sn_client-v0.98.13) - 2023-11-23

### Other
- updated the following local packages: sn_networking

## [0.98.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.11...sn_client-v0.98.12) - 2023-11-22

### Other
- *(release)* non gossip handler shall not throw gossip msg up

## [0.98.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.10...sn_client-v0.98.11) - 2023-11-22

### Added
- *(cli)* add download batch-size option

## [0.98.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.9...sn_client-v0.98.10) - 2023-11-21

### Added
- make joining gossip for clients and rpc nodes optional

### Other
- *(sn_networking)* enable_gossip via the builder pattern

## [0.98.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.8...sn_client-v0.98.9) - 2023-11-21

### Other
- updated the following local packages: sn_networking

## [0.98.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.7...sn_client-v0.98.8) - 2023-11-20

### Other
- increase default batch size

## [0.98.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.6...sn_client-v0.98.7) - 2023-11-20

### Other
- updated the following local packages: sn_networking, sn_transfers

## [0.98.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.5...sn_client-v0.98.6) - 2023-11-20

### Other
- updated the following local packages: sn_networking

## [0.98.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.4...sn_client-v0.98.5) - 2023-11-20

### Added
- quotes

## [0.98.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.3...sn_client-v0.98.4) - 2023-11-17

### Fixed
- *(client)* ensure we store spends at CLOSE_GROUP nodes.

## [0.98.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.2...sn_client-v0.98.3) - 2023-11-16

### Other
- updated the following local packages: sn_networking

## [0.98.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.1...sn_client-v0.98.2) - 2023-11-16

### Added
- massive cleaning to prepare for quotes

## [0.98.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.0...sn_client-v0.98.1) - 2023-11-15

### Other
- updated the following local packages: sn_protocol

## [0.98.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.6...sn_client-v0.98.0) - 2023-11-15

### Added
- *(client)* [**breaking**] error out if we cannot connect to the network in

### Other
- *(client)* [**breaking**] remove request_response timeout argument

## [0.97.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.5...sn_client-v0.97.6) - 2023-11-15

### Other
- updated the following local packages: sn_protocol, sn_transfers

## [0.97.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.4...sn_client-v0.97.5) - 2023-11-14

### Other
- *(royalties)* verify royalties fees amounts

## [0.97.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.3...sn_client-v0.97.4) - 2023-11-14

### Other
- updated the following local packages: sn_networking

## [0.97.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.2...sn_client-v0.97.3) - 2023-11-14

### Other
- updated the following local packages: sn_networking

## [0.97.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.1...sn_client-v0.97.2) - 2023-11-13

### Added
- no throwing up if not a gossip listener

## [0.97.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.0...sn_client-v0.97.1) - 2023-11-10

### Other
- updated the following local packages: sn_transfers

## [0.97.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.6...sn_client-v0.97.0) - 2023-11-10

### Added
- verify chunks with Quorum::N(2)
- *(client)* only pay one node

### Fixed
- *(client)* register validations checks for more than one node
- *(client)* set Quorum::One for registers
- *(test)* use client API to listen for gossipsub msgs when checking transfer notifs

### Other
- *(transfers)* more logs around payments...
- *(churn)* small delay before validating chunks in data_with_churn
- *(client)* register get quorum->one
- *(tests)* make gossipsub verification more strict wrt number of msgs received

## [0.96.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.5...sn_client-v0.96.6) - 2023-11-09

### Other
- updated the following local packages: sn_transfers

## [0.96.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.4...sn_client-v0.96.5) - 2023-11-09

### Other
- updated the following local packages: sn_networking

## [0.96.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.3...sn_client-v0.96.4) - 2023-11-09

### Other
- updated the following local packages: sn_networking

## [0.96.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.2...sn_client-v0.96.3) - 2023-11-08

### Other
- updated the following local packages: sn_networking

## [0.96.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.1...sn_client-v0.96.2) - 2023-11-08

### Added
- *(node)* set custom msg id in order to deduplicate transfer notifs

## [0.96.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.0...sn_client-v0.96.1) - 2023-11-07

### Other
- Derive Clone on ClientRegister

## [0.96.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.27...sn_client-v0.96.0) - 2023-11-07

### Fixed
- *(client)* [**breaking**] make `Files::chunk_file` into an associated function

## [0.95.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.26...sn_client-v0.95.27) - 2023-11-07

### Other
- updated the following local packages: sn_protocol

## [0.95.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.25...sn_client-v0.95.26) - 2023-11-06

### Added
- *(node)* log marker to track the number of peers in the routing table

## [0.95.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.24...sn_client-v0.95.25) - 2023-11-06

### Other
- updated the following local packages: sn_protocol

## [0.95.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.23...sn_client-v0.95.24) - 2023-11-06

### Other
- updated the following local packages: sn_protocol

## [0.95.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.22...sn_client-v0.95.23) - 2023-11-06

### Added
- *(deps)* upgrade libp2p to 0.53

## [0.95.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.21...sn_client-v0.95.22) - 2023-11-03

### Other
- updated the following local packages: sn_networking

## [0.95.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.20...sn_client-v0.95.21) - 2023-11-02

### Other
- updated the following local packages: sn_networking

## [0.95.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.19...sn_client-v0.95.20) - 2023-11-02

### Added
- keep transfers in mem instead of heavy cashnotes

## [0.95.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.18...sn_client-v0.95.19) - 2023-11-01

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.95.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.17...sn_client-v0.95.18) - 2023-11-01

### Other
- log detailed intermediate errors

## [0.95.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.16...sn_client-v0.95.17) - 2023-11-01

### Other
- updated the following local packages: sn_networking

## [0.95.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.15...sn_client-v0.95.16) - 2023-11-01

### Other
- updated the following local packages: sn_transfers

## [0.95.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.14...sn_client-v0.95.15) - 2023-10-31

### Other
- updated the following local packages: sn_networking

## [0.95.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.13...sn_client-v0.95.14) - 2023-10-30

### Other
- *(networking)* de/serialise directly to Bytes

## [0.95.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.12...sn_client-v0.95.13) - 2023-10-30

### Added
- `bincode::serialize` into `Bytes` without intermediate allocation

## [0.95.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.11...sn_client-v0.95.12) - 2023-10-30

### Other
- *(node)* use Bytes for Gossip related data types
- *(node)* make gossipsubpublish take Bytes

## [0.95.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.10...sn_client-v0.95.11) - 2023-10-27

### Added
- *(rpc-client)* be able to decrypt received Transfers by providing a secret key

## [0.95.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.9...sn_client-v0.95.10) - 2023-10-27

### Other
- updated the following local packages: sn_networking

## [0.95.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.8...sn_client-v0.95.9) - 2023-10-26

### Fixed
- client carry out merge when verify register storage

## [0.95.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.7...sn_client-v0.95.8) - 2023-10-26

### Fixed
- add libp2p identity with rand dep for tests

## [0.95.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.6...sn_client-v0.95.7) - 2023-10-26

### Other
- updated the following local packages: sn_networking, sn_registers, sn_transfers

## [0.95.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.5...sn_client-v0.95.6) - 2023-10-26

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.95.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.4...sn_client-v0.95.5) - 2023-10-25

### Added
- *(cli)* chunk files in parallel

## [0.95.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.3...sn_client-v0.95.4) - 2023-10-24

### Fixed
- *(tests)* nodes rewards tests to account for repayments amounts

## [0.95.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.2...sn_client-v0.95.3) - 2023-10-24

### Other
- *(api)* wallet APIs to account for network royalties fees when returning total cost paid for storage

## [0.95.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.1...sn_client-v0.95.2) - 2023-10-24

### Other
- updated the following local packages: sn_networking, sn_transfers

## [0.95.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.0...sn_client-v0.95.1) - 2023-10-24

### Added
- *(client)* do not retry verification GETs

### Other
- log and debug SwarmCmd

## [0.95.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.8...sn_client-v0.95.0) - 2023-10-24

### Added
- *(protocol)* [**breaking**] implement `PrettyPrintRecordKey` as a `Cow` type

## [0.94.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.7...sn_client-v0.94.8) - 2023-10-23

### Other
- updated the following local packages: sn_networking

## [0.94.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.6...sn_client-v0.94.7) - 2023-10-23

### Other
- more custom debug and debug skips

## [0.94.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.5...sn_client-v0.94.6) - 2023-10-22

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.94.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.4...sn_client-v0.94.5) - 2023-10-21

### Other
- updated the following local packages: sn_networking

## [0.94.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.3...sn_client-v0.94.4) - 2023-10-20

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.94.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.2...sn_client-v0.94.3) - 2023-10-20

### Added
- *(client)* stop further bootstrapping if the client has K_VALUE peers

## [0.94.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.1...sn_client-v0.94.2) - 2023-10-19

### Fixed
- *(network)* emit NetworkEvent when we publish a gossipsub msg

## [0.94.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.0...sn_client-v0.94.1) - 2023-10-18

### Other
- updated the following local packages: sn_networking

## [0.94.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.18...sn_client-v0.94.0) - 2023-10-18

### Added
- *(client)* verify register sync, and repay if not stored on all nodes
- *(client)* verify register uploads and retry and repay if failed

### Other
- Revert "feat: keep transfers in mem instead of mem and i/o heavy cashnotes"
- *(client)* always validate storage payments
- repay for data in node rewards tests
- *(client)* remove price tolerance at the client

## [0.93.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.17...sn_client-v0.93.18) - 2023-10-18

### Added
- keep transfers in mem instead of mem and i/o heavy cashnotes

## [0.93.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.16...sn_client-v0.93.17) - 2023-10-17

### Fixed
- *(transfers)* don't overwrite existing payment transactions when we top up

### Other
- adding comments and cleanup around quorum / payment fixes

## [0.93.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.15...sn_client-v0.93.16) - 2023-10-16

### Fixed
- return correct error type
- consider record split an error, handle it for regs

## [0.93.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.14...sn_client-v0.93.15) - 2023-10-16

### Other
- updated the following local packages: sn_networking

## [0.93.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.13...sn_client-v0.93.14) - 2023-10-13

### Other
- updated the following local packages: sn_networking, sn_protocol

## [0.93.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.12...sn_client-v0.93.13) - 2023-10-13

### Fixed
- batch download process

## [0.93.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.11...sn_client-v0.93.12) - 2023-10-12

### Other
- updated the following local packages: sn_networking

## [0.93.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.10...sn_client-v0.93.11) - 2023-10-12

### Other
- updated the following local packages: sn_networking

## [0.93.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.9...sn_client-v0.93.10) - 2023-10-12

### Other
- more detailed logging when client creating store cash_note

## [0.93.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.8...sn_client-v0.93.9) - 2023-10-11

### Fixed
- expose RecordMismatch errors and cleanup wallet if we hit that

### Other
- *(transfers)* add some more clarity around DoubleSpendAttemptedForCashNotes
- *(transfers)* remove pointless api

## [0.93.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.7...sn_client-v0.93.8) - 2023-10-11

### Other
- updated the following local packages: sn_networking

## [0.93.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.6...sn_client-v0.93.7) - 2023-10-11

### Added
- showing expected holders to CLI when required
- verify put_record with expected_holders

## [0.93.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.5...sn_client-v0.93.6) - 2023-10-10

### Added
- *(transfer)* special event for transfer notifs over gossipsub

## [0.93.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.4...sn_client-v0.93.5) - 2023-10-10

### Other
- compare files after download twice

## [0.93.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.3...sn_client-v0.93.4) - 2023-10-10

### Other
- updated the following local packages: sn_transfers

## [0.93.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.2...sn_client-v0.93.3) - 2023-10-09

### Other
- updated the following local packages: sn_networking

## [0.93.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.1...sn_client-v0.93.2) - 2023-10-08

### Other
- updated the following local packages: sn_networking

## [0.93.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.0...sn_client-v0.93.1) - 2023-10-06

### Added
- feat!(sn_transfers): unify store api for wallet

### Other
- *(client)* don't println for wallet errors

## [0.93.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.9...sn_client-v0.93.0) - 2023-10-06

### Fixed
- *(client)* [**breaking**] unify send_without_verify and send functions

### Other
- *(cli)* reuse the client::send function to send amount from wallet

## [0.92.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.8...sn_client-v0.92.9) - 2023-10-06

### Other
- fix new clippy errors

## [0.92.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.7...sn_client-v0.92.8) - 2023-10-05

### Other
- updated the following local packages: sn_networking, sn_transfers

## [0.92.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.6...sn_client-v0.92.7) - 2023-10-05

### Added
- feat!(cli): remove concurrency argument

## [0.92.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.5...sn_client-v0.92.6) - 2023-10-05

### Fixed
- *(sn_transfers)* be sure we store CashNotes before writing the wallet file

## [0.92.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.4...sn_client-v0.92.5) - 2023-10-05

### Added
- quorum for records get

### Fixed
- use specific verify func for chunk stored verification

## [0.92.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.3...sn_client-v0.92.4) - 2023-10-05

### Added
- use progress bars on `files upload`

### Other
- pay_for_chunks returns cost and new balance

## [0.92.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.2...sn_client-v0.92.3) - 2023-10-04

### Fixed
-- *(wallet)* remove expect statements - -## [0.92.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.1...sn_client-v0.92.2) - 2023-10-04 - -### Fixed -- record_to_verify for store_chunk shall be a Chunk - -## [0.92.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.0...sn_client-v0.92.1) - 2023-10-04 - -### Other -- updated the following local packages: sn_networking - -## [0.92.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.11...sn_client-v0.92.0) - 2023-10-04 - -### Added -- improve register API - -## [0.91.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.10...sn_client-v0.91.11) - 2023-10-04 - -### Added -- *(client)* reuse cashnotes for address payments - -### Other -- separate method and write test - -## [0.91.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.9...sn_client-v0.91.10) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.8...sn_client-v0.91.9) - 2023-10-03 - -### Added -- re-attempt when getting chunk from network - -## [0.91.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.7...sn_client-v0.91.8) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.6...sn_client-v0.91.7) - 2023-10-02 - -### Other -- remove all spans. - -## [0.91.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.5...sn_client-v0.91.6) - 2023-10-02 - -### Other -- updated the following local packages: sn_transfers - -## [0.91.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.4...sn_client-v0.91.5) - 2023-10-02 - -### Other -- *(client)* more logs around StoreCost retrieval - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.3...sn_client-v0.91.4) - 2023-09-29 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.91.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.2...sn_client-v0.91.3) - 2023-09-28 - -### Added -- client to client transfers - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.1...sn_client-v0.91.2) - 2023-09-27 - -### Added -- *(networking)* remove optional_semaphore being passed down from apps -- all records are Quorum::All once more - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.0...sn_client-v0.91.1) - 2023-09-27 - -### Added -- *(client)* fail fast when a chunk is missing - -## [0.91.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.6...sn_client-v0.91.0) - 2023-09-27 - -### Added -- deep clean sn_transfers, reduce exposition, remove dead code - -## [0.90.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.5...sn_client-v0.90.6) - 2023-09-26 - -### Other -- updated the following local packages: sn_networking - -## [0.90.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.4...sn_client-v0.90.5) - 2023-09-26 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics - -## [0.90.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.3...sn_client-v0.90.4) - 2023-09-25 - -### Other -- updated the following local packages: sn_transfers - -## [0.90.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.2...sn_client-v0.90.3) - 2023-09-25 - -### Other --
cleanup renamings in sn_transfers - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.1...sn_client-v0.90.2) - 2023-09-25 - -### Other -- *(client)* serialize ClientEvent - -## [0.90.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.0...sn_client-v0.90.1) - 2023-09-22 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC services to pub/sub to gossipsub topics - -## [0.90.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.23...sn_client-v0.90.0) - 2023-09-21 - -### Added -- dusking DBCs - -### Other -- rename Nano to NanoTokens -- improve naming - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.22...sn_client-v0.89.23) - 2023-09-21 - -### Other -- updated the following local packages: sn_networking - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.21...sn_client-v0.89.22) - 2023-09-21 - -### Other -- clarify `files download` usage -- output address of uploaded file - -## [0.89.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.20...sn_client-v0.89.21) - 2023-09-20 - -### Other -- updated the following local packages: sn_networking - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.19...sn_client-v0.89.20) - 2023-09-20 - -### Other -- major dep updates - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.18...sn_client-v0.89.19) - 2023-09-20 - -### Other -- allow chunks to be Quorum::One - -## [0.89.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.17...sn_client-v0.89.18) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.16...sn_client-v0.89.17) - 2023-09-19 - -### Other -- error handling when failing to fetch store cost - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.15...sn_client-v0.89.16) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.14...sn_client-v0.89.15) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.13...sn_client-v0.89.14) - 2023-09-18 - -### Other -- updated the following local packages: sn_networking - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.12...sn_client-v0.89.13) - 2023-09-18 - -### Added -- *(client)* download file concurrently - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.11...sn_client-v0.89.12) - 2023-09-18 - -### Added -- serialisation for transfers for out of band sending - -### Other -- *(client)* simplify API -- *(cli)* use iter::chunks() API to batch and pay for our chunks - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.10...sn_client-v0.89.11) - 2023-09-15 - -### Added -- *(client)* pay for chunks in batches - -### Other -- *(client)* refactor chunk upload code to allow greater concurrency - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.9...sn_client-v0.89.10) - 2023-09-15 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.8...sn_client-v0.89.9) - 2023-09-15 - -### Other -- *(client)* remove unused wallet_client - -##
[0.89.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.7...sn_client-v0.89.8) - 2023-09-14 - -### Added -- *(register)* client to pay for Register only if local wallet has not paid for it yet - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.6...sn_client-v0.89.7) - 2023-09-14 - -### Added -- split upload procedure into batches - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.5...sn_client-v0.89.6) - 2023-09-14 - -### Added -- *(network)* enable custom node metrics -- *(network)* use NetworkConfig for network construction - -### Other -- remove unused error variants -- *(network)* use builder pattern to construct the Network -- *(metrics)* rename feature flag and small fixes - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.4...sn_client-v0.89.5) - 2023-09-13 - -### Added -- *(register)* paying nodes for Register storage - -### Other -- *(register)* adding Register payment storage tests to run in CI -- *(payments)* adapting code to recent changes in Transfers - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.3...sn_client-v0.89.4) - 2023-09-12 - -### Added -- utilize stream decryptor - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.2...sn_client-v0.89.3) - 2023-09-12 - -### Other -- updated the following local packages: sn_networking - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.1...sn_client-v0.89.2) - 2023-09-12 - -### Other -- *(metrics)* rename network metrics and remove from default features list - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.0...sn_client-v0.89.1) - 2023-09-12 - -### Added -- add tx and parent spends verification -- chunk payments using UTXOs instead of DBCs - -### Other -- use updated sn_dbc - -## [0.89.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.16...sn_client-v0.89.0) - 2023-09-11 - -### Added -- [**breaking**] Clients add a tolerance to store cost - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.15...sn_client-v0.88.16) - 2023-09-11 - -### Other -- utilize stream encryptor - -## [0.88.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.14...sn_client-v0.88.15) - 2023-09-08 - -### Added -- *(client)* repay for chunks if they cannot be validated - -### Other -- *(client)* refactor to have permits at network layer -- *(refactor)* remove wallet_client args from upload flow -- *(refactor)* remove upload_chunks semaphore arg - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.13...sn_client-v0.88.14) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.12...sn_client-v0.88.13) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.11...sn_client-v0.88.12) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.10...sn_client-v0.88.11) - 2023-09-05 - -### Added -- encryption output to disk - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.9...sn_client-v0.88.10) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking - -##
[0.88.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.8...sn_client-v0.88.9) - 2023-09-04 - -### Added -- feat!(protocol): make payments for all record types - -### Fixed -- fix permissions for public register creation - -### Other -- *(release)* sn_registers-v0.2.4 -- utilize encrypt_from_file - -## [0.88.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.7...sn_client-v0.88.8) - 2023-09-04 - -### Other -- Add client and protocol detail - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.6...sn_client-v0.88.7) - 2023-09-01 - -### Other -- *(transfers)* store dbcs by ref to avoid more clones -- *(client)* make unconfirmed txs btreeset, remove unnecessary cloning -- *(client)* remove one signed_spend clone - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.5...sn_client-v0.88.6) - 2023-09-01 - -### Other -- updated the following local packages: sn_networking - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.4...sn_client-v0.88.5) - 2023-08-31 - -### Other -- remove unused async - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.3...sn_client-v0.88.4) - 2023-08-31 - -### Other -- updated the following local packages: sn_protocol, sn_transfers - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.2...sn_client-v0.88.3) - 2023-08-31 - -### Other -- some logging updates - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.1...sn_client-v0.88.2) - 2023-08-31 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.0...sn_client-v0.88.1) - 2023-08-31 - -### Added -- *(cli)* expose 'concurrency' flag -- *(cli)* increase put parallelisation - -### Other -- *(client)* reduce default concurrency -- *(client)* improve download concurrency.
- -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.29...sn_client-v0.88.0) - 2023-08-30 - -### Added -- refactor to allow greater upload parallelisation -- one transfer per data set, mapped dbcs to content addrs -- [**breaking**] pay each chunk holder direct -- feat!(protocol): gets keys with GetStoreCost -- feat!(protocol): get price and pay for each chunk individually -- feat!(protocol): remove chunk merkletree to simplify payment - -### Fixed -- *(tokio)* remove tokio fs - -### Other -- *(node)* refactor churn test order -- *(deps)* bump tokio to 1.32.0 -- *(client)* refactor client wallet to reduce dbc clones -- *(client)* pass around content payments map mut ref -- *(client)* reduce transferoutputs cloning -- *(client)* error out early for invalid transfers -- *(node)* reenable payment fail check - -## [0.87.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.28...sn_client-v0.87.29) - 2023-08-30 - -### Other -- updated the following local packages: sn_networking - -## [0.87.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.27...sn_client-v0.87.28) - 2023-08-29 - -### Other -- updated the following local packages: sn_networking - -## [0.87.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.26...sn_client-v0.87.27) - 2023-08-24 - -### Other -- updated the following local packages: sn_registers, sn_transfers - -## [0.87.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.25...sn_client-v0.87.26) - 2023-08-22 - -### Other -- updated the following local packages: sn_networking - -## [0.87.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.24...sn_client-v0.87.25) - 2023-08-22 - -### Fixed -- fixes to allow upload file to work properly - -## [0.87.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.23...sn_client-v0.87.24) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## [0.87.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.22...sn_client-v0.87.23) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## [0.87.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.21...sn_client-v0.87.22) - 2023-08-18 - -### Added -- remove client and node initial join flow - -## [0.87.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.20...sn_client-v0.87.21) - 2023-08-18 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.19...sn_client-v0.87.20) - 2023-08-17 - -### Fixed -- *(client)* start bootstrap when we are connected to one peer - -## [0.87.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.18...sn_client-v0.87.19) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.17...sn_client-v0.87.18) - 2023-08-17 - -### Fixed -- *(client)* use bootstrap and fire Connecting event - -## [0.87.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.16...sn_client-v0.87.17) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.15...sn_client-v0.87.16) - 2023-08-16 - -### Added -- *(client)* do not use cached proofs - -## [0.87.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.14...sn_client-v0.87.15) -
2023-08-16 - -### Added -- overpay by default to allow margin - -## [0.87.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.13...sn_client-v0.87.14) - 2023-08-15 - -### Other -- updated the following local packages: sn_networking - -## [0.87.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.12...sn_client-v0.87.13) - 2023-08-11 - -### Added -- *(transfers)* add resend loop for unconfirmed txs -- *(networking)* ensure we always use the highest price we find -- *(networking)* enable returning less than majority for store_cost -- *(client)* use store cost queries to pre populate cost and RT - -### Fixed -- *(client)* only_store_cost_if_higher missing else added - -### Other -- remove client inactivity random storage query -- *(node)* resend unconfirmed txs before asserting -- *(cli)* print cost info -- *(networking)* remove logs, fix typos and clippy issues -- overpay in advance to avoid storage cost calculation inconsistency - -## [0.87.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.11...sn_client-v0.87.12) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.87.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.10...sn_client-v0.87.11) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking - -## [0.87.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.9...sn_client-v0.87.10) - 2023-08-08 - -### Added -- *(transfers)* add get largest dbc for spending - -### Fixed -- *(node)* prevent panic in storage calcs - -### Other -- tidy store cost code - -## [0.87.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.8...sn_client-v0.87.9) - 2023-08-07 - -### Other -- updated the following local packages: sn_networking - -## [0.87.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.7...sn_client-v0.87.8) - 2023-08-07 - -### Added -- rework register addresses to include pk - -### Other -- rename network addresses confusing name method to xorname - -## [0.87.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.6...sn_client-v0.87.7) - 2023-08-04 - -### Other -- updated the following local packages: sn_networking - -## [0.87.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.5...sn_client-v0.87.6) - 2023-08-03 - -### Other -- updated the following local packages: sn_networking - -## [0.87.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.4...sn_client-v0.87.5) - 2023-08-03 - -### Other -- updated the following local packages: sn_networking - -## [0.87.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.3...sn_client-v0.87.4) - 2023-08-02 - -### Fixed -- do not create genesis when faucet already funded - -## [0.87.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.2...sn_client-v0.87.3) - 2023-08-01 - -### Other -- *(client)* reattempt to get_spend_from_network -- add more verification for payments - -## [0.87.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.1...sn_client-v0.87.2) - 2023-08-01 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.0...sn_client-v0.87.1) - 2023-08-01 - -### Added -- *(cli)* add no-verify flag to cli - -### Other -- fix double spend and remove arbitrary wait -- *(node)* verify faucet transactions before continuing -- *(networking)* change default re-attempt behaviour - -##
[0.87.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.11...sn_client-v0.87.0) - 2023-08-01 - -### Other -- *(register)* [**breaking**] hashing the node of a Register to sign it instead of bincode-serialising it - -## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.10...sn_client-v0.86.11) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.9...sn_client-v0.86.10) - 2023-07-31 - -### Added -- carry out get_record re-attempts for critical record -- for put_record verification, NotEnoughCopies is acceptable - -### Fixed -- *(test)* using proper wallets during data_with_churn test - -### Other -- move PrettyPrintRecordKey to sn_protocol -- small refactors for failing CI -- more traceable logs regarding chunk payment proof - -## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.8...sn_client-v0.86.9) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.7...sn_client-v0.86.8) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking - -## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.6...sn_client-v0.86.7) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.5...sn_client-v0.86.6) - 2023-07-28 - -### Other -- adapt all logging to use pretty record key - -## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.4...sn_client-v0.86.5) - 2023-07-27 - -### Other -- updated the following local packages: sn_networking - -## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.3...sn_client-v0.86.4) - 2023-07-26 - -### Fixed -- *(register)* Registers with same name but different tags were not being stored by the network - -### Other -- centralising RecordKey creation logic to make sure we always use the same for all content types - -## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.2...sn_client-v0.86.3) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.1...sn_client-v0.86.2) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.0...sn_client-v0.86.1) - 2023-07-25 - -### Added -- *(replication)* replicate when our close group changes - -### Fixed -- *(client)* keep an active `ClientEvent` receiver - -### Other -- *(client)* get k_value from const fn - -## [0.86.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.55...sn_client-v0.86.0) - 2023-07-21 - -### Added -- *(protocol)* [**breaking**] make Chunks storage payment required - -### Other -- tokens transfers task in data_with_churn tests to use client apis instead of faucet helpers - -## [0.85.55](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.54...sn_client-v0.85.55) - 2023-07-20 - -### Other -- cleanup error types - -## [0.85.54](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.53...sn_client-v0.85.54) - 2023-07-19 - -### Added -- using kad::record for dbc spend ops -- *(CI)* dbc verification during network churning test - -##
[0.85.53](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.52...sn_client-v0.85.53) - 2023-07-19 - -### Other -- updated the following local packages: sn_protocol - -## [0.85.52](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.51...sn_client-v0.85.52) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.51](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.50...sn_client-v0.85.51) - 2023-07-18 - -### Added -- safer registers requiring signatures -- *(networking)* remove LostRecordEvent - -### Fixed -- address PR comments -- client - -## [0.85.50](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.49...sn_client-v0.85.50) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.49](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.48...sn_client-v0.85.49) - 2023-07-17 - -### Other -- updated the following local packages: sn_networking - -## [0.85.48](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.47...sn_client-v0.85.48) - 2023-07-17 - -### Added -- *(networking)* upgrade to libp2p 0.52.0 - -### Other -- *(networking)* log all connected peer count - -## [0.85.47](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.46...sn_client-v0.85.47) - 2023-07-17 - -### Added -- *(client)* keep storage payment proofs in local wallet - -## [0.85.46](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.45...sn_client-v0.85.46) - 2023-07-12 - -### Other -- client to upload paid chunks in batches - -## [0.85.45](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.44...sn_client-v0.85.45) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.44](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.43...sn_client-v0.85.44) - 2023-07-11 - -### Fixed -- *(client)* publish register on creation - -## [0.85.43](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.42...sn_client-v0.85.43) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.41...sn_client-v0.85.42) - 2023-07-10 - -### Other -- updated the following local packages: sn_networking - -## [0.85.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.40...sn_client-v0.85.41) - 2023-07-10 - -### Added -- client query register via get_record -- client upload Register via put_record - -## [0.85.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.39...sn_client-v0.85.40) - 2023-07-06 - -### Other -- updated the following local packages: sn_networking - -## [0.85.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.38...sn_client-v0.85.39) - 2023-07-06 - -### Added -- PutRecord response during client upload -- client upload chunk using kad::put_record - -### Other -- *(release)* sn_cli-v0.79.0/sn_logging-v0.2.0/sn_node-v0.86.0/sn_testnet-v0.1.76/sn_networking-v0.3.11 - -## [0.85.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.37...sn_client-v0.85.38) - 2023-07-05 - -### Added -- carry out validation for record_store::put - -## [0.85.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.36...sn_client-v0.85.37) - 2023-07-04 - -### Other -- demystify permissions - -## [0.85.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.35...sn_client-v0.85.36) - 2023-07-03 - -### Added -- append 
SAFE_PEERS to initial_peers after restart - -### Fixed -- *(test)* data_churn_test creates clients parsing SAFE_PEERS env - -### Other -- reduce SAMPLE_SIZE for the data_with_churn test -- some client log tidy up - -## [0.85.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.34...sn_client-v0.85.35) - 2023-06-29 - -### Other -- updated the following local packages: sn_networking - -## [0.85.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.33...sn_client-v0.85.34) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.32...sn_client-v0.85.33) - 2023-06-28 - -### Added -- make the example work, fix sync when reg doesn't exist -- rework permissions, implement register cmd handlers -- register refactor, kad reg without cmds - -### Fixed -- rename UserRights to UserPermissions - -## [0.85.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.31...sn_client-v0.85.32) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.30...sn_client-v0.85.31) - 2023-06-28 - -### Added -- *(node)* dial without PeerId - -## [0.85.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.29...sn_client-v0.85.30) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## [0.85.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.28...sn_client-v0.85.29) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## [0.85.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.27...sn_client-v0.85.28) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.26...sn_client-v0.85.27) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.25...sn_client-v0.85.26) - 2023-06-26 - -### Other -- *(release)* sn_cli-v0.78.9/sn_logging-v0.1.4/sn_node-v0.83.55/sn_testnet-v0.1.59/sn_networking-v0.1.24 - -## [0.85.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.24...sn_client-v0.85.25) - 2023-06-26 - -### Other -- payment proof map to use xorname as index instead of merkletree nodes type - -## [0.85.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.23...sn_client-v0.85.24) - 2023-06-24 - -### Other -- updated the following local packages: sn_networking - -## [0.85.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.22...sn_client-v0.85.23) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.21...sn_client-v0.85.22) - 2023-06-23 - -### Added -- forward chunk when not being the closest -- replicate to peers lost record - -### Fixed -- client upload to peers closer to chunk - -## [0.85.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.20...sn_client-v0.85.21) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.19...sn_client-v0.85.20) - 2023-06-22 - -### Other -- *(client)* initial refactor around uploads - -##
[0.85.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.18...sn_client-v0.85.19) - 2023-06-22 - -### Fixed -- improve client upload speed - -## [0.85.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.17...sn_client-v0.85.18) - 2023-06-21 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.16...sn_client-v0.85.17) - 2023-06-21 - -### Other -- *(network)* remove `NetworkEvent::PutRecord` dead code - -## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.15...sn_client-v0.85.16) - 2023-06-21 - -### Other -- remove unused error variants -- *(node)* obtain parent_tx from SignedSpend -- *(release)* sn_cli-v0.77.46/sn_logging-v0.1.3/sn_node-v0.83.42/sn_testnet-v0.1.46/sn_networking-v0.1.15 - -## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.14...sn_client-v0.85.15) - 2023-06-20 - -### Added -- *(network)* validate `Record` on GET -- *(network)* validate and store `ReplicatedData` -- *(node)* perform proper validations on PUT -- *(network)* validate and store `Record` - -### Fixed -- *(node)* store parent tx along with `SignedSpend` - -### Other -- *(docs)* add more docs and comments - -## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.13...sn_client-v0.85.14) - 2023-06-20 - -### Other -- updated the following local packages: sn_networking - -## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.12...sn_client-v0.85.13) - 2023-06-20 - -### Added -- pay 1 nano per Chunk as temporary approach till net-invoices are implemented -- committing storage payment SignedSpends to the network -- nodes to verify input DBCs of Chunk payment proof were spent - -### Other -- specific error types for different payment proof verification scenarios -- include the Tx instead of output DBCs as part of storage payment proofs - -## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.11...sn_client-v0.85.12) - 2023-06-20 - -### Other -- updated the following local packages: sn_networking - -## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.10...sn_client-v0.85.11) - 2023-06-16 - -### Fixed -- reduce client mem usage during uploading - -## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.9...sn_client-v0.85.10) - 2023-06-15 - -### Added -- add double spend test - -### Fixed -- parent spend issue - -## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.8...sn_client-v0.85.9) - 2023-06-14 - -### Added -- include output DBC within payment proof for Chunks storage - -## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.7...sn_client-v0.85.8) - 2023-06-14 - -### Other -- updated the following local packages: sn_networking - -## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.6...sn_client-v0.85.7) - 2023-06-14 - -### Added -- *(client)* expose req/resp timeout to client cli - -## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.5...sn_client-v0.85.6) - 2023-06-13 - -### Other -- *(release)* sn_cli-v0.77.12/sn_logging-v0.1.2/sn_node-v0.83.10/sn_testnet-v0.1.14/sn_networking-v0.1.6 - -## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.4...sn_client-v0.85.5) - 2023-06-12 - -### Added -- remove spendbook rw locks, improve logging - -### Other -- remove unneeded printlns -- *(release)*
sn_cli-v0.77.10/sn_record_store-v0.1.3/sn_node-v0.83.8/sn_testnet-v0.1.12/sn_networking-v0.1.4 - -## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.3...sn_client-v0.85.4) - 2023-06-09 - -### Other -- manually change crate version - -## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.2...sn_client-v0.85.3) - 2023-06-09 - -### Other -- more replication flow statistics during mem_check test - -## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.1...sn_client-v0.85.2) - 2023-06-07 - -### Added -- bail out if empty list of addresses is provided for payment proof generation -- *(client)* add progress indicator for initial network connections -- attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate the payment proof tree -- Chunk storage payment and building payment proofs - -### Fixed -- remove progress bar after it's finished. - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- small log wording updates -- exposing definition of merkletree nodes data type and additional doc in code -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate - -## [0.85.1](https://github.com/jacderida/safe_network/compare/sn_client-v0.85.0...sn_client-v0.85.1) - 2023-06-06 - -### Added -- refactor replication flow to use a pull model diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml deleted file mode 100644 index ce0f2b5ee8..0000000000 --- a/sn_client/Cargo.toml +++ /dev/null @@ -1,90 +0,0 @@ -[package] -authors = ["MaidSafe Developers <dev@maidsafe.net>"] -description = "Safe Network Client" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_client" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.110.4" - -[features] -default = [] -local = ["sn_networking/local"] -open-metrics = ["sn_networking/open-metrics", "prometheus-client"] -test-utils = ["sn_peers_acquisition", "eyre"] -# required to pass on flag to node builds -websockets = ["sn_networking/websockets", "sn_protocol/websockets"] - - -[dependencies] -tokio = { version = "1.35.0", features = [ - "io-util", - "macros", - "rt", - "sync", - "time", -] } -bip39 = "2.0.0" -curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ - "num-bigint", -] } -eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } -async-trait = "0.1" -backoff = { version = "0.4.0", features = ["tokio"] } -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -crdts = "7.3.2" -custom_debug = "~0.6.1" -dashmap = "~6.1.0" -futures = "~0.3.13" -hex = "~0.4.3" -itertools = "~0.12.1" -libp2p = { version = "0.54.1", features = ["identify"] } -petgraph = { version = "0.6.4", features = ["serde-1"] } -prometheus-client = { version =
"0.22", optional = true } -rand = { version = "~0.8.5", features = ["small_rng"] } -rayon = "1.8.0" -rmp-serde = "1.1.1" -self_encryption = "~0.30.0" -serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -tempfile = "3.6.0" -thiserror = "1.0.23" -tiny-keccak = "~2.0.2" -tracing = { version = "~0.1.26" } -xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3", optional = true } -eyre = { version = "0.6.8", optional = true } - -[dev-dependencies] -assert_matches = "1.5.0" -dirs-next = "~2.0.0" -# add rand to libp2p -libp2p-identity = { version = "0.2.7", features = ["rand"] } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_registers = { path = "../sn_registers", version = "0.3.21", features = [ - "test-utils", -] } - -[lints] -workspace = true - -# to allow wasm compilation -[lib] -crate-type = ["cdylib", "rlib"] - -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.12", features = ["js"] } -wasm-bindgen = "0.2.90" -wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -console_error_panic_hook = "0.1.6" -tracing-wasm = "0.2.1" -wasmtimer = "0.2.0" -web-sys = { version = "0.3.22", features = ["console"] } diff --git a/sn_client/README.md b/sn_client/README.md deleted file mode 100644 index 48a4fe9cf9..0000000000 --- a/sn_client/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# `sn_client` - SAFE Network Client Library - -## Overview - -The `sn_client` library provides the core functionalities for interacting with the SAFE Network. It handles tasks such as connecting to the network, managing concurrency, and performing various network operations like data storage and retrieval. - -## Table of Contents - -- [Overview](#overview) -- [Installation](#installation) -- [Usage](#usage) - - [API Calls](#api-calls) -- [Running Tests](#running-tests) -- [Contributing](#contributing) - - [Conventional Commits](#conventional-commits) -- [License](#license) - -## Installation - -To include `sn_client` in your Rust project, add the following to your `Cargo.toml`: - -```toml -[dependencies] -sn_client = "latest_version_here" -``` - -## Usage - -To use `sn_client`, you first need to instantiate a client. Here's a simple example: - -```rust -use sn_client::Client; -let client = Client::new(signer, peers, req_response_timeout, custom_concurrency_limit).await?; -``` - -## Running Tests - -Prerequisites: -* A running local network. Refer to [`safe_network/README.md`](../README.md) to run a local test network. -* `SAFE_PEERS` environment variable or running the tests with `--feature=local`: - -```bash -$ cargo test --package sn_client --release --tests --features=local -``` - -## Contributing - -Please refer to the [Contributing Guidelines](../CONTRIBUTING.md) from the main directory for details on how to contribute to this project. - -### Conventional Commits - -We follow the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. Please adhere to this standard when contributing. - -## License - -This Safe Network repository is licensed under the General Public License (GPL), version 3 ([LICENSE](LICENSE) http://www.gnu.org/licenses/gpl-3.0.en.html). 
diff --git a/sn_client/src/acc_packet.rs b/sn_client/src/acc_packet.rs deleted file mode 100644 index 2d9570f34a..0000000000 --- a/sn_client/src/acc_packet.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::error::Result; -use bip39::Mnemonic; -use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey}; -use std::path::Path; - -pub mod user_secret; - -const DEFAULT_WALLET_DERIVIATION_PASSPHRASE: &str = "default"; - -/// Load an account from disk, with wallet, or create a new one using the mnemonic system -pub fn load_account_wallet_or_create_with_mnemonic( - root_dir: &Path, - derivation_passphrase: Option<&str>, -) -> Result<HotWallet> { - let wallet = HotWallet::load_from(root_dir); - - match wallet { - Ok(wallet) => Ok(wallet), - Err(error) => { - warn!("Issue loading wallet, creating a new one: {error}"); - println!("Issue loading wallet from {root_dir:?}"); - - let mnemonic = load_or_create_mnemonic(root_dir)?; - let wallet = - secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?; - - Ok(HotWallet::create_from_key(root_dir, wallet, None)?) - } - } -} - -pub fn load_or_create_mnemonic(root_dir: &Path) -> Result<Mnemonic> { - match user_secret::read_mnemonic_from_disk(root_dir) { - Ok(mnemonic) => { - println!( - "Found existing mnemonic in {root_dir:?}, this will be used for key derivation." - ); - info!("Using existing mnemonic from {root_dir:?}"); - Ok(mnemonic) - } - Err(error) => { - println!("No existing mnemonic found, creating a new one in {root_dir:?}."); - warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}"); - let mnemonic = user_secret::random_eip2333_mnemonic()?; - user_secret::write_mnemonic_to_disk(root_dir, &mnemonic)?; - Ok(mnemonic) - } - } -} - -pub fn secret_key_from_mnemonic( - mnemonic: Mnemonic, - derivation_passphrase: Option<String>, -) -> Result<MainSecretKey> { - let passphrase = - derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned()); - user_secret::account_wallet_secret_key(mnemonic, &passphrase) -} - -pub fn create_faucet_account_and_wallet() -> HotWallet { - let root_dir = get_faucet_data_dir(); - - println!("Loading faucet wallet... {root_dir:#?}"); - load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .expect("Faucet wallet shall be created successfully.") -} diff --git a/sn_client/src/acc_packet/user_secret.rs b/sn_client/src/acc_packet/user_secret.rs deleted file mode 100644 index 800018cfb7..0000000000 --- a/sn_client/src/acc_packet/user_secret.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied.
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - error::{Error, Result}, - transfers::MainSecretKey, -}; -use bls::SecretKey; -use curv::elliptic::curves::ECScalar; -use rand::RngCore; -use std::path::Path; -use xor_name::XorName; - -const MNEMONIC_FILENAME: &str = "account_secret"; - -const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0"; - -const ACCOUNT_WALLET_DERIVATION: &str = "m/2/0"; - -pub fn random_eip2333_mnemonic() -> Result<bip39::Mnemonic> { - let mut entropy = [1u8; 32]; - let rng = &mut rand::rngs::OsRng; - rng.fill_bytes(&mut entropy); - let mnemonic = - bip39::Mnemonic::from_entropy(&entropy).map_err(|_error| Error::FailedToParseEntropy)?; - Ok(mnemonic) -} - -/// Derive a wallet secret key from the mnemonic for the account. -pub fn account_wallet_secret_key( - mnemonic: bip39::Mnemonic, - passphrase: &str, -) -> Result<MainSecretKey> { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_WALLET_DERIVATION); - let key_bytes = derived_key.serialize(); - let sk = SecretKey::from_bytes(key_bytes.into()).map_err(|_err| Error::InvalidKeyBytes)?; - Ok(MainSecretKey::new(sk)) -} - -#[expect(dead_code)] // as yet unused, will be used soon -/// Derive an xorname from the mnemonic for the account to store data. -pub(crate) fn account_root_xorname(mnemonic: bip39::Mnemonic, passphrase: &str) -> Result<XorName> { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_ROOT_XORNAME_DERIVATION); - let derived_key_bytes = derived_key.serialize(); - Ok(XorName::from_content(&derived_key_bytes)) -} - -pub fn write_mnemonic_to_disk(files_dir: &Path, mnemonic: &bip39::Mnemonic) -> Result<()> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = mnemonic.to_string(); - std::fs::write(filename, content)?; - Ok(()) -} - -pub(super) fn read_mnemonic_from_disk(files_dir: &Path) -> Result<bip39::Mnemonic> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = std::fs::read_to_string(filename)?; - let mnemonic = - bip39::Mnemonic::parse_normalized(&content).map_err(|_err| Error::FailedToParseMnemonic)?; - Ok(mnemonic) -} diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs deleted file mode 100644 index 5ed63210a6..0000000000 --- a/sn_client/src/api.rs +++ /dev/null @@ -1,1234 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software.
- -use super::{ - error::{Error, Result}, - Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver, ClientRegister, - WalletClient, -}; -use bls::{PublicKey, SecretKey, Signature}; -use libp2p::{ - identity::Keypair, - kad::{KBucketDistance, Quorum, Record}, - Multiaddr, PeerId, -}; -use rand::{thread_rng, Rng}; -use sn_networking::{ - get_signed_spend_from_record, multiaddr_is_global, - target_arch::{interval, spawn, timeout, Instant}, - GetRecordCfg, GetRecordError, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg, - VerificationKind, -}; -use sn_protocol::{ - error::Error as ProtocolError, - messages::ChunkProof, - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, - RecordKind, RegisterAddress, RetryStrategy, SpendAddress, - }, - NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -}; -use sn_registers::{Permissions, SignedRegister}; -use sn_transfers::{ - CashNote, CashNoteRedemption, MainPubkey, NanoTokens, Payment, SignedSpend, TransferError, - GENESIS_SPEND_UNIQUE_KEY, -}; -#[cfg(target_arch = "wasm32")] -use std::path::PathBuf; -use std::{ - collections::{HashMap, HashSet}, - num::NonZeroUsize, - sync::Arc, -}; -use tokio::time::Duration; -use tracing::trace; -use xor_name::XorName; - -/// The maximum duration the client will wait for a connection to the network before timing out. -pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); - -/// The timeout duration for the client to receive any response from the network. -const INACTIVITY_TIMEOUT: Duration = Duration::from_secs(30); - -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const QUORUM_N_IS_2: NonZeroUsize = match NonZeroUsize::new(2) { - Some(v) => v, - None => panic!("2 is not zero"), -}; - -impl Client { - /// A quick client with a random secret key and some peers. - pub async fn quick_start(peers: Option<Vec<Multiaddr>>) -> Result<Self> { - Self::new(SecretKey::random(), peers, None, None).await - } - - /// Instantiate a new client. - /// - /// Optionally specify the duration for the connection timeout. - /// - /// Defaults to [`CONNECTION_TIMEOUT`]. - /// - /// # Arguments - /// * 'signer' - [SecretKey] - /// * 'peers' - [Option]<[Vec]<[Multiaddr]>> - /// * 'connection_timeout' - [Option]<[Duration]> : Optional specification for the client connection timeout - /// * 'client_event_broadcaster' - [Option]<[ClientEventsBroadcaster]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn new( - signer: SecretKey, - peers: Option<Vec<Multiaddr>>, - connection_timeout: Option<Duration>, - client_event_broadcaster: Option<ClientEventsBroadcaster>, - ) -> Result<Self> { - // If any of our contact peers has a global address, we'll assume we're in a global network.
- let local = match peers { - Some(ref peers) => !peers.iter().any(multiaddr_is_global), - None => true, - }; - - info!("Startup a client with peers {peers:?} and local {local:?} flag"); - info!("Starting Kad swarm in client mode..."); - - #[cfg(target_arch = "wasm32")] - let root_dir = PathBuf::from("dummy path, wasm32/browser environments will not use this"); - #[cfg(not(target_arch = "wasm32"))] - let root_dir = std::env::temp_dir(); - trace!("Starting Kad swarm in client mode..{root_dir:?}."); - - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); - - let (network, mut network_event_receiver, swarm_driver) = network_builder.build_client()?; - info!("Client constructed network and swarm_driver"); - - // If the events broadcaster is not provided by the caller, then we create a new one. - // This is not optional as we wait on certain events to connect to the network and return from this function. - let events_broadcaster = client_event_broadcaster.unwrap_or_default(); - - let client = Self { - network: network.clone(), - events_broadcaster, - signer: Arc::new(signer), - }; - - // subscribe to our events channel first, so we don't have intermittent - // errors if it does not exist and we cannot send to it. - // (eg, if PeerAdded happens faster than our events channel is created) - let mut client_events_rx = client.events_channel(); - - let _swarm_driver = spawn({ - trace!("Starting up client swarm_driver"); - swarm_driver.run() - }); - - // spawn task to dial to the given peers - let network_clone = network.clone(); - let _handle = spawn(async move { - if let Some(peers) = peers { - for addr in peers { - trace!(%addr, "dialing initial peer"); - - if let Err(err) = network_clone.dial(addr.clone()).await { - tracing::error!(%addr, "Failed to dial: {err:?}"); - }; - } - } - }); - - // spawn task to wait for NetworkEvent and check for inactivity - let mut client_clone = client.clone(); - let _event_handler = spawn(async move { - let mut peers_added: usize = 0; - loop { - match timeout(INACTIVITY_TIMEOUT, network_event_receiver.recv()).await { - Ok(event) => { - let the_event = match event { - Some(the_event) => the_event, - None => { - error!("The `NetworkEvent` channel has been closed"); - continue; - } - }; - - let start = Instant::now(); - let event_string = format!("{the_event:?}"); - if let Err(err) = - client_clone.handle_network_event(the_event, &mut peers_added) - { - warn!("Error handling network event: {err}"); - } - trace!( - "Handled network event in {:?}: {:?}", - start.elapsed(), - event_string - ); - } - Err(_elapse_err) => { - debug!("Client inactivity... waiting for a network event"); - client_clone - .events_broadcaster - .broadcast(ClientEvent::InactiveClient(INACTIVITY_TIMEOUT)); - } - } - } - }); - - // loop to connect to the network - let mut is_connected = false; - let connection_timeout = connection_timeout.unwrap_or(CONNECTION_TIMEOUT); - let mut unsupported_protocol_tracker: Option<(String, String)> = None; - - debug!("Client connection timeout: {connection_timeout:?}"); - let mut connection_timeout_interval = interval(connection_timeout); - // first tick completes immediately - connection_timeout_interval.tick().await; - - loop { - tokio::select! 
{ - _ = connection_timeout_interval.tick() => { - if !is_connected { - if let Some((our_protocol, their_protocols)) = unsupported_protocol_tracker { - error!("Timeout: Client could not connect to the network as it does not support the protocol"); - break Err(Error::UnsupportedProtocol(our_protocol, their_protocols)); - } - error!("Timeout: Client failed to connect to the network within {connection_timeout:?}"); - break Err(Error::ConnectionTimeout(connection_timeout)); - } - } - event = client_events_rx.recv() => { - match event { - // we do not error out directly as we might still connect if the other initial peers are from - // the correct network. - Ok(ClientEvent::PeerWithUnsupportedProtocol { our_protocol, their_protocol }) => { - warn!(%our_protocol, %their_protocol, "Client tried to connect to a peer with an unsupported protocol. Tracking the latest one"); - unsupported_protocol_tracker = Some((our_protocol, their_protocol)); - } - Ok(ClientEvent::ConnectedToNetwork) => { - is_connected = true; - info!("Client connected to the Network {is_connected:?}."); - break Ok(()); - } - Ok(ClientEvent::InactiveClient(timeout)) => { - if is_connected { - info!("The client was inactive for {timeout:?}."); - } else { - info!("The client still does not know enough network nodes."); - } - } - Err(err) => { - error!("Unexpected error during client startup {err:?}"); - println!("Unexpected error during client startup {err:?}"); - break Err(err.into()); - } - _ => {} - } - }} - }?; - - Ok(client) - } - - fn handle_network_event(&mut self, event: NetworkEvent, peers_added: &mut usize) -> Result<()> { - match event { - NetworkEvent::PeerAdded(peer_id, _connected_peer) => { - debug!("PeerAdded: {peer_id}"); - *peers_added += 1; - - // notify the listeners that we are waiting on CLOSE_GROUP_SIZE peers before emitting ConnectedToNetwork - self.events_broadcaster.broadcast(ClientEvent::PeerAdded { - max_peers_to_connect: CLOSE_GROUP_SIZE, - }); - // When the client runs in non-local mode, - // it may take some time to fill up the RT. - // To avoid that delay causing queries to fail with RecordNotFound, - // wait until a certain number of peers have populated the RT - if *peers_added >= CLOSE_GROUP_SIZE { - self.events_broadcaster - .broadcast(ClientEvent::ConnectedToNetwork); - } else { - debug!("{peers_added}/{CLOSE_GROUP_SIZE} initial peers found.",); - } - } - NetworkEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - } => { - self.events_broadcaster - .broadcast(ClientEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - }); - } - _other => {} - } - - Ok(()) - } - - /// Get the client events channel. - /// - /// Return Type: - /// - /// [ClientEventsReceiver] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error, ClientEvent}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Using client.events_channel() to receive events - /// let mut events_channel = client.events_channel(); - /// while let Ok(event) = events_channel.recv().await { - /// // Handle the event - /// } - /// - /// # Ok(()) - /// # } - /// ``` - pub fn events_channel(&self) -> ClientEventsReceiver { - self.events_broadcaster.subscribe() - } - - /// Return the underlying network GetRange - pub async fn get_range(&self) -> Result<KBucketDistance> { - self.network.get_range().await.map_err(Error::from) - } - - /// Sign the given data.
- ///
- /// # Arguments
- /// * 'data' - bytes; i.e. the bytes of an sn_registers::Register instance
- ///
- /// Return Type:
- ///
- /// [Signature]
- ///
- pub fn sign<T: AsRef<[u8]>>(&self, data: T) -> Signature {
- self.signer.sign(data)
- }
-
- /// Return a reference to the signer secret key.
- ///
- /// Return Type:
- ///
- /// [SecretKey]
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// let secret_key_reference = client.signer();
- /// # Ok(())
- /// # }
- /// ```
- pub fn signer(&self) -> &SecretKey {
- &self.signer
- }
-
- /// Return the public key of the data signing key.
- ///
- /// Return Type:
- ///
- /// [PublicKey]
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// let public_key_reference = client.signer_pk();
- /// # Ok(())
- /// # }
- /// ```
- pub fn signer_pk(&self) -> PublicKey {
- self.signer.public_key()
- }
-
- /// Set the signing key for this client.
- ///
- /// # Arguments
- /// * 'sk' - [SecretKey]
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// let mut client = Client::new(SecretKey::random(), None, None, None).await?;
- /// client.set_signer_key(SecretKey::random());
- /// # Ok(())
- /// # }
- /// ```
- pub fn set_signer_key(&mut self, sk: SecretKey) {
- self.signer = Arc::new(sk);
- }
-
- /// Get a register from the network
- ///
- /// # Arguments
- /// * 'address' - [RegisterAddress]
- ///
- /// Return Type:
- ///
- /// Result<[SignedRegister]>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// use xor_name::XorName;
- /// use sn_registers::RegisterAddress;
- /// // Set up a client
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Set up an address
- /// let mut rng = rand::thread_rng();
- /// let owner = SecretKey::random().public_key();
- /// let xorname = XorName::random(&mut rng);
- /// let address = RegisterAddress::new(xorname, owner);
- /// // Get a signed register
- /// let signed_register = client.get_signed_register_from_network(address, true);
- /// # Ok(())
- /// # }
- /// ```
- pub async fn get_signed_register_from_network(
- &self,
- address: RegisterAddress,
- is_verifying: bool,
- ) -> Result<SignedRegister> {
- let key = NetworkAddress::from_register_address(address).to_record_key();
- let get_quorum = if is_verifying {
- Quorum::All
- } else {
- Quorum::Majority
- };
- let retry_strategy = if is_verifying {
- Some(RetryStrategy::Balanced)
- } else {
- Some(RetryStrategy::Quick)
- };
- let get_cfg = GetRecordCfg {
- get_quorum,
- retry_strategy,
- target_record: None,
- expected_holders: Default::default(),
- is_register: true,
- };
-
- let maybe_record = self.network.get_record_from_network(key, &get_cfg).await;
- let record = match &maybe_record {
- Ok(r) => r,
- Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
- let mut results_to_merge = HashMap::default();
-
- for (address, (r, _peers)) in result_map {
- results_to_merge.insert(*address, r.clone());
- }
-
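- // a split record means holders returned diverging register copies;
- // registers are CRDTs, so merge the copies rather than failing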
return merge_register_records(address, &results_to_merge);
- }
- Err(e) => {
- warn!("Failed to get record at {address:?} from the network: {e:?}");
- return Err(ProtocolError::RegisterNotFound(Box::new(address)).into());
- }
- };
-
- debug!(
- "Got record from the network, {:?}",
- PrettyPrintRecordKey::from(&record.key)
- );
-
- let register = get_register_from_record(record)
- .map_err(|_| ProtocolError::RegisterNotFound(Box::new(address)))?;
- Ok(register)
- }
-
- /// Retrieve a Register from the network.
- ///
- /// # Arguments
- /// * 'address' - [RegisterAddress]
- ///
- /// Return Type:
- ///
- /// Result<[ClientRegister]>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// use xor_name::XorName;
- /// use sn_registers::RegisterAddress;
- /// // Set up a client
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Set up an address
- /// let mut rng = rand::thread_rng();
- /// let owner = SecretKey::random().public_key();
- /// let xorname = XorName::random(&mut rng);
- /// let address = RegisterAddress::new(xorname, owner);
- /// // Get the register
- /// let retrieved_register = client.get_register(address);
- /// # Ok(())
- /// # }
- /// ```
- pub async fn get_register(&self, address: RegisterAddress) -> Result<ClientRegister> {
- info!("Retrieving a Register replica at {address}");
- ClientRegister::retrieve(self.clone(), address).await
- }
-
- /// Create a new Register on the Network.
- /// Tops up payments and retries if verification fails.
- ///
- /// # Arguments
- /// * 'address' - [XorName]
- /// * 'wallet_client' - [WalletClient]
- /// * 'verify_store' - Boolean
- /// * 'perms' - [Permissions]
- ///
- /// Return Type:
- ///
- /// Result<([ClientRegister], [NanoTokens], [NanoTokens])>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, WalletClient, Error};
- /// use tempfile::TempDir;
- /// use bls::SecretKey;
- /// use sn_transfers::{MainSecretKey};
- /// use xor_name::XorName;
- /// use sn_registers::RegisterAddress;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// // Set up Client, Wallet, etc
- /// use sn_registers::Permissions;
- /// use sn_transfers::HotWallet;
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// let tmp_path = TempDir::new()?.path().to_owned();
- /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
- /// let mut wallet_client = WalletClient::new(client.clone(), wallet);
- /// // Set up an address
- /// let mut rng = rand::thread_rng();
- /// let owner = SecretKey::random().public_key();
- /// let xorname = XorName::random(&mut rng);
- /// let address = RegisterAddress::new(xorname, owner);
- /// // Example:
- /// let (mut client_register, _storage_cost, _royalties_fees) = client
- /// .create_and_pay_for_register(
- /// xorname,
- /// &mut wallet_client,
- /// true,
- /// Permissions::default(),
- /// )
- /// .await?;
- /// # Ok(())
- /// # }
- /// ```
- pub async fn create_and_pay_for_register(
- &self,
- address: XorName,
- wallet_client: &mut WalletClient,
- verify_store: bool,
- perms: Permissions,
- ) -> Result<(ClientRegister, NanoTokens, NanoTokens)> {
- info!("Instantiating a new Register replica with address {address:?}");
- let (reg, mut total_cost, mut total_royalties) = ClientRegister::create_online(
- self.clone(),
- address,
- wallet_client,
- false,
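- // the initial attempt skips store verification; when `verify_store` is
- // set, storage is verified (and topped up) in the loop below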
perms.clone(),
- )
- .await?;
-
- debug!("{address:?} Created in theory");
- let reg_address = reg.address();
- if verify_store {
- debug!("We should verify stored at {address:?}");
- let mut stored = self.verify_register_stored(*reg_address).await.is_ok();
-
- while !stored {
- info!("Register not completely stored on the network yet. Retrying...");
- // this verify store call here ensures we get the record from Quorum::All
- let (reg, top_up_cost, royalties_top_up) = ClientRegister::create_online(
- self.clone(),
- address,
- wallet_client,
- true,
- perms.clone(),
- )
- .await?;
- let reg_address = reg.address();
-
- total_cost = total_cost
- .checked_add(top_up_cost)
- .ok_or(Error::TotalPriceTooHigh)?;
- total_royalties = total_royalties
- .checked_add(royalties_top_up)
- .ok_or(Error::Wallet(sn_transfers::WalletError::from(
- sn_transfers::TransferError::ExcessiveNanoValue,
- )))?;
- stored = self.verify_register_stored(*reg_address).await.is_ok();
- }
- }
-
- Ok((reg, total_cost, total_royalties))
- }
-
- /// Store `Chunk` as a record. Protected method.
- ///
- /// # Arguments
- /// * 'chunk' - [Chunk]
- /// * 'payee' - [PeerId]
- /// * 'payment' - [Payment]
- /// * 'verify_store' - Boolean
- /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default
- ///
- pub(super) async fn store_chunk(
- &self,
- chunk: Chunk,
- payee: PeerId,
- payment: Payment,
- verify_store: bool,
- retry_strategy: Option<RetryStrategy>,
- ) -> Result<()> {
- info!("Store chunk: {:?}", chunk.address());
- let key = chunk.network_address().to_record_key();
- let retry_strategy = Some(retry_strategy.unwrap_or(RetryStrategy::Quick));
-
- let record_kind = RecordKind::ChunkWithPayment;
- let record = Record {
- key: key.clone(),
- value: try_serialize_record(&(payment, chunk.clone()), record_kind)?.to_vec(),
- publisher: None,
- expires: None,
- };
-
- let verification = if verify_store {
- let verification_cfg = GetRecordCfg {
- get_quorum: Quorum::N(QUORUM_N_IS_2),
- retry_strategy,
- target_record: None, // Not used since we use ChunkProof
- expected_holders: Default::default(),
- is_register: false,
- };
- // The `ChunkWithPayment` is only used to send out via PutRecord.
- // The holders shall only hold the `Chunk` copies.
- // Hence the fetched copies shall only be `Chunk`s.
-
- let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)?.to_vec();
- let random_nonce = thread_rng().gen::<u64>();
- let expected_proof = ChunkProof::new(&stored_on_node, random_nonce);
-
- Some((
- VerificationKind::ChunkProof {
- expected_proof,
- nonce: random_nonce,
- },
- verification_cfg,
- ))
- } else {
- None
- };
- let put_cfg = PutRecordCfg {
- put_quorum: Quorum::One,
- retry_strategy,
- use_put_record_to: Some(vec![payee]),
- verification,
- };
- Ok(self.network.put_record(record, &put_cfg).await?)
- }
-
- /// Get chunk from chunk address.
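- /// Fetched with `Quorum::One`: chunks are content-addressed, so a single
- /// valid copy is sufficient.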
- ///
- /// # Arguments
- /// * 'address' - [ChunkAddress]
- /// * 'show_holders' - Boolean
- /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default
- ///
- /// Return Type:
- ///
- /// Result<[Chunk]>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// use xor_name::XorName;
- /// use sn_protocol::storage::ChunkAddress;
- /// // client
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // chunk address
- /// let mut rng = rand::thread_rng();
- /// let xorname = XorName::random(&mut rng);
- /// let chunk_address = ChunkAddress::new(xorname);
- /// // get chunk
- /// let chunk = client.get_chunk(chunk_address, true, None).await?;
- /// # Ok(())
- /// # }
- /// ```
- pub async fn get_chunk(
- &self,
- address: ChunkAddress,
- show_holders: bool,
- retry_strategy: Option<RetryStrategy>,
- ) -> Result<Chunk> {
- info!("Getting chunk: {address:?}");
- let key = NetworkAddress::from_chunk_address(address).to_record_key();
-
- let expected_holders = if show_holders {
- let result: HashSet<_> = self
- .network
- .get_closest_peers(&NetworkAddress::from_chunk_address(address), true)
- .await?
- .iter()
- .cloned()
- .collect();
- result
- } else {
- Default::default()
- };
-
- let get_cfg = GetRecordCfg {
- get_quorum: Quorum::One,
- retry_strategy: Some(retry_strategy.unwrap_or(RetryStrategy::Quick)),
- target_record: None,
- expected_holders,
- is_register: false,
- };
- let record = self.network.get_record_from_network(key, &get_cfg).await?;
- let header = RecordHeader::from_record(&record)?;
- if let RecordKind::Chunk = header.kind {
- let chunk: Chunk = try_deserialize_record(&record)?;
- Ok(chunk)
- } else {
- Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into())
- }
- }
-
- /// Verify if a `Chunk` is stored by expected nodes on the network.
- /// Intended for single local use.
- pub async fn verify_chunk_stored(&self, chunk: &Chunk) -> Result<()> {
- let address = chunk.network_address();
- info!("Verifying chunk: {address:?}");
- let random_nonce = thread_rng().gen::<u64>();
- let record_value = try_serialize_record(&chunk, RecordKind::Chunk)?;
- let expected_proof = ChunkProof::new(record_value.as_ref(), random_nonce);
-
- if let Err(err) = self
- .network
- .verify_chunk_existence(
- address.clone(),
- random_nonce,
- expected_proof,
- Quorum::N(QUORUM_N_IS_2),
- None,
- )
- .await
- {
- error!("Failed to verify the existence of chunk {address:?} with err {err:?}");
- return Err(err.into());
- }
-
- Ok(())
- }
-
- /// Verify if a `Register` is stored by expected nodes on the network.
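- /// Fetches with `Quorum::All` and `RetryStrategy::Balanced` via the verifying get.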
- ///
- /// # Arguments
- /// * 'address' - [RegisterAddress]
- ///
- /// Return Type:
- ///
- /// Result<[SignedRegister]>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// use xor_name::XorName;
- /// use sn_registers::RegisterAddress;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// // Set up Client
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Set up an address
- /// let mut rng = rand::thread_rng();
- /// let owner = SecretKey::random().public_key();
- /// let xorname = XorName::random(&mut rng);
- /// let address = RegisterAddress::new(xorname, owner);
- /// // Verify address is stored
- /// let is_stored = client.verify_register_stored(address).await.is_ok();
- /// # Ok(())
- /// # }
- /// ```
- pub async fn verify_register_stored(&self, address: RegisterAddress) -> Result<SignedRegister> {
- info!("Verifying register: {address:?}");
- self.get_signed_register_from_network(address, true).await
- }
-
- /// Quickly checks if a `Register` is stored by expected nodes on the network.
- ///
- /// To be used for initial register put checks, e.g. if we expect the data _not_
- /// to exist; it essentially uses `RetryStrategy::Quick` under the hood.
- ///
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// use xor_name::XorName;
- /// use sn_registers::RegisterAddress;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// // Set up Client
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Set up an address
- /// let mut rng = rand::thread_rng();
- /// let owner = SecretKey::random().public_key();
- /// let xorname = XorName::random(&mut rng);
- /// let address = RegisterAddress::new(xorname, owner);
- /// // Check if address is stored
- /// let is_stored = client.quickly_check_if_register_stored(address).await.is_ok();
- /// # Ok(())
- /// # }
- /// ```
- pub async fn quickly_check_if_register_stored(
- &self,
- address: RegisterAddress,
- ) -> Result<SignedRegister> {
- info!("Quickly checking for existing register: {address:?}");
- self.get_signed_register_from_network(address, false).await
- }
-
- /// Send a `SpendCashNote` request to the network. Protected method.
- ///
- /// # Arguments
- /// * 'spend' - [SignedSpend]
- /// * 'verify_store' - Boolean
- ///
- pub(crate) async fn network_store_spend(
- &self,
- spend: SignedSpend,
- verify_store: bool,
- ) -> Result<()> {
- let unique_pubkey = *spend.unique_pubkey();
- let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey);
- let network_address = NetworkAddress::from_spend_address(cash_note_addr);
-
- let key = network_address.to_record_key();
-
- let record_kind = RecordKind::Spend;
- let record = Record {
- key: key.clone(),
- value: try_serialize_record(&[spend], record_kind)?.to_vec(),
- publisher: None,
- expires: None,
- };
-
- let pretty_key = PrettyPrintRecordKey::from(&key);
- info!("Sending spend {unique_pubkey:?} to the network via put_record, with addr of {cash_note_addr:?} - {pretty_key:?}, size of {}",
- record.value.len());
-
- let (record_to_verify, expected_holders) = if verify_store {
- let expected_holders: HashSet<_> = self
- .network
- .get_closest_peers(&network_address, true)
- .await?
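- // the closest peers are the nodes expected to hold the spend record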
- .iter()
- .cloned()
- .collect();
- info!("Expecting holders: {expected_holders:?}");
- (Some(record.clone()), expected_holders)
- } else {
- (None, Default::default())
- };
-
- // When there are retries on the Put side, there is no need to retry on the Get
- let verification_cfg = GetRecordCfg {
- get_quorum: Quorum::All,
- retry_strategy: None,
- target_record: record_to_verify,
- expected_holders,
- is_register: false,
- };
-
- let verification = if verify_store {
- Some((VerificationKind::Network, verification_cfg))
- } else {
- None
- };
-
- let put_cfg = PutRecordCfg {
- put_quorum: Quorum::All,
- retry_strategy: Some(RetryStrategy::Persistent),
- use_put_record_to: None,
- verification,
- };
-
- Ok(self.network.put_record(record, &put_cfg).await?)
- }
-
- /// Get a spend from the network.
- ///
- /// # Arguments
- /// * 'address' - [SpendAddress]
- ///
- /// Return Type:
- ///
- /// Result<[SignedSpend]>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// use xor_name::XorName;
- /// use sn_transfers::SpendAddress;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Create a SpendAddress
- /// let mut rng = rand::thread_rng();
- /// let xorname = XorName::random(&mut rng);
- /// let spend_address = SpendAddress::new(xorname);
- /// // Here we get the spend address
- /// let spend = client.get_spend_from_network(spend_address).await?;
- /// // Example: We can use the spend to get its unique public key:
- /// let unique_pubkey = spend.unique_pubkey();
- /// # Ok(())
- /// # }
- /// ```
- pub async fn get_spend_from_network(&self, address: SpendAddress) -> Result<SignedSpend> {
- self.try_fetch_spend_from_network(
- address,
- GetRecordCfg {
- get_quorum: Quorum::All,
- retry_strategy: Some(RetryStrategy::Balanced),
- target_record: None,
- expected_holders: Default::default(),
- is_register: false,
- },
- )
- .await
- }
-
- /// Try to peek a spend by just fetching one copy of it.
- /// Useful to help decide whether a re-put is necessary, or a spend exists already
- /// (client side verification).
- pub async fn peek_a_spend(&self, address: SpendAddress) -> Result<SignedSpend> {
- self.try_fetch_spend_from_network(
- address,
- GetRecordCfg {
- get_quorum: Quorum::One,
- retry_strategy: None,
- target_record: None,
- expected_holders: Default::default(),
- is_register: false,
- },
- )
- .await
- }
-
- /// This is similar to `get_spend_from_network`, fetching a spend from the network,
- /// but uses a different `RetryStrategy` to improve performance during crawling.
- pub async fn crawl_spend_from_network(&self, address: SpendAddress) -> Result<SignedSpend> {
- self.try_fetch_spend_from_network(
- address,
- GetRecordCfg {
- get_quorum: Quorum::All,
- retry_strategy: None,
- target_record: None,
- expected_holders: Default::default(),
- is_register: false,
- },
- )
- .await
- }
-
- /// Try to confirm the Genesis spend is not present on the network yet.
- /// It shall be quick, and any single returned copy shall be considered an error.
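- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// // minimal sketch: a quick, single-copy peek at the Genesis spend address
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// let genesis_present = client.is_genesis_spend_present().await;
- /// println!("genesis spend present: {genesis_present}");
- /// # Ok(())
- /// # }
- /// ```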
- pub async fn is_genesis_spend_present(&self) -> bool {
- let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY);
- self.peek_a_spend(genesis_addr).await.is_ok()
- }
-
- async fn try_fetch_spend_from_network(
- &self,
- address: SpendAddress,
- get_cfg: GetRecordCfg,
- ) -> Result<SignedSpend> {
- let key = NetworkAddress::from_spend_address(address).to_record_key();
-
- info!(
- "Getting spend at {address:?} with record_key {:?}",
- PrettyPrintRecordKey::from(&key)
- );
- let record = self
- .network
- .get_record_from_network(key.clone(), &get_cfg)
- .await?;
- info!(
- "For spend at {address:?} got record from the network, {:?}",
- PrettyPrintRecordKey::from(&record.key)
- );
-
- let signed_spend = get_signed_spend_from_record(&address, &record)?;
-
- // check addr
- let spend_addr = SpendAddress::from_unique_pubkey(signed_spend.unique_pubkey());
- if address != spend_addr {
- let s = format!("Spend got from the Network at {address:?} contains different spend address: {spend_addr:?}");
- warn!("{s}");
- return Err(Error::Transfer(TransferError::InvalidSpendValue(
- *signed_spend.unique_pubkey(),
- )));
- }
-
- // check spend
- match signed_spend.verify() {
- Ok(()) => {
- trace!("Verified signed spend got from network for {address:?}");
- Ok(signed_spend.clone())
- }
- Err(err) => {
- warn!("Invalid signed spend got from network for {address:?}: {err:?}.");
- Err(Error::from(err))
- }
- }
- }
-
- /// This function is used to receive a Vector of CashNoteRedemptions and turn them back into spendable CashNotes.
- /// For this we need a network connection.
- /// Verify CashNoteRedemptions and rebuild spendable currency from them.
- /// Returns an `Error::InvalidTransfer` if any CashNoteRedemption is not valid,
- /// else returns a list of CashNotes that can be spent by the owner.
- ///
- /// # Arguments
- /// * 'main_pubkey' - [MainPubkey]
- /// * 'cashnote_redemptions' - [CashNoteRedemption]
- ///
- /// Return Type:
- ///
- /// Result<[Vec]<[CashNote]>>
- ///
- /// # Example
- /// ```no_run
- /// use sn_client::{Client, Error};
- /// use bls::SecretKey;
- /// # #[tokio::main]
- /// # async fn main() -> Result<(),Error>{
- /// use sn_transfers::{CashNote, CashNoteRedemption, MainPubkey};
- /// let client = Client::new(SecretKey::random(), None, None, None).await?;
- /// // Create a main public key
- /// let pk = SecretKey::random().public_key();
- /// let main_pub_key = MainPubkey::new(pk);
- /// // Create a Cash Note Redemption Vector
- /// let cash_note = CashNote::from_hex("&hex").unwrap();
- /// let cash_note_redemption = CashNoteRedemption::from_cash_note(&cash_note);
- /// let vector = vec![cash_note_redemption.clone(), cash_note_redemption.clone()];
- /// // Verify the cash note redemptions
- /// let cash_notes = client.verify_cash_notes_redemptions(main_pub_key, &vector);
- /// # Ok(())
- /// # }
- /// ```
- pub async fn verify_cash_notes_redemptions(
- &self,
- main_pubkey: MainPubkey,
- cashnote_redemptions: &[CashNoteRedemption],
- ) -> Result<Vec<CashNote>> {
- let cash_notes = self
- .network
- .verify_cash_notes_redemptions(main_pubkey, cashnote_redemptions)
- .await?;
- Ok(cash_notes)
- }
-}
-
-fn get_register_from_record(record: &Record) -> Result<SignedRegister> {
- let header = RecordHeader::from_record(record)?;
-
- if let RecordKind::Register = header.kind {
- let register = try_deserialize_record::<SignedRegister>(record)?;
- Ok(register)
- } else {
- error!("RecordKind mismatch while trying to retrieve a signed register");
- Err(NetworkError::RecordKindMismatch(RecordKind::Register).into())
- }
-}
-
-/// If multiple register records were found for a given key, merge them into a single register
-fn merge_register_records(
- address: RegisterAddress,
- map: &HashMap<XorName, Record>,
-) -> Result<SignedRegister> {
- let key = NetworkAddress::from_register_address(address).to_record_key();
- let pretty_key = PrettyPrintRecordKey::from(&key);
- info!(
- "Got {} register records from the network for key: {pretty_key:?}",
- map.len()
- );
- let mut all_registers = vec![];
- for record in map.values() {
- match get_register_from_record(record) {
- Ok(r) => all_registers.push(r),
- Err(e) => {
- warn!("Ignoring invalid register record {pretty_key:?} with error {e:?}");
- continue;
- }
- }
- }
-
- // get the first valid register
- let one_valid_reg = if let Some(r) = all_registers.clone().iter().find(|r| r.verify().is_ok()) {
- r.clone()
- } else {
- error!("No valid register records found for {key:?}");
- return Err(Error::Protocol(ProtocolError::RegisterNotFound(Box::new(
- address,
- ))));
- };
-
- // merge it with the others if they are valid
- let register: SignedRegister = all_registers.into_iter().fold(one_valid_reg, |mut acc, r| {
- if acc.verified_merge(&r).is_err() {
- warn!("Skipping register that failed to merge.
Entry found for {key:?}"); - } - acc - }); - - Ok(register) -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeSet; - - use sn_registers::{Register, RegisterCrdt, RegisterOp}; - - use super::*; - - fn write_atop( - signed_reg: &mut SignedRegister, - crdt_reg: &mut RegisterCrdt, - entry: &[u8], - owner: &SecretKey, - ) -> eyre::Result<()> { - let children: BTreeSet<_> = crdt_reg.read().iter().map(|(hash, _)| *hash).collect(); - - let (_hash, address, crdt_op) = crdt_reg.write(entry.to_vec(), &children)?; - - let op = RegisterOp::new(address, crdt_op, owner); - - signed_reg.add_op(op)?; - - Ok(()) - } - - #[test] - fn test_merge_register_records() -> eyre::Result<()> { - let mut rng = rand::thread_rng(); - let meta = XorName::random(&mut rng); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(meta, owner_pk); - - let base_register = Register::new(owner_pk, meta, Default::default()); - let signature = owner_sk.sign(base_register.bytes()?); - - // prepare registers - let mut register_root = SignedRegister::new(base_register, signature, BTreeSet::new()); - let mut crdt_reg_root = RegisterCrdt::new(address); - - write_atop( - &mut register_root, - &mut crdt_reg_root, - b"root_entry", - &owner_sk, - )?; - - let mut signed_register1 = register_root.clone(); - let mut crdt_reg1 = crdt_reg_root.clone(); - write_atop(&mut signed_register1, &mut crdt_reg1, b"entry1", &owner_sk)?; - - let mut signed_register2 = register_root.clone(); - let mut crdt_reg2 = crdt_reg_root.clone(); - write_atop(&mut signed_register2, &mut crdt_reg2, b"entry2", &owner_sk)?; - - let base_register_bad = Register::new(owner_pk, meta, Default::default()); - let bad_sk = SecretKey::random(); - let signature_bad = bad_sk.sign(base_register_bad.bytes()?); - let signed_register_bad = - SignedRegister::new(base_register_bad, signature_bad, BTreeSet::new()); - - // prepare records - let record1 = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register1, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname1 = XorName::from_content(&record1.value); - let record2 = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register2, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname2 = XorName::from_content(&record2.value); - let record_bad = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register_bad, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname_bad = XorName::from_content(&record_bad.value); - - // test with 2 valid records: should return the two merged - let mut expected_merge = signed_register1.clone(); - expected_merge.merge(&signed_register2)?; - let map = HashMap::from_iter(vec![(xorname1, record1.clone()), (xorname2, record2)]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, expected_merge); - - // test with 1 valid record and 1 invalid record: should return the valid one - let map = HashMap::from_iter(vec![(xorname1, record1), (xorname2, record_bad.clone())]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, signed_register1); - - // test with 2 invalid records: should error out - let map = HashMap::from_iter(vec![ - (xorname_bad, record_bad.clone()), - (xorname_bad, record_bad), - ]); - 
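- // note: both entries share the key `xorname_bad`, so the map holds a single invalid record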
let res = merge_register_records(address, &map); // Err
- assert!(res.is_err());
-
- Ok(())
- }
-}
diff --git a/sn_client/src/audit.rs b/sn_client/src/audit.rs
deleted file mode 100644
index 0d9bb8daec..0000000000
--- a/sn_client/src/audit.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-mod dag_crawling;
-mod dag_error;
-mod spend_dag;
-
-#[cfg(test)]
-mod tests;
-
-pub use dag_error::{DagError, SpendFault};
-pub use spend_dag::{SpendDag, SpendDagGet};
diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs
deleted file mode 100644
index fa00a5078f..0000000000
--- a/sn_client/src/audit/dag_crawling.rs
+++ /dev/null
@@ -1,644 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use crate::{Client, Error, SpendDag};
-
-use dashmap::DashMap;
-use futures::{
- future::join_all,
- stream::{self, StreamExt},
-};
-use sn_networking::{GetRecordError, NetworkError};
-use sn_transfers::{
- NanoTokens, SignedSpend, SpendAddress, SpendReason, UniquePubkey, WalletError, WalletResult,
- DEFAULT_NETWORK_ROYALTIES_PK, GENESIS_SPEND_UNIQUE_KEY, NETWORK_ROYALTIES_PK,
-};
-use std::{
- collections::{BTreeMap, BTreeSet},
- sync::Arc,
- time::{Duration, Instant},
-};
-use tokio::sync::mpsc::Sender;
-
-const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096;
-
-enum InternalGetNetworkSpend {
- Spend(Box<SignedSpend>),
- DoubleSpend(Vec<SignedSpend>),
- NotFound,
- Error(Error),
-}
-
-impl Client {
- pub async fn new_dag_with_genesis_only(&self) -> WalletResult<SpendDag> {
- let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY);
- let mut dag = SpendDag::new(genesis_addr);
- match self.get_spend_from_network(genesis_addr).await {
- Ok(spend) => {
- dag.insert(genesis_addr, spend);
- }
- Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => {
- println!("Burnt spend detected at Genesis: {genesis_addr:?}");
- warn!("Burnt spend detected at Genesis: {genesis_addr:?}");
- for (i, spend) in spends.into_iter().enumerate() {
- let reason = spend.reason();
- let amount = spend.spend.amount();
- let ancestors_len = spend.spend.ancestors.len();
- let descendants_len = spend.spend.descendants.len();
- let roy_len = spend.spend.network_royalties().len();
- warn!(
- "burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}",
- spend.spend.ancestors, spend.spend.descendants
- );
- dag.insert(genesis_addr, spend);
- }
- }
- Err(e) => return Err(WalletError::FailedToGetSpend(e.to_string())),
- };
-
- Ok(dag)
- }
-
- /// Builds a
SpendDag from a given SpendAddress recursively following descendants all the way to UTXOs
- /// Started from Genesis, this gives the entire SpendDag of the Network at a certain point in time
- /// Once the DAG is collected, optionally verifies it and records errors in the DAG
- ///
- /// ```text
- /// -> Spend7 ---> UTXO_11
- /// /
- /// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10
- /// \
- /// ---> Spend3 ---> Spend6 ---> UTXO_9
- /// \
- /// -> Spend4 ---> UTXO_8
- ///
- /// ```
- pub async fn spend_dag_build_from(
- &self,
- spend_addr: SpendAddress,
- spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
- verify: bool,
- ) -> WalletResult<SpendDag> {
- let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE);
-
- // start crawling from the given spend address
- let self_clone = self.clone();
- let crawl_handle =
- tokio::spawn(async move { self_clone.spend_dag_crawl_from(spend_addr, tx).await });
-
- // start DAG building from the spends gathered while crawling
- // forward spends to processing if provided
- let build_handle: tokio::task::JoinHandle<WalletResult<SpendDag>> =
- tokio::spawn(async move {
- debug!("Starting building DAG from {spend_addr:?}...");
- let now = std::time::Instant::now();
- let mut dag = SpendDag::new(spend_addr);
- while let Some(spend) = rx.recv().await {
- let addr = spend.address();
- debug!(
- "Inserting spend at {addr:?} size: {}",
- dag.all_spends().len()
- );
- dag.insert(addr, spend.clone());
- if let Some(sender) = &spend_processing {
- let outputs = spend.spend.descendants.len() as u64;
- sender
- .send((spend, outputs, false))
- .await
- .map_err(|e| WalletError::SpendProcessing(e.to_string()))?;
- }
- }
- info!(
- "Done gathering DAG of size: {} in {:?}",
- dag.all_spends().len(),
- now.elapsed()
- );
- Ok(dag)
- });
-
- // wait for both to finish
- let (crawl_res, build_res) = tokio::join!(crawl_handle, build_handle);
- crawl_res.map_err(|e| {
- WalletError::SpendProcessing(format!("Failed to Join crawling results {e}"))
- })??;
- let mut dag = build_res.map_err(|e| {
- WalletError::SpendProcessing(format!("Failed to Join DAG building results {e}"))
- })??;
-
- // verify the DAG
- if verify {
- info!("Now verifying SpendDAG from {spend_addr:?} and recording errors...");
- let start = std::time::Instant::now();
- if let Err(e) = dag.record_faults(&dag.source()) {
- let s = format!(
- "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}"
- );
- error!("{s}");
- return Err(WalletError::Dag(s));
- }
- let elapsed = start.elapsed();
- info!("Finished verifying SpendDAG from {spend_addr:?} in {elapsed:?}");
- }
-
- Ok(dag)
- }
-
- /// Get spends from a set of given SpendAddresses
- /// Drain the addresses at the same layer first, then:
- /// 1. return failed_utxos for re-attempt (with insertion timestamp)
- /// 2. return fetched_addrs to avoid unnecessary re-attempts
- /// 3. return addrs_for_further_track for further tracking
- pub async fn crawl_to_next_utxos(
- &self,
- addrs_to_get: BTreeMap<SpendAddress, (u64, NanoTokens)>,
- sender: Sender<(SignedSpend, u64, bool)>,
- reattempt_seconds: u64,
- ) -> (
- BTreeMap<SpendAddress, (u64, Instant, NanoTokens)>,
- Vec<SpendAddress>,
- BTreeSet<(SpendAddress, NanoTokens)>,
- ) {
- // max concurrency for the tasks of fetching records from network.
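- // (enforced below via `buffer_unordered`, bounding the number of in-flight fetches)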
- const MAX_CONCURRENT: usize = 64;
-
- let failed_utxos_arc: Arc<DashMap<SpendAddress, (u64, Instant, NanoTokens)>> = Arc::new(DashMap::new());
- let addrs_for_further_track_arc: Arc<DashMap<(SpendAddress, NanoTokens), ()>> = Arc::new(DashMap::new());
- let fetched_addrs_arc: Arc<DashMap<SpendAddress, ()>> = Arc::new(DashMap::new());
-
- stream::iter(addrs_to_get.into_iter())
- .map(|(addr, (failed_times, amount))| {
- let client_clone = self.clone();
- let sender_clone = sender.clone();
-
- let failed_utxos = Arc::clone(&failed_utxos_arc);
- let addrs_for_further_track = Arc::clone(&addrs_for_further_track_arc);
- let fetched_addrs = Arc::clone(&fetched_addrs_arc);
- async move {
- let result = client_clone.crawl_spend(addr).await;
-
- match result {
- InternalGetNetworkSpend::Spend(spend) => {
- let for_further_track = beta_track_analyze_spend(&spend);
- let _ = sender_clone
- .send((*spend, for_further_track.len() as u64, false))
- .await;
- for entry in for_further_track {
- let _ = addrs_for_further_track.insert(entry, ());
- }
- fetched_addrs.insert(addr, ());
- }
- InternalGetNetworkSpend::DoubleSpend(spends) => {
- warn!(
- "Detected burnt spend regarding {addr:?} - {:?}",
- spends.len()
- );
-
- for (i, spend) in spends.into_iter().enumerate() {
- let reason = spend.reason();
- let amount = spend.spend.amount();
- let ancestors_len = spend.spend.ancestors.len();
- let descendants_len = spend.spend.descendants.len();
- let roy_len = spend.spend.network_royalties().len();
- warn!("burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}",
- spend.spend.ancestors, spend.spend.descendants);
- }
- fetched_addrs.insert(addr, ());
- }
- InternalGetNetworkSpend::NotFound => {
- let reattempt_interval = if amount.as_nano() > 100000 {
- info!("Could not find spend of big-UTXO {addr:?} with {amount}");
- reattempt_seconds
- } else {
- reattempt_seconds * (failed_times * 8 + 1)
- };
- failed_utxos.insert(
- addr,
- (
- failed_times + 1,
- Instant::now() + Duration::from_secs(reattempt_interval),
- amount,
- ),
- );
- }
- InternalGetNetworkSpend::Error(e) => {
- warn!("Fetching spend {addr:?} with {amount:?} resulted in error {e:?}");
- // An error of `NotEnoughCopies` could be re-attempted and succeed eventually.
- failed_utxos.insert(
- addr,
- (
- failed_times + 1,
- Instant::now() + Duration::from_secs(reattempt_seconds),
- amount,
- ),
- );
- }
- }
-
- (addr, amount)
- }
- })
- .buffer_unordered(MAX_CONCURRENT)
- .for_each(|(address, amount)| async move {
- info!("Completed fetching attempt of {address:?} with amount {amount:?}");
- })
- .await;
-
- let mut failed_utxos_result = BTreeMap::new();
- for entry in failed_utxos_arc.iter() {
- let key = entry.key();
- let val = entry.value();
- let _ = failed_utxos_result.insert(*key, *val);
- }
-
- let mut fetched_addrs = Vec::new();
- for entry in fetched_addrs_arc.iter() {
- let key = entry.key();
- fetched_addrs.push(*key);
- }
-
- let mut addrs_for_further_track = BTreeSet::new();
- for entry in addrs_for_further_track_arc.iter() {
- let key = entry.key();
- let _ = addrs_for_further_track.insert(*key);
- }
-
- (failed_utxos_result, fetched_addrs, addrs_for_further_track)
- }
-
- /// Crawls the Spend Dag from a given SpendAddress recursively
- /// following descendants all the way to UTXOs
- /// Returns the UTXOs reached
- pub async fn spend_dag_crawl_from(
- &self,
- spend_addr: SpendAddress,
- spend_processing: Sender<SignedSpend>,
- ) -> WalletResult<BTreeSet<SpendAddress>> {
- info!("Crawling spend DAG from {spend_addr:?}");
- let mut utxos = BTreeSet::new();
-
- // get first spend
- let mut descendants_to_follow = match self.crawl_spend(spend_addr).await {
- InternalGetNetworkSpend::Spend(spend) => {
- let spend = *spend;
- let descendants_to_follow = spend.spend.descendants.clone();
-
- spend_processing
- .send(spend)
- .await
- .map_err(|e| WalletError::SpendProcessing(e.to_string()))?;
- descendants_to_follow
- }
- InternalGetNetworkSpend::DoubleSpend(spends) => {
- let mut descendants_to_follow = BTreeMap::new();
- for spend in spends.into_iter() {
- descendants_to_follow.extend(spend.spend.descendants.clone());
- spend_processing
- .send(spend)
- .await
- .map_err(|e| WalletError::SpendProcessing(e.to_string()))?;
- }
- descendants_to_follow
- }
- InternalGetNetworkSpend::NotFound => {
- // the cashnote was not spent yet, so it's a UTXO
- info!("UTXO at {spend_addr:?}");
- utxos.insert(spend_addr);
- return Ok(utxos);
- }
- InternalGetNetworkSpend::Error(e) => {
- return Err(WalletError::FailedToGetSpend(e.to_string()));
- }
- };
-
- // use iteration instead of recursion to avoid stack overflow
- let mut known_descendants: BTreeSet<UniquePubkey> = BTreeSet::new();
- let mut gen: u32 = 0;
- let start = std::time::Instant::now();
-
- while !descendants_to_follow.is_empty() {
- let mut next_gen_descendants = BTreeMap::new();
-
- // list all descendants
- let mut addrs = vec![];
- for (descendant, _amount) in descendants_to_follow.iter() {
- let addrs_to_follow = SpendAddress::from_unique_pubkey(descendant);
- info!("Gen {gen} - Following descendant: {descendant:?}");
- addrs.push(addrs_to_follow);
- }
-
- // get all spends in parallel
- let mut stream = futures::stream::iter(addrs.clone())
- .map(|a| async move { (self.crawl_spend(a).await, a) })
- .buffer_unordered(crate::MAX_CONCURRENT_TASKS);
- info!(
- "Gen {gen} - Getting {} spends from {} txs in batches of: {}",
- addrs.len(),
- descendants_to_follow.len(),
- crate::MAX_CONCURRENT_TASKS,
- );
-
- // insert spends in the dag as they are collected
- while let Some((get_spend, addr)) = stream.next().await {
- match get_spend {
- InternalGetNetworkSpend::Spend(spend) => {
- next_gen_descendants.extend(spend.spend.descendants.clone());
- spend_processing
- .send(*spend.clone())
- .await
- .map_err(|e|
WalletError::SpendProcessing(e.to_string()))?;
- }
- InternalGetNetworkSpend::DoubleSpend(spends) => {
- info!("Fetched double spend(s) of len {} at {addr:?} from network, following all of them.", spends.len());
- for s in spends.into_iter() {
- next_gen_descendants.extend(s.spend.descendants.clone());
- spend_processing
- .send(s.clone())
- .await
- .map_err(|e| WalletError::SpendProcessing(e.to_string()))?;
- }
- }
- InternalGetNetworkSpend::NotFound => {
- info!("Reached UTXO at {addr:?}");
- utxos.insert(addr);
- }
- InternalGetNetworkSpend::Error(err) => {
- error!("Failed to get spend at {addr:?} during DAG collection: {err:?}")
- }
- }
- }
-
- // only follow descendants we haven't already gathered
- let followed_descendants: BTreeSet<UniquePubkey> =
- descendants_to_follow.keys().copied().collect();
- known_descendants.extend(followed_descendants);
- descendants_to_follow = next_gen_descendants
- .into_iter()
- .filter(|(key, _)| !known_descendants.contains(key))
- .collect();
-
- // go on to next gen
- gen += 1;
- }
-
- let elapsed = start.elapsed();
- info!("Finished crawling SpendDAG from {spend_addr:?} in {elapsed:?}");
- Ok(utxos)
- }
-
- /// Extends an existing SpendDag with a new SignedSpend,
- /// tracing back the ancestors of that Spend all the way to a known Spend in the DAG or else back to Genesis
- /// Verifies the DAG and records faults if any
- /// This is useful to keep a partial SpendDag to be able to verify that new spends come from Genesis
- ///
- /// ```text
- /// ... --
- /// \
- /// ... ---- ... --
- /// \ \
- /// Spend0 -> Spend1 -----> Spend2 ---> Spend5 ---> Spend2 ---> Genesis
- /// \ /
- /// ---> Spend3 ---> Spend6 ->
- /// \ /
- /// -> Spend4 ->
- /// /
- /// ...
- ///
- /// ```
- pub async fn spend_dag_extend_until(
- &self,
- dag: &mut SpendDag,
- spend_addr: SpendAddress,
- new_spend: SignedSpend,
- ) -> WalletResult<()> {
- // check existence of spend in dag
- let is_new_spend = dag.insert(spend_addr, new_spend.clone());
- if !is_new_spend {
- return Ok(());
- }
-
- // use iteration instead of recursion to avoid stack overflow
- let mut ancestors_to_verify = new_spend.spend.ancestors.clone();
- let mut depth = 0;
- let mut known_ancestors = BTreeSet::from_iter([dag.source()]);
- let start = std::time::Instant::now();
-
- while !ancestors_to_verify.is_empty() {
- let mut next_gen_ancestors = BTreeSet::new();
-
- for ancestor in ancestors_to_verify {
- let addrs_to_verify = vec![SpendAddress::from_unique_pubkey(&ancestor)];
- debug!("Depth {depth} - checking parent: {ancestor:?} - {addrs_to_verify:?}");
-
- // get all parent spends in parallel
- let tasks: Vec<_> = addrs_to_verify
- .iter()
- .map(|a| self.crawl_spend(*a))
- .collect();
- let mut spends = BTreeSet::new();
- for (spend_get, a) in join_all(tasks)
- .await
- .into_iter()
- .zip(addrs_to_verify.clone())
- {
- match spend_get {
- InternalGetNetworkSpend::Spend(s) => {
- spends.insert(*s);
- }
- InternalGetNetworkSpend::DoubleSpend(s) => {
- spends.extend(s.into_iter());
- }
- InternalGetNetworkSpend::NotFound => {
- return Err(WalletError::FailedToGetSpend(format!(
- "Missing ancestor spend at {a:?}"
- )))
- }
- InternalGetNetworkSpend::Error(e) => {
- return Err(WalletError::FailedToGetSpend(format!(
- "Failed to get ancestor spend at {a:?}: {e}"
- )))
- }
- }
- }
- let spends_len = spends.len();
- debug!("Depth {depth} - Got {spends_len} spends for parent: {addrs_to_verify:?}");
- trace!("Spends for {addrs_to_verify:?} - {spends:?}");
-
- // add spends to the dag
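- // (and remember their addresses so they are not re-verified in later rounds)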
known_ancestors.extend(addrs_to_verify.clone());
- for (spend, addr) in spends.clone().into_iter().zip(addrs_to_verify) {
- let is_new_spend = dag.insert(addr, spend.clone());
-
- // no need to check this spend's parents if it was already in the DAG
- if is_new_spend {
- next_gen_ancestors.extend(spend.spend.ancestors.clone());
- }
- }
- }
-
- // only verify parents we haven't already verified
- ancestors_to_verify = next_gen_ancestors
- .into_iter()
- .filter(|ancestor| {
- !known_ancestors.contains(&SpendAddress::from_unique_pubkey(ancestor))
- })
- .collect();
-
- depth += 1;
- let elapsed = start.elapsed();
- let n = known_ancestors.len();
- info!("Now at depth {depth} - Collected spends from {n} transactions in {elapsed:?}");
- }
-
- let elapsed = start.elapsed();
- let n = known_ancestors.len();
- info!("Collected the DAG branch all the way to known spends or genesis! Through {depth} generations, collecting spends from {n} transactions in {elapsed:?}");
-
- // verify the DAG
- info!("Now verifying SpendDAG extended at {spend_addr:?} and recording errors...");
- let start = std::time::Instant::now();
- if let Err(e) = dag.record_faults(&dag.source()) {
- let s = format!(
- "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}"
- );
- error!("{s}");
- return Err(WalletError::Dag(s));
- }
- let elapsed = start.elapsed();
- info!("Finished verifying SpendDAG extended from {spend_addr:?} in {elapsed:?}");
- Ok(())
- }
-
- /// Extends an existing SpendDag starting from the given utxos
- /// If verify is true, records faults in the DAG
- pub async fn spend_dag_continue_from(
- &self,
- dag: &mut SpendDag,
- utxos: BTreeSet<SpendAddress>,
- spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
- verify: bool,
- ) {
- let main_dag_src = dag.source();
- info!(
- "Expanding spend DAG with source: {main_dag_src:?} from {} utxos",
- utxos.len()
- );
-
- let sender = spend_processing.clone();
- let tasks = utxos
- .iter()
- .map(|utxo| self.spend_dag_build_from(*utxo, sender.clone(), false));
- let sub_dags = join_all(tasks).await;
- for (res, addr) in sub_dags.into_iter().zip(utxos.into_iter()) {
- match res {
- Ok(sub_dag) => {
- debug!("Gathered sub DAG from: {addr:?}");
- if let Err(e) = dag.merge(sub_dag, verify) {
- warn!("Failed to merge sub dag from {addr:?} into dag: {e}");
- }
- }
- Err(e) => warn!("Failed to gather sub dag from {addr:?}: {e}"),
- };
- }
-
- info!("Done gathering spend DAG from utxos");
- }
-
- /// Extends an existing SpendDag starting from the utxos in this DAG
- /// Covers the entirety of currently existing Spends if the DAG was built from Genesis
- /// If verify is true, records faults in the DAG
- /// Stops gathering after max_depth generations
- pub async fn spend_dag_continue_from_utxos(
- &self,
- dag: &mut SpendDag,
- spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
- verify: bool,
- ) {
- let utxos = dag.get_utxos();
- self.spend_dag_continue_from(dag, utxos, spend_processing, verify)
- .await
- }
-
- /// Internal get spend helper for DAG purposes
- /// For crawling, a special fetch policy is deployed to improve the performance:
- /// 1. Expect `majority` copies as it is a `Spend`;
- /// 2. But don't retry as most will be `UTXO` which won't be found.
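- /// (This maps to `crawl_spend_from_network`, which sets `retry_strategy: None`.)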
- async fn crawl_spend(&self, spend_addr: SpendAddress) -> InternalGetNetworkSpend {
- match self.crawl_spend_from_network(spend_addr).await {
- Ok(s) => {
- debug!("DAG crawling: fetched spend {spend_addr:?} from network");
- InternalGetNetworkSpend::Spend(Box::new(s))
- }
- Err(Error::Network(NetworkError::GetRecordError(GetRecordError::RecordNotFound))) => {
- debug!("DAG crawling: spend at {spend_addr:?} not found on the network");
- InternalGetNetworkSpend::NotFound
- }
- Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => {
- debug!("DAG crawling: got double spend(s) of len {} at {spend_addr:?} on the network", spends.len());
- InternalGetNetworkSpend::DoubleSpend(spends)
- }
- Err(e) => {
- debug!(
- "DAG crawling: got an error for spend at {spend_addr:?} on the network: {e}"
- );
- InternalGetNetworkSpend::Error(e)
- }
- }
- }
-}
-
-/// Helper function to analyze a spend for the beta_tracking optimization.
-/// Returns the new_utxos that need to be further tracked.
-fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet<(SpendAddress, NanoTokens)> {
- // Filter out royalty outputs
- let royalty_pubkeys: BTreeSet<_> = spend
- .spend
- .network_royalties()
- .iter()
- .map(|(_, _, der)| NETWORK_ROYALTIES_PK.new_unique_pubkey(der))
- .collect();
- let default_royalty_pubkeys: BTreeSet<_> = spend
- .spend
- .network_royalties()
- .iter()
- .map(|(_, _, der)| DEFAULT_NETWORK_ROYALTIES_PK.new_unique_pubkey(der))
- .collect();
-
- let spend_addr = spend.address();
- let new_utxos: BTreeSet<_> = spend
- .spend
- .descendants
- .iter()
- .filter_map(|(unique_pubkey, amount)| {
- if default_royalty_pubkeys.contains(unique_pubkey)
- || royalty_pubkeys.contains(unique_pubkey)
- {
- None
- } else {
- let addr = SpendAddress::from_unique_pubkey(unique_pubkey);
-
- if amount.as_nano() > 100000 {
- info!("Spend {spend_addr:?} has a big-UTXO {addr:?} with {amount}");
- }
-
- Some((addr, *amount))
- }
- })
- .collect();
-
- if let SpendReason::BetaRewardTracking(_) = spend.reason() {
- // Do not track down forwarded payment further
- Default::default()
- } else {
- trace!(
- "Spend {spend_addr:?} originally has {} outputs, tracking {} of them.",
- spend.spend.descendants.len(),
- new_utxos.len()
- );
- new_utxos
- }
-}
diff --git a/sn_client/src/audit/dag_error.rs b/sn_client/src/audit/dag_error.rs
deleted file mode 100644
index 6fb79953fd..0000000000
--- a/sn_client/src/audit/dag_error.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
- -use serde::{Deserialize, Serialize}; -use sn_transfers::SpendAddress; -use thiserror::Error; - -/// Errors that mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum DagError { - #[error("DAG has no source at {0:?}")] - MissingSource(SpendAddress), - #[error("DAG is incoherent at {0:?}: {1}")] - IncoherentDag(SpendAddress, String), - #[error("DAG with root {0:?} contains a cycle")] - DagContainsCycle(SpendAddress), -} - -/// List of possible faults that can be found in the DAG during verification -/// This indicates a certain spend is invalid and the reason for it -/// but does not mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum SpendFault { - #[error("Double Spend at {0:?}")] - DoubleSpend(SpendAddress), - #[error("Spend at {addr:?} has a missing ancestor at {ancestor:?}, until this ancestor is added to the DAG, it cannot be verified")] - MissingAncestry { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error( - "Spend at {addr:?} has a double spent ancestor at {ancestor:?}, making it unspendable" - )] - DoubleSpentAncestor { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error("Invalid transaction for spend at {0:?}: {1}")] - InvalidTransaction(SpendAddress, String), - #[error("Poisoned ancestry for spend at {0:?}: {1}")] - PoisonedAncestry(SpendAddress, String), - #[error("Spend at {addr:?} does not descend from given source: {src:?}")] - OrphanSpend { - addr: SpendAddress, - src: SpendAddress, - }, -} - -impl DagError { - pub fn spend_address(&self) -> SpendAddress { - match self { - DagError::MissingSource(addr) - | DagError::IncoherentDag(addr, _) - | DagError::DagContainsCycle(addr) => *addr, - } - } -} - -impl SpendFault { - pub fn spend_address(&self) -> SpendAddress { - match self { - SpendFault::DoubleSpend(addr) - | SpendFault::MissingAncestry { addr, .. } - | SpendFault::DoubleSpentAncestor { addr, .. } - | SpendFault::InvalidTransaction(addr, _) - | SpendFault::PoisonedAncestry(addr, _) - | SpendFault::OrphanSpend { addr, .. } => *addr, - } - } -} diff --git a/sn_client/src/audit/spend_dag.rs b/sn_client/src/audit/spend_dag.rs deleted file mode 100644 index fbf00bd947..0000000000 --- a/sn_client/src/audit/spend_dag.rs +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::SecretKey; -use petgraph::dot::Dot; -use petgraph::graph::{DiGraph, NodeIndex}; -use petgraph::visit::EdgeRef; -use serde::{Deserialize, Serialize}; -use sn_transfers::{ - is_genesis_spend, CashNoteRedemption, DerivationIndex, Hash, NanoTokens, SignedSpend, - SpendAddress, UniquePubkey, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - path::Path, -}; - -use super::dag_error::{DagError, SpendFault}; - -/// A DAG representing the spends from a specific Spend all the way to the UTXOs. -/// Starting from Genesis, this would encompass all the spends that have happened on the network -/// at a certain point in time. 
-///
-/// ```text
-/// -> Spend7 ---> UTXO_11
-/// /
-/// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10
-/// \
-/// ---> Spend3 ---> Spend6 ---> UTXO_9
-/// \
-/// -> Spend4 ---> UTXO_8
-///
-/// ```
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct SpendDag {
- /// A directed graph of spend addresses
- dag: DiGraph<SpendAddress, NanoTokens>,
- /// All the spends referred to in the dag indexed by their SpendAddress
- spends: BTreeMap<SpendAddress, DagEntry>,
- /// The source of the DAG (aka Genesis)
- source: SpendAddress,
- /// Recorded faults in the DAG
- faults: BTreeMap<SpendAddress, BTreeSet<SpendFault>>,
-}
-
-type DagIndex = usize;
-
-/// Internal Dag entry type
-#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
-enum DagEntry {
- NotGatheredYet(DagIndex),
- DoubleSpend(Vec<(SignedSpend, DagIndex)>),
- Spend(Box<SignedSpend>, DagIndex),
-}
-
-impl DagEntry {
- fn indexes(&self) -> Vec<DagIndex> {
- match self {
- DagEntry::NotGatheredYet(idx) => vec![*idx],
- DagEntry::DoubleSpend(spends) => spends.iter().map(|(_, idx)| *idx).collect(),
- DagEntry::Spend(_, idx) => vec![*idx],
- }
- }
-
- fn spends(&self) -> Vec<&SignedSpend> {
- match self {
- DagEntry::Spend(spend, _) => vec![&**spend],
- DagEntry::DoubleSpend(spends) => spends.iter().map(|(s, _)| s).collect(),
- DagEntry::NotGatheredYet(_) => vec![],
- }
- }
-}
-
-/// The result of a get operation on the DAG
-#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
-pub enum SpendDagGet {
- /// Spend does not exist in the DAG
- SpendNotFound,
- /// Spend key is referred to by known spends but does not exist in the DAG yet
- Utxo,
- /// Spend is a double spend
- DoubleSpend(Vec<SignedSpend>),
- /// Spend is in the DAG
- Spend(Box<SignedSpend>),
-}
-
-impl SpendDag {
- /// Create a new DAG with a given source
- pub fn new(source: SpendAddress) -> Self {
- Self {
- dag: DiGraph::new(),
- spends: BTreeMap::new(),
- source,
- faults: BTreeMap::new(),
- }
- }
-
- pub fn source(&self) -> SpendAddress {
- self.source
- }
-
- pub fn load_from_file<P: AsRef<Path>>(path: P) -> crate::Result<Self> {
- let bytes = std::fs::read(path)?;
- let dag: SpendDag = rmp_serde::from_slice(&bytes)?;
- Ok(dag)
- }
-
- pub fn dump_to_file<P: AsRef<Path>>(&self, path: P) -> crate::Result<()> {
- let bytes = rmp_serde::to_vec(&self)?;
- std::fs::write(path, bytes)?;
- Ok(())
- }
-
- /// Insert a spend into the dag
- /// Creating edges (links) from its ancestors and to its descendants
- /// If the inserted spend is already known, it will be ignored
- /// If the inserted spend is a double spend, it will be saved along with the previous spend
- /// Return true if the spend was inserted and false if it was already in the DAG
- pub fn insert(&mut self, spend_addr: SpendAddress, spend: SignedSpend) -> bool {
- let existing_entry = self.spends.get(&spend_addr).cloned();
- let new_node_idx = match existing_entry {
- // add new spend to the DAG
- None => {
- let node_idx = self.dag.add_node(spend_addr);
- self.spends.insert(
- spend_addr,
- DagEntry::Spend(Box::new(spend.clone()), node_idx.index()),
- );
- node_idx
- }
- // or upgrade a known but not gathered entry to spend
- Some(DagEntry::NotGatheredYet(idx)) => {
- self.spends
- .insert(spend_addr, DagEntry::Spend(Box::new(spend.clone()), idx));
- let node_idx = NodeIndex::new(idx);
- self.remove_all_edges(node_idx);
- node_idx
- }
- // or upgrade spend to double spend if it is different from the existing one
- Some(DagEntry::Spend(s, idx)) => {
- let existing_spend = *s.clone();
- if existing_spend == spend {
- return false;
- }
-
- let node_idx = self.dag.add_node(spend_addr);
- let double_spend = DagEntry::DoubleSpend(vec![
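- // keep both conflicting spends, each indexed by its own node in the graph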
(existing_spend.clone(), idx),
- (spend.clone(), node_idx.index()),
- ]);
- self.spends.insert(spend_addr, double_spend);
- node_idx
- }
- // or add extra spend to an existing double spend if it is unknown yet
- Some(DagEntry::DoubleSpend(vec_s)) => {
- if vec_s.iter().any(|(s, _idx)| s == &spend) {
- return false;
- }
-
- let node_idx = self.dag.add_node(spend_addr);
- let mut vec_s = vec_s.clone();
- vec_s.push((spend.clone(), node_idx.index()));
- self.spends.insert(spend_addr, DagEntry::DoubleSpend(vec_s));
- node_idx
- }
- };
-
- // link to descendants
- for (descendant, amount) in spend.spend.descendants.iter() {
- let descendant_addr = SpendAddress::from_unique_pubkey(descendant);
-
- // add descendant if not already in dag
- let spends_at_addr = self.spends.entry(descendant_addr).or_insert_with(|| {
- let node_idx = self.dag.add_node(descendant_addr);
- DagEntry::NotGatheredYet(node_idx.index())
- });
-
- // link to descendant
- for idx in spends_at_addr.indexes() {
- let descendant_idx = NodeIndex::new(idx);
- self.dag.update_edge(new_node_idx, descendant_idx, *amount);
- }
- }
-
- // do not link to ancestors if the spend is the source
- if spend_addr == self.source {
- return true;
- }
-
- // link to ancestors
- const PENDING_AMOUNT: NanoTokens = NanoTokens::from(0);
- for ancestor in spend.spend.ancestors.iter() {
- let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor);
-
- // add ancestor if not already in dag
- let spends_at_addr = self.spends.entry(ancestor_addr).or_insert_with(|| {
- let node_idx = self.dag.add_node(ancestor_addr);
- DagEntry::NotGatheredYet(node_idx.index())
- });
-
- // link to ancestor
- match spends_at_addr {
- DagEntry::NotGatheredYet(idx) => {
- let ancestor_idx = NodeIndex::new(*idx);
- self.dag
- .update_edge(ancestor_idx, new_node_idx, PENDING_AMOUNT);
- }
- DagEntry::Spend(ancestor_spend, idx) => {
- let ancestor_idx = NodeIndex::new(*idx);
- let ancestor_given_amount = ancestor_spend
- .spend
- .descendants
- .iter()
- .find(|(descendant, _amount)| **descendant == spend.spend.unique_pubkey)
- .map(|(_descendant, amount)| *amount)
- .unwrap_or(PENDING_AMOUNT);
- self.dag
- .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount);
- }
- DagEntry::DoubleSpend(multiple_ancestors) => {
- for (ancestor_spend, ancestor_idx) in multiple_ancestors {
- if ancestor_spend
- .spend
- .descendants
- .contains_key(spend.unique_pubkey())
- {
- let ancestor_idx = NodeIndex::new(*ancestor_idx);
- let ancestor_given_amount = ancestor_spend
- .spend
- .descendants
- .iter()
- .find(|(descendant, _amount)| {
- **descendant == spend.spend.unique_pubkey
- })
- .map(|(_descendant, amount)| *amount)
- .unwrap_or(PENDING_AMOUNT);
- self.dag
- .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount);
- }
- }
- }
- }
- }
-
- true
- }
-
- /// Get spend addresses that probably exist, as they are referred to by spends we know,
- /// but we haven't gathered them yet
- /// This includes UTXOs and unknown ancestors
- pub fn get_pending_spends(&self) -> BTreeSet<SpendAddress> {
- self.spends
- .iter()
- .filter_map(|(addr, entry)| match entry {
- DagEntry::NotGatheredYet(_) => Some(*addr),
- _ => None,
- })
- .collect()
- }
-
- /// Get the UTXOs: all the addresses that are referred to as children by other spends
- /// but that don't have children themselves.
- /// Those will eventually exist on the Network as the addresses are spent by their owners.
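- /// In graph terms, a UTXO is simply a leaf: a node with no outgoing edges.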
-    pub fn get_utxos(&self) -> BTreeSet<SpendAddress> {
-        let mut leaves = BTreeSet::new();
-        for node_index in self.dag.node_indices() {
-            if !self
-                .dag
-                .neighbors_directed(node_index, petgraph::Direction::Outgoing)
-                .any(|_| true)
-            {
-                let utxo_addr = self.dag[node_index];
-                leaves.insert(utxo_addr);
-            }
-        }
-        leaves
-    }
-
-    pub fn dump_dot_format(&self) -> String {
-        format!("{:?}", Dot::with_config(&self.dag, &[]))
-    }
-
-    pub fn dump_payment_forward_statistics(&self, sk: &SecretKey) -> String {
-        let mut statistics: BTreeMap<String, Vec<NanoTokens>> = Default::default();
-
-        let mut hash_dictionary: BTreeMap<Hash, String> = Default::default();
-
-        // The following three are used in the memcheck test script.
-        // Update whenever these three get changed in the script.
-        let bootstrap_string = "bootstrap".to_string();
-        let restart_string = "restart".to_string();
-        let restarted_string = "restarted".to_string();
-        let _ = hash_dictionary.insert(Hash::hash(bootstrap_string.as_bytes()), bootstrap_string);
-        let _ = hash_dictionary.insert(Hash::hash(restart_string.as_bytes()), restart_string);
-        let _ = hash_dictionary.insert(Hash::hash(restarted_string.as_bytes()), restarted_string);
-        for i in 0..50 {
-            let node_string = format!("node_{i}");
-            let _ = hash_dictionary.insert(Hash::hash(node_string.as_bytes()), node_string);
-        }
-
-        for spend_dag_entry in self.spends.values() {
-            if let DagEntry::Spend(signed_spend, _) = spend_dag_entry {
-                if let Some(sender_hash) = signed_spend.spend.reason.decrypt_discord_cypher(sk) {
-                    let sender = if let Some(readable_sender) = hash_dictionary.get(&sender_hash) {
-                        readable_sender.clone()
-                    } else {
-                        format!("{sender_hash:?}")
-                    };
-                    let holders = statistics.entry(sender).or_default();
-                    holders.push(signed_spend.spend.amount());
-                }
-            }
-        }
-
-        let mut content = "Sender, Times, Amount".to_string();
-        for (sender, payments) in statistics.iter() {
-            let total_amount: u64 = payments
-                .iter()
-                .map(|nano_tokens| nano_tokens.as_nano())
-                .sum();
-            content = format!("{content}\n{sender}, {}, {total_amount}", payments.len());
-        }
-        content
-    }
-
-    /// Merges the given dag into ours, optionally recomputing the faults after the merge.
-    /// If verify is set to false, the faults will not be computed; this can be useful
-    /// when batching merges to avoid re-verifying, but be sure to manually verify afterwards.
-    pub fn merge(&mut self, sub_dag: SpendDag, verify: bool) -> Result<(), DagError> {
-        let source = self.source();
-        info!(
-            "Merging sub DAG starting at {:?} into our DAG with source {:?}",
-            sub_dag.source(),
-            source
-        );
-        for (addr, spends) in sub_dag.spends {
-            // only add spends to the dag, ignoring utxos and not yet gathered relatives
-            // utxos will be added automatically as their ancestors are added
-            // edges are updated by the insert method
-            match spends {
-                DagEntry::NotGatheredYet(_) => continue,
-                DagEntry::DoubleSpend(spends) => {
-                    for (spend, _) in spends {
-                        self.insert(addr, spend);
-                    }
-                }
-                DagEntry::Spend(spend, _) => {
-                    self.insert(addr, *spend);
-                }
-            }
-        }
-
-        // recompute faults
-        if verify {
-            self.record_faults(&source)?;
-        }
-
-        Ok(())
-    }
-
-    /// Get the spend at a given address
-    pub fn get_spend(&self, addr: &SpendAddress) -> SpendDagGet {
-        match self.spends.get(addr) {
-            None => SpendDagGet::SpendNotFound,
-            Some(DagEntry::NotGatheredYet(_)) => SpendDagGet::Utxo,
-            Some(DagEntry::DoubleSpend(spends)) => {
-                SpendDagGet::DoubleSpend(spends.iter().map(|(s, _)| s.clone()).collect())
-            }
-            Some(DagEntry::Spend(spend, _)) => SpendDagGet::Spend(spend.clone()),
-        }
-    }
-
-    /// Get the recorded faults, if any, for a given spend address
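-    /// (faults are populated by record_faults/verify, not by insert)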
-    pub fn get_spend_faults(&self, addr: &SpendAddress) -> BTreeSet<SpendFault> {
-        self.faults.get(addr).cloned().unwrap_or_default()
-    }
-
-    /// Helper to get the underlying indexes of a spend entry in the DAG.
-    /// This unstable API is used to access the underlying graph for testing purposes.
-    /// An empty vec is returned if the spend is not in the DAG.
-    pub fn get_spend_indexes(&self, addr: &SpendAddress) -> Vec<DagIndex> {
-        self.spends
-            .get(addr)
-            .map(|spends| spends.indexes())
-            .unwrap_or_default()
-    }
-
-    /// Get all spends from the DAG
-    pub fn all_spends(&self) -> Vec<&SignedSpend> {
-        self.spends
-            .values()
-            .flat_map(|entry| entry.spends())
-            .collect()
-    }
-
-    /// Get the faults recorded in the DAG
-    pub fn faults(&self) -> &BTreeMap<SpendAddress, BTreeSet<SpendFault>> {
-        &self.faults
-    }
-
-    /// Get all royalties from the DAG
-    pub fn all_royalties(&self) -> crate::Result<Vec<CashNoteRedemption>> {
-        let spends = self.all_spends();
-        let mut royalties_by_unique_pk: BTreeMap<
-            UniquePubkey,
-            Vec<(DerivationIndex, SpendAddress)>,
-        > = BTreeMap::new();
-        for s in spends {
-            let parent_spend_addr = SpendAddress::from_unique_pubkey(&s.spend.unique_pubkey);
-            for (roy_pk, _, derivation_idx) in s.spend.network_royalties() {
-                royalties_by_unique_pk
-                    .entry(roy_pk)
-                    .and_modify(|v| v.push((derivation_idx, parent_spend_addr)))
-                    .or_insert(vec![(derivation_idx, parent_spend_addr)]);
-            }
-        }
-
-        // assemble those and check
-        let mut royalties = vec![];
-        for (unique_pk, vec) in royalties_by_unique_pk.into_iter() {
-            let parents_spend_addrs = vec.iter().map(|(_di, spend_addr)| *spend_addr).collect();
-            let derivation_idx_uniq: BTreeSet<_> =
-                vec.iter().map(|(di, _spend_addr)| *di).collect();
-            let idx_vec: Vec<_> = derivation_idx_uniq.into_iter().collect();
-            let derivation_index = match idx_vec.as_slice() {
-                [one_unique] => *one_unique,
-                _ => {
-                    warn!("DerivationIndex in single royalty output for {unique_pk:?} should have been unique, found parents and reported derivation index {vec:?}");
-                    continue;
-                }
-            };
-            royalties.push(CashNoteRedemption::new(
-                derivation_index,
-                parents_spend_addrs,
-            ))
-        }
-        Ok(royalties)
-    }
-
-    /// Remove all edges from a Node in the DAG
-    fn remove_all_edges(&mut self, node: NodeIndex) {
-        let incoming: Vec<_> = self
-            .dag
-            .edges_directed(node, petgraph::Direction::Incoming)
-            .map(|e| e.id())
-            .collect();
-        let outgoing: Vec<_> = self
-            .dag
-            .edges_directed(node, petgraph::Direction::Outgoing)
-            .map(|e| e.id())
-            .collect();
-        for edge in incoming.into_iter().chain(outgoing.into_iter()) {
-            self.dag.remove_edge(edge);
-        }
-    }
-
-    /// Helper that returns the direct ancestors of a given spend
-    /// along with any faults detected.
-    /// On error, returns the address of the missing ancestor.
-    fn get_direct_ancestors(
-        &self,
-        spend: &SignedSpend,
-    ) -> Result<(BTreeSet<SignedSpend>, BTreeSet<SpendFault>), SpendAddress> {
-        let addr = spend.address();
-        let mut ancestors = BTreeSet::new();
-        let mut faults = BTreeSet::new();
-        for ancestor in spend.spend.ancestors.iter() {
-            let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor);
-            match self.spends.get(&ancestor_addr) {
-                Some(DagEntry::Spend(ancestor_spend, _)) => {
-                    ancestors.insert(*ancestor_spend.clone());
-                }
-                Some(DagEntry::NotGatheredYet(_)) | None => {
-                    warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is missing");
-                    return Err(ancestor_addr);
-                }
-                Some(DagEntry::DoubleSpend(multiple_ancestors)) => {
-                    debug!("Direct ancestor for spend {spend:?} at {ancestor_addr:?} is a double spend");
-                    
faults.insert(SpendFault::DoubleSpentAncestor { - addr, - ancestor: ancestor_addr, - }); - let actual_ancestor: Vec<_> = multiple_ancestors - .iter() - .filter(|(s, _)| s.spend.descendants.contains_key(spend.unique_pubkey())) - .map(|(s, _)| s.clone()) - .collect(); - match actual_ancestor.as_slice() { - [ancestor_spend] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend but one of those match our parent_tx hash, using it for verification"); - ancestors.insert(ancestor_spend.clone()); - } - [ancestor1, _ancestor2, ..] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and mutliple match our parent_tx hash, using the first one for verification"); - ancestors.insert(ancestor1.clone()); - } - [] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and none of them match the spend parent_tx, which means the parent for this spend is missing!"); - return Err(ancestor_addr); - } - } - } - } - } - Ok((ancestors, faults)) - } - - /// helper that returns all the descendants (recursively all the way to UTXOs) of a given spend - fn all_descendants(&self, addr: &SpendAddress) -> Result, DagError> { - let mut descendants = BTreeSet::new(); - let mut to_traverse = BTreeSet::from_iter(vec![addr]); - while let Some(current_addr) = to_traverse.pop_first() { - // get the spend at this address - let dag_entry = match self.spends.get(current_addr) { - Some(entry) => entry, - None => { - warn!("Incoherent DAG, missing descendant spend when expecting one at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("Missing descendant spend in DAG at: {current_addr:?}"), - )); - } - }; - let (spends, indexes) = (dag_entry.spends(), dag_entry.indexes()); - - // get descendants via spend - let descendants_via_spend: BTreeSet = spends - .into_iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - - // get descendants via DAG - let descendants_via_dag: BTreeSet<&SpendAddress> = indexes - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), petgraph::Direction::Outgoing) - .map(|i| &self.dag[i]) - }) - .collect(); - - // report inconsistencies - if descendants_via_dag != descendants_via_spend.iter().collect() { - if matches!(dag_entry, DagEntry::NotGatheredYet(_)) { - debug!("Spend at {current_addr:?} was not gathered yet and has children refering to it, continuing traversal through those children..."); - } else { - warn!("Incoherent DAG at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("descendants via DAG: {descendants_via_dag:?} do not match descendants via spend: {descendants_via_spend:?}") - )); - } - } - - // continue traversal - let not_transversed = descendants_via_dag.difference(&descendants); - to_traverse.extend(not_transversed); - descendants.extend(descendants_via_dag.iter().cloned()); - } - Ok(descendants) - } - - /// find all the orphans in the DAG and record them as OrphanSpend - /// returns the list of OrphanSpend and other errors encountered in the way - fn find_orphans(&self, source: &SpendAddress) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let all_addresses: BTreeSet<&SpendAddress> = self.spends.keys().collect(); - let all_descendants = self.all_descendants(source)?; - let parents: BTreeSet<_> = self - .get_spend_indexes(source) - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), 
petgraph::Direction::Incoming) - }) - .map(|parent_idx| &self.dag[parent_idx]) - .collect(); - let non_orphans = - BTreeSet::from_iter(all_descendants.into_iter().chain(parents).chain([source])); - - // orphans are those that are neither descandants nor source's parents nor source itself - let orphans: BTreeSet<&SpendAddress> = - all_addresses.difference(&non_orphans).cloned().collect(); - for orphan in orphans { - let src = *source; - let addr = *orphan; - debug!("Found orphan: {orphan:?} of {src:?}"); - recorded_faults.insert(SpendFault::OrphanSpend { addr, src }); - } - - Ok(recorded_faults) - } - - /// Checks if a double spend has multiple living descendant branches that fork - fn double_spend_has_forking_descendant_branches(&self, spends: &Vec<&SignedSpend>) -> bool { - // gather all living descendants for each branch - let mut set_of_living_descendants: BTreeSet> = BTreeSet::new(); - for spend in spends { - let gathered_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .filter_map(|a| self.spends.get(&a)) - .filter_map(|s| { - if matches!(s, DagEntry::NotGatheredYet(_)) { - None - } else { - Some(s.spends()) - } - }) - .flatten() - .collect::>(); - set_of_living_descendants.insert(gathered_descendants); - } - - // make sure there is no fork - for set1 in set_of_living_descendants.iter() { - for set2 in set_of_living_descendants.iter() { - if set1.is_subset(set2) || set2.is_subset(set1) { - continue; - } else { - return true; - } - } - } - - false - } - - /// Verify the DAG and record faults in the DAG - /// If the DAG is invalid, return an error immediately, without mutating the DAG - pub fn record_faults(&mut self, source: &SpendAddress) -> Result<(), DagError> { - let faults = self.verify(source)?; - - self.faults.clear(); - for f in faults { - self.faults.entry(f.spend_address()).or_default().insert(f); - } - Ok(()) - } - - /// Verify the DAG and return faults detected in the DAG - /// If the DAG itself is invalid, return an error immediately - pub fn verify(&self, source: &SpendAddress) -> Result, DagError> { - info!("Verifying DAG starting off: {source:?}"); - let mut recorded_faults = BTreeSet::new(); - - // verify the DAG is acyclic - if petgraph::algo::is_cyclic_directed(&self.dag) { - warn!("DAG is cyclic"); - return Err(DagError::DagContainsCycle(*source)); - } - - // verify DAG source exists in the DAG (Genesis in case of a complete DAG) - debug!("Verifying DAG source: {source:?}"); - match self.spends.get(source) { - None => { - debug!("DAG does not contain its source: {source:?}"); - return Err(DagError::MissingSource(*source)); - } - Some(DagEntry::DoubleSpend(_)) => { - debug!("DAG source is a double spend: {source:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*source)); - } - _ => (), - } - - // identify orphans (spends that don't come from the source) - debug!("Looking for orphans of {source:?}"); - recorded_faults.extend(self.find_orphans(source)?); - - // check all transactions - for (addr, _) in self.spends.iter() { - debug!("Verifying transaction at: {addr:?}"); - // get the spend at this address - let spends = self - .spends - .get(addr) - .map(|s| s.spends()) - .unwrap_or_default(); - - // record double spends - if spends.len() > 1 { - debug!("Found a double spend entry in DAG at {addr:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*addr)); - let direct_descendants: BTreeSet = spends - .iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - 
debug!("Making the direct descendants of the double spend at {addr:?} as faulty: {direct_descendants:?}"); - for a in direct_descendants.iter() { - recorded_faults.insert(SpendFault::DoubleSpentAncestor { - addr: *a, - ancestor: *addr, - }); - } - if self.double_spend_has_forking_descendant_branches(&spends) { - debug!("Double spend at {addr:?} has multiple living descendant branches, poisoning them..."); - let poison = format!( - "spend is on one of multiple branches of a double spent ancestor: {addr:?}" - ); - let direct_living_descendant_spends: BTreeSet<_> = direct_descendants - .iter() - .filter_map(|a| self.spends.get(a)) - .flat_map(|s| s.spends()) - .collect(); - for s in direct_living_descendant_spends { - recorded_faults.extend(self.poison_all_descendants(s, poison.clone())?); - } - } - continue; - } - - // skip parent verification for source as we don't know its ancestors - if addr == source { - debug!("Skip parent verification for source at: {addr:?}"); - continue; - } - - // verify parents - for s in spends { - recorded_faults.extend(self.verify_spend_parents(s)?); - } - } - - info!( - "Found {} faults: {recorded_faults:#?}", - recorded_faults.len() - ); - Ok(recorded_faults) - } - - /// Verifies a single spend and returns resulting errors and DAG poisoning spread - fn verify_spend_parents(&self, spend: &SignedSpend) -> Result, DagError> { - let addr = spend.address(); - let mut recorded_faults = BTreeSet::new(); - debug!("Verifying spend: {spend:?}"); - - // skip if spend matches genesis - if is_genesis_spend(spend) { - debug!("Skip transaction verification for Genesis: {spend:?}"); - return Ok(recorded_faults); - } - - // get the ancestors of this spend - let (ancestor_spends, faults) = match self.get_direct_ancestors(spend) { - Ok(a) => a, - Err(missing_ancestor) => { - debug!("Failed to get ancestor spends of {spend:?} as ancestor at {missing_ancestor:?} is missing"); - recorded_faults.insert(SpendFault::MissingAncestry { - addr, - ancestor: missing_ancestor, - }); - - let poison = format!("missing ancestor at: {missing_ancestor:?}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - return Ok(recorded_faults); - } - }; - recorded_faults.extend(faults); - - // verify the parents - if let Err(e) = spend.verify_parent_spends(&ancestor_spends) { - warn!("Parent verfication failed for spend at: {spend:?}: {e}"); - recorded_faults.insert(SpendFault::InvalidTransaction(addr, format!("{e}"))); - let poison = format!("ancestor transaction was poisoned at: {spend:?}: {e}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - } - - Ok(recorded_faults) - } - - /// Poison all descendants of a spend with given the poison message - fn poison_all_descendants( - &self, - spend: &SignedSpend, - poison: String, - ) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let direct_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .collect::>(); - let mut all_descendants = direct_descendants - .iter() - .map(|addr| self.all_descendants(addr)) - .collect::, _>>()? 
- .into_iter() - .flatten() - .collect::>(); - all_descendants.extend(direct_descendants.iter()); - - for d in all_descendants { - recorded_faults.insert(SpendFault::PoisonedAncestry(*d, poison.clone())); - } - - Ok(recorded_faults) - } -} - -#[cfg(test)] -mod tests { - use xor_name::XorName; - - use super::*; - - #[test] - fn test_spend_dag_serialisation() { - let mut rng = rand::thread_rng(); - let dummy_source = SpendAddress::new(XorName::random(&mut rng)); - let dag = SpendDag::new(dummy_source); - let serialized_data = rmp_serde::to_vec(&dag).expect("Serialization failed"); - let deserialized_instance: SpendDag = - rmp_serde::from_slice(&serialized_data).expect("Deserialization failed"); - let reserialized_data = - rmp_serde::to_vec(&deserialized_instance).expect("Serialization failed"); - assert_eq!(reserialized_data, serialized_data); - } -} diff --git a/sn_client/src/audit/tests/mod.rs b/sn_client/src/audit/tests/mod.rs deleted file mode 100644 index d00e4b1055..0000000000 --- a/sn_client/src/audit/tests/mod.rs +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod setup; - -use std::collections::BTreeSet; - -use setup::MockNetwork; - -use eyre::Result; -use sn_transfers::SpendAddress; - -use crate::{SpendDag, SpendFault}; - -#[test] -fn test_spend_dag_verify_valid_simple() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - net.send(&owner3, &owner4, 100)?; - net.send(&owner4, &owner5, 100)?; - net.send(&owner5, &owner6, 100)?; - - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_verify_valid_simple")?; - - assert_eq!(dag.verify(&genesis), Ok(BTreeSet::new())); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_poisonning() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner_cheat = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, &owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend1 = net.send(&owner2, &owner3, 100)?; - let spend_ko3 = net.send(&owner3, &owner4, 100)?; - let spend_ok4 = net.send(&owner4, &owner5, 100)?; - let spend_ok5 = net.send(&owner5, &owner6, 100)?; - - // reuse 
that cashnote to perform a double spend far back in history - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2 = net.send(&owner2, &owner_cheat, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_poisonning")?; - - // make sure double spend is detected - assert_eq!(spend1, spend2, "both spends should be at the same address"); - let double_spent = spend1.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are unspendable - let upk = net - .wallets - .get(&owner_cheat) - .expect("owner_cheat wallet to exist") - .cn - .first() - .expect("owner_cheat wallet to have 1 cashnote") - .unique_pubkey(); - let utxo = SpendAddress::from_unique_pubkey(&upk); - let got = dag.get_spend_faults(&utxo); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "UTXO of double spend should be unspendable"); - let s3 = spend_ko3.first().expect("spend_ko3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend_ko3 should be unspendable"); - - // make sure this didn't poison the rest of the DAG - let s4 = spend_ok4.first().expect("spend_ok4 to be unique"); - let s5 = spend_ok5.first().expect("spend_ok5 to be unique"); - let unaffected = BTreeSet::new(); - - assert_eq!(dag.get_spend_faults(s4), unaffected); - assert_eq!(dag.get_spend_faults(s5), unaffected); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_branches() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner3a = net.new_pk_with_balance(0)?; - let owner4a = net.new_pk_with_balance(0)?; - let owner5a = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, &owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend2 = net.send(&owner2, &owner3, 100)?; - let spend3 = net.send(&owner3, &owner4, 100)?; - let spend4 = net.send(&owner4, &owner5, 100)?; - let spend5 = net.send(&owner5, &owner6, 100)?; - - // reuse that cashnote to perform a double spend and create a branch - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2a = net.send(&owner2, &owner3a, 100)?; - let spend3a = net.send(&owner3a, &owner4a, 100)?; - let spend4a = net.send(&owner4a, &owner5a, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - println!("Adding into dag with spend {spend:?}"); - dag.insert(spend.address(), spend.clone()); - } - - assert_eq!(dag.record_faults(&genesis), Ok(())); - // 
dag.dump_to_file("/tmp/test_spend_dag_double_spend_branches")?; - - // make sure double spend is detected - assert_eq!(spend2, spend2a, "both spends should be at the same address"); - let double_spent = spend2.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are marked as double spent - let s3 = spend3.first().expect("spend3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3 should be unspendable"); - let s3a = spend3a.first().expect("spend3a to have an element"); - let got = dag.get_spend_faults(s3a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3a, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3a should be unspendable"); - - // make sure all the descendants further down the branch are poisoned due to a double spent ancestor - let utxo_of_5a = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner5a) - .expect("owner5a wallet to exist") - .cn - .first() - .expect("owner5a wallet to have 1 cashnote") - .unique_pubkey(), - ); - let utxo_of_6 = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(), - ); - let all_descendants = [spend4, spend5, vec![utxo_of_6], spend4a, vec![utxo_of_5a]]; - for d in all_descendants.iter() { - let got = dag.get_spend_faults(d.first().expect("descendant spend to have an element")); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - *d.first().expect("d to have an element"), - format!( - "spend is on one of multiple branches of a double spent ancestor: {double_spent:?}" - ), - )]); - assert_eq!(got, expected, "all descendants should be marked as bad"); - } - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_detection() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2a = net.new_pk_with_balance(0)?; - let owner2b = net.new_pk_with_balance(0)?; - - // perform double spend - let cn_to_reuse = net - .wallets - .get(&owner1) - .expect("owner1 wallet to exist") - .cn - .clone(); - let spend1_addr = net.send(&owner1, &owner2a, 100)?; - net.wallets - .get_mut(&owner1) - .expect("owner1 wallet to still exist") - .cn = cn_to_reuse; - let spend2_addr = net.send(&owner1, &owner2b, 100)?; - - // get the UTXOs of the two spends - let upk_of_2a = net - .wallets - .get(&owner2a) - .expect("owner2a wallet to exist") - .cn - .first() - .expect("owner2a wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2a = SpendAddress::from_unique_pubkey(&upk_of_2a); - let upk_of_2b = net - .wallets - .get(&owner2b) - .expect("owner2b wallet to exist") - .cn - .first() - .expect("owner2b wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2b = SpendAddress::from_unique_pubkey(&upk_of_2b); - - // make DAG - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_detection")?; - - // make sure the double spend 
is detected - assert_eq!( - spend1_addr, spend2_addr, - "both spends should be at the same address" - ); - assert_eq!(spend1_addr.len(), 1, "there should only be one spend"); - let double_spent = spend1_addr.first().expect("spend1_addr to have an element"); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!( - dag.get_spend_faults(double_spent), - expected, - "DAG should have detected double spend" - ); - - // make sure the UTXOs of the double spend are unspendable - let got = dag.get_spend_faults(&utxo_of_2a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2a, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO a of double spend should be unspendable" - ); - - let got = dag.get_spend_faults(&utxo_of_2b); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2b, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO b of double spend should be unspendable" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_missing_ancestry() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let spent_after2 = net - .send(&owner5, &owner6, 100)? 
- .first() - .expect("spent_after2 should have 1 element") - .to_owned(); - let utxo_after3 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after3); - - // create dag with one missing spend - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_missing_ancestry")?; - - // make sure the missing spend makes its descendants invalid - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing, - }]); - assert_eq!(got, expected, "DAG should have detected missing ancestry"); - - let got = dag.get_spend_faults(&spent_after2); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - spent_after2, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error to descendants" - ); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - utxo_addr, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error all the way to descendant utxos" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_orphans() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing1 = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spend_missing2 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spend_missing2 should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner5, &owner6, 100)? 
- .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let utxo_after2 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after2); - - // create dag with two missing spends in the chain - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing1 != s.address() && spend_missing2 != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_orphans")?; - - // make sure the spends after the two missing spends are orphans - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([ - SpendFault::OrphanSpend { - addr: spent_after1, - src: dag.source(), - }, - SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing2, - }, - ]); - assert_eq!(got, expected, "DAG should have detected orphan spend"); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = SpendFault::OrphanSpend { - addr: utxo_addr, - src: dag.source(), - }; - assert!( - got.contains(&expected), - "Utxo of orphan spend should also be an orphan" - ); - Ok(()) -} diff --git a/sn_client/src/audit/tests/setup.rs b/sn_client/src/audit/tests/setup.rs deleted file mode 100644 index 4fa777ff22..0000000000 --- a/sn_client/src/audit/tests/setup.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
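-// Test scaffolding for the DAG tests: wallets are plain BLS key pairs and
-// `send` performs offline transfers, recording every resulting SignedSpend.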
- -use std::collections::{BTreeMap, BTreeSet}; - -use bls::SecretKey; -use eyre::{eyre, Result}; -use sn_transfers::{ - get_genesis_sk, CashNote, DerivationIndex, MainPubkey, MainSecretKey, NanoTokens, SignedSpend, - SignedTransaction, SpendAddress, SpendReason, GENESIS_CASHNOTE, -}; - -pub struct MockWallet { - pub sk: MainSecretKey, - pub cn: Vec, -} - -pub struct MockNetwork { - pub genesis_spend: SpendAddress, - pub spends: BTreeSet, - pub wallets: BTreeMap, -} - -impl MockNetwork { - pub fn genesis() -> Result { - let mut net = MockNetwork { - genesis_spend: SpendAddress::from_unique_pubkey(&GENESIS_CASHNOTE.unique_pubkey()), - spends: BTreeSet::new(), - wallets: BTreeMap::new(), - }; - - // create genesis wallet - let genesis_cn = GENESIS_CASHNOTE.clone(); - let genesis_pk = *GENESIS_CASHNOTE.main_pubkey(); - net.wallets.insert( - genesis_pk, - MockWallet { - sk: get_genesis_sk(), - cn: vec![genesis_cn], - }, - ); - - // spend genesis - let everything = GENESIS_CASHNOTE.value().as_nano(); - let spent_addrs = net - .send(&genesis_pk, &genesis_pk, everything) - .map_err(|e| eyre!("failed to send genesis: {e}"))?; - net.genesis_spend = match spent_addrs.as_slice() { - [one] => *one, - _ => { - return Err(eyre!( - "Expected Genesis spend to be unique but got {spent_addrs:?}" - )) - } - }; - - Ok(net) - } - - pub fn new_pk_with_balance(&mut self, balance: u64) -> Result { - let owner = MainSecretKey::new(SecretKey::random()); - let owner_pk = owner.main_pubkey(); - self.wallets.insert( - owner_pk, - MockWallet { - sk: owner, - cn: Vec::new(), - }, - ); - - if balance > 0 { - let genesis_pk = GENESIS_CASHNOTE.main_pubkey(); - println!("Sending {balance} from genesis {genesis_pk:?} to {owner_pk:?}"); - self.send(genesis_pk, &owner_pk, balance) - .map_err(|e| eyre!("failed to get money from genesis: {e}"))?; - } - Ok(owner_pk) - } - - pub fn send( - &mut self, - from: &MainPubkey, - to: &MainPubkey, - amount: u64, - ) -> Result> { - let mut rng = rand::thread_rng(); - let from_wallet = self - .wallets - .get(from) - .ok_or_else(|| eyre!("from wallet not found: {from:?}"))?; - let to_wallet = self - .wallets - .get(to) - .ok_or_else(|| eyre!("to wallet not found: {to:?}"))?; - - // perform offline transfer - let derivation_index = DerivationIndex::random(&mut rng); - let recipient = vec![( - NanoTokens::from(amount), - to_wallet.sk.main_pubkey(), - derivation_index, - false, - )]; - let tx = SignedTransaction::new( - from_wallet.cn.clone(), - recipient, - from_wallet.sk.main_pubkey(), - SpendReason::default(), - &from_wallet.sk, - ) - .map_err(|e| eyre!("failed to create transfer: {}", e))?; - let spends = tx.spends; - - // update wallets - let mut updated_from_wallet_cns = from_wallet.cn.clone(); - updated_from_wallet_cns.retain(|cn| { - !spends - .iter() - .any(|s| s.unique_pubkey() == &cn.unique_pubkey()) - }); - if let Some(ref change_cn) = tx.change_cashnote { - if !updated_from_wallet_cns - .iter() - .any(|cn| cn.unique_pubkey() == change_cn.unique_pubkey()) - { - updated_from_wallet_cns.extend(tx.change_cashnote); - } - } - - self.wallets - .entry(*from) - .and_modify(|w| w.cn = updated_from_wallet_cns); - self.wallets - .entry(*to) - .and_modify(|w| w.cn.extend(tx.output_cashnotes)); - - // update network spends - let spent_addrs = spends.iter().map(|s| s.address()).collect(); - self.spends.extend(spends); - Ok(spent_addrs) - } -} diff --git a/sn_client/src/chunks.rs b/sn_client/src/chunks.rs deleted file mode 100644 index 7dbcaef92b..0000000000 --- a/sn_client/src/chunks.rs +++ 
/dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod error; -mod pac_man; - -pub(crate) use self::error::{Error, Result}; -pub(crate) use pac_man::{encrypt_large, DataMapLevel}; diff --git a/sn_client/src/chunks/error.rs b/sn_client/src/chunks/error.rs deleted file mode 100644 index 6f9c83474e..0000000000 --- a/sn_client/src/chunks/error.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use self_encryption::MIN_ENCRYPTABLE_BYTES; -use sn_protocol::PrettyPrintRecordKey; -use std::io; -use thiserror::Error; -use xor_name::XorName; - -pub(crate) type Result = std::result::Result; - -/// Internal error. -#[derive(Debug, Error)] -pub enum Error { - #[error("Failed to get find payment for record: {0:?}")] - NoPaymentForRecord(PrettyPrintRecordKey<'static>), - - #[error("Failed to get chunk permit")] - CouldNotGetChunkPermit, - - #[error(transparent)] - SelfEncryption(#[from] self_encryption::Error), - - #[error(transparent)] - Io(#[from] io::Error), - - #[error(transparent)] - Serialisation(#[from] rmp_serde::encode::Error), - - #[error(transparent)] - Deserialisation(#[from] rmp_serde::decode::Error), - - #[error("Cannot store empty file.")] - EmptyFileProvided, - - #[error("File is too small to be encrypted, it is less than {MIN_ENCRYPTABLE_BYTES} bytes")] - FileTooSmall, - - #[error( - "The provided bytes ({size}) is too large to store as a `SmallFile` which maximum can be \ - {maximum}. Store as a LargeFile instead." - )] - TooLargeAsSmallFile { - /// Number of bytes - size: usize, - /// Maximum number of bytes for a `SmallFile` - maximum: usize, - }, - - #[error("Not all chunks were retrieved, expected {expected}, retrieved {retrieved}, missing {missing_chunks:?}.")] - NotEnoughChunksRetrieved { - /// Number of Chunks expected to be retrieved - expected: usize, - /// Number of Chunks retrieved - retrieved: usize, - /// Missing chunks - missing_chunks: Vec, - }, - - #[error("Chunk could not be retrieved from the network: {0:?}")] - ChunkMissing(XorName), - - #[error("Not all data was chunked, expected {expected}, but we have {chunked}.)")] - NotAllDataWasChunked { - /// Number of Chunks expected to be generated - expected: usize, - /// Number of Chunks generated - chunked: usize, - }, -} diff --git a/sn_client/src/chunks/pac_man.rs b/sn_client/src/chunks/pac_man.rs deleted file mode 100644 index 3cd368e320..0000000000 --- a/sn_client/src/chunks/pac_man.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::Result; -use bytes::{BufMut, Bytes, BytesMut}; -use rayon::prelude::*; -use self_encryption::{DataMap, StreamSelfEncryptor, MAX_CHUNK_SIZE}; -use serde::{Deserialize, Serialize}; -use sn_protocol::storage::Chunk; -use std::{ - fs::File, - io::Write, - path::{Path, PathBuf}, -}; -use xor_name::XorName; - -#[derive(Serialize, Deserialize)] -pub(crate) enum DataMapLevel { - // Holds the data map to the source data. - First(DataMap), - // Holds the data map of an _additional_ level of chunks - // resulting from chunking up a previous level data map. - // This happens when that previous level data map was too big to fit in a chunk itself. - Additional(DataMap), -} - -#[expect(unused)] -pub(crate) fn encrypt_from_path(path: &Path, output_dir: &Path) -> Result<(Chunk, Vec)> { - let (data_map, mut encrypted_chunks) = self_encryption::encrypt_from_file(path, output_dir)?; - - let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - - for chunk in additional_chunks.iter() { - encrypted_chunks.push(*chunk.name()); - let file_path = output_dir.join(hex::encode(chunk.name())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn encrypt_large( - file_path: &Path, - output_dir: &Path, -) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { - let mut encryptor = StreamSelfEncryptor::encrypt_from_file( - file_path.to_path_buf(), - Some(output_dir.to_path_buf()), - )?; - - let data_map; - loop { - match encryptor.next_encryption()? { - (None, Some(m)) => { - // Returning a data_map means file encryption is completed. - data_map = m; - break; - } - _ => continue, - } - } - let mut encrypted_chunks: Vec<_> = data_map - .infos() - .iter() - .map(|chunk_info| { - let chunk_file_path = output_dir.join(hex::encode(chunk_info.dst_hash)); - (chunk_info.dst_hash, chunk_file_path) - }) - .collect(); - - // Pack the datamap into chunks that under the same output folder as well. - let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - for chunk in additional_chunks.iter() { - let file_path = output_dir.join(hex::encode(chunk.name())); - encrypted_chunks.push((*chunk.name(), file_path.to_path_buf())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn to_chunk(chunk_content: Bytes) -> Chunk { - Chunk::new(chunk_content) -} - -// Produces a chunk out of the first `DataMap`, which is validated for its size. -// If the chunk is too big, it is self-encrypted and the resulting (additional level) `DataMap` is put into a chunk. -// The above step is repeated as many times as required until the chunk size is valid. -// In other words: If the chunk content is too big, it will be -// self encrypted into additional chunks, and now we have a new `DataMap` -// which points to all of those additional chunks.. and so on. 
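-// In practice each level is drastically smaller than the last (a DataMap is just
-// a short list of chunk infos), so the loop below terminates after a few rounds.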
-fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec<Chunk>)> {
-    let mut chunks = vec![];
-    let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?;
-    debug!("Max chunk size: {} bytes", *MAX_CHUNK_SIZE);
-
-    let (data_map_chunk, additional_chunks) = loop {
-        let chunk = to_chunk(chunk_content);
-        // If the datamap chunk is less than or equal to MAX_CHUNK_SIZE, return it so it can be directly sent to the network.
-        if chunk.serialised_size() <= *MAX_CHUNK_SIZE {
-            chunks.reverse();
-            // Returns the last datamap, and all the chunks produced.
-            break (chunk, chunks);
-        } else {
-            let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE).writer();
-            let mut serialiser = rmp_serde::Serializer::new(&mut bytes);
-            chunk.serialize(&mut serialiser)?;
-            let serialized_chunk = bytes.into_inner().freeze();
-
-            let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)?;
-            chunks = next_encrypted_chunks
-                .par_iter()
-                .map(|c| to_chunk(c.content.clone())) // no need to encrypt what is self-encrypted
-                .chain(chunks)
-                .collect();
-            chunk_content = wrap_data_map(&DataMapLevel::Additional(data_map))?;
-        }
-    };
-
-    Ok((data_map_chunk, additional_chunks))
-}
-
-fn wrap_data_map(data_map: &DataMapLevel) -> Result<Bytes> {
-    // we use an initial/starting size of 300 bytes as that's roughly the current size of a DataMapLevel instance.
-    let mut bytes = BytesMut::with_capacity(300).writer();
-    let mut serialiser = rmp_serde::Serializer::new(&mut bytes);
-    data_map.serialize(&mut serialiser)?;
-    Ok(bytes.into_inner().freeze())
-}
diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs
deleted file mode 100644
index d19ce4d58d..0000000000
--- a/sn_client/src/error.rs
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-pub(crate) type Result<T> = std::result::Result<T, Error>;
-
-use crate::UploadSummary;
-
-use super::ClientEvent;
-use sn_protocol::NetworkAddress;
-use sn_registers::{Entry, EntryHash};
-use sn_transfers::SpendAddress;
-use std::collections::BTreeSet;
-use thiserror::Error;
-use tokio::time::Duration;
-use xor_name::XorName;
-
-/// Internal error.
-#[derive(Debug, Error)]
-pub enum Error {
-    #[error("Genesis disbursement failed")]
-    GenesisDisbursement,
-    #[error("Faucet disbursement has already occurred")]
-    FaucetDisbursement,
-
-    #[error("Genesis error {0}")]
-    GenesisError(#[from] sn_transfers::GenesisError),
-
-    #[error("Wallet Error {0}.")]
-    Wallet(#[from] sn_transfers::WalletError),
-
-    #[error("Transfer Error {0}.")]
-    Transfer(#[from] sn_transfers::TransferError),
-
-    #[error("Network Error {0}.")]
-    Network(#[from] sn_networking::NetworkError),
-
-    #[error("Protocol error {0}.")]
-    Protocol(#[from] sn_protocol::error::Error),
-
-    #[error("Register error {0}.")]
-    Register(#[from] sn_registers::Error),
-
-    #[error("Chunks error {0}.")]
-    Chunks(#[from] super::chunks::Error),
-
-    #[error("No cashnote found at {0:?}.")]
-    NoCashNoteFound(SpendAddress),
-
-    #[error("Decrypting a Folder's item failed: {0}")]
-    FolderEntryDecryption(EntryHash),
-
-    #[error("SelfEncryption Error {0}.")]
-    SelfEncryptionIO(#[from] self_encryption::Error),
-
-    #[error("System IO Error {0}.")]
-    SystemIO(#[from] std::io::Error),
-
-    #[error("Events receiver error {0}.")]
-    EventsReceiver(#[from] tokio::sync::broadcast::error::RecvError),
-
-    #[error("Events sender error {0}.")]
-    EventsSender(#[from] tokio::sync::broadcast::error::SendError<ClientEvent>),
-
-    #[error(transparent)]
-    JoinError(#[from] tokio::task::JoinError),
-
-    #[error("Invalid DAG")]
-    InvalidDag,
-    #[error("Serialization error: {0:?}")]
-    Serialization(#[from] rmp_serde::encode::Error),
-    #[error("Deserialization error: {0:?}")]
-    Deserialization(#[from] rmp_serde::decode::Error),
-
-    #[error(
-        "Content branches detected in the Register which need to be merged/resolved by user. \
-        Entries hashes of branches are: {0:?}"
-    )]
-    ContentBranchDetected(BTreeSet<(EntryHash, Entry)>),
-
-    #[error("The provided amount contains zero nanos")]
-    AmountIsZero,
-
-    #[error("The payee for the address {0:?} was not found.")]
-    PayeeNotFound(NetworkAddress),
-
-    /// CashNote add would overflow
-    #[error("Total price exceeds possible token amount")]
-    TotalPriceTooHigh,
-
-    #[error("Could not connect to the network in {0:?}")]
-    ConnectionTimeout(Duration),
-
-    #[error("Could not send files event")]
-    CouldNotSendFilesEvent,
-
-    #[error("Incorrect Download Option")]
-    IncorrectDownloadOption,
-
-    #[error("The provided data map is empty")]
-    EmptyDataMap,
-
-    #[error("Error occurred while assembling the downloaded chunks")]
-    FailedToAssembleDownloadedChunks,
-
-    #[error("Task completion notification channel is done")]
-    FailedToReadFromNotificationChannel,
-
-    #[error("Could not find register after batch sync: {0:?}")]
-    RegisterNotFoundAfterUpload(XorName),
-
-    #[error("Could not connect due to incompatible network protocols. 
Our protocol: {0} Network protocol: {1}")] - UnsupportedProtocol(String, String), - - // ------ Upload Errors -------- - #[error("Overflow occurred while adding values")] - NumericOverflow, - - #[error("Uploadable item not found: {0:?}")] - UploadableItemNotFound(XorName), - - #[error("Invalid upload item found")] - InvalidUploadItemFound, - - #[error("The state tracked by the uploader is empty")] - UploadStateTrackerIsEmpty, - - #[error("Internal task channel dropped")] - InternalTaskChannelDropped, - - #[error("Multiple consecutive network errors reported during upload")] - SequentialNetworkErrors, - - #[error("Too many sequential payment errors reported during upload")] - SequentialUploadPaymentError, - - #[error("The maximum specified repayments has been reached for a single item: {0:?}")] - MaximumRepaymentsReached(XorName), - - #[error("The upload failed with maximum repayments reached for multiple items: {items:?} Summary: {summary:?}")] - UploadFailedWithMaximumRepaymentsReached { - items: Vec, - summary: UploadSummary, - }, - - #[error("Error occurred when access wallet file")] - FailedToAccessWallet, - - #[error("Error parsing entropy for mnemonic phrase")] - FailedToParseEntropy, - - #[error("Error parsing mnemonic phrase")] - FailedToParseMnemonic, - - #[error("Invalid mnemonic seed phrase")] - InvalidMnemonicSeedPhrase, - - #[error("SecretKey could not be created from the provided bytes")] - InvalidKeyBytes, -} diff --git a/sn_client/src/event.rs b/sn_client/src/event.rs deleted file mode 100644 index 14ba654d0f..0000000000 --- a/sn_client/src/event.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use serde::Serialize; -use tokio::sync::broadcast::{self, error::RecvError}; - -// Channel where events will be broadcasted by the client. -#[derive(Clone, Debug)] -pub struct ClientEventsBroadcaster(broadcast::Sender); - -impl Default for ClientEventsBroadcaster { - fn default() -> Self { - Self(broadcast::channel(100).0) - } -} - -impl ClientEventsBroadcaster { - /// Returns a new receiver to listen to the channel. - /// Multiple receivers can be actively listening. - pub fn subscribe(&self) -> ClientEventsReceiver { - ClientEventsReceiver(self.0.subscribe()) - } - - // Broadcast a new event, meant to be a helper only used by the client's internals. - pub(crate) fn broadcast(&self, event: ClientEvent) { - if let Err(err) = self.0.send(event) { - if self.0.receiver_count() == 0 { - return; - } - trace!("Could not broadcast ClientEvent, though we do have listeners: {err:?}"); - } - } -} - -/// Type of events broadcasted by the client to the public API. -#[derive(Clone, custom_debug::Debug, Serialize)] -pub enum ClientEvent { - /// A peer has been added to the Routing table. - /// Also contains the max number of peers to connect to before we receive ClientEvent::ConnectedToNetwork - PeerAdded { max_peers_to_connect: usize }, - /// We've encountered a Peer with an unsupported protocol. 
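-    /// Carries both protocol strings so callers can report the exact mismatch.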
- PeerWithUnsupportedProtocol { - our_protocol: String, - their_protocol: String, - }, - /// The client has been connected to the network - ConnectedToNetwork, - /// No network activity has been received for a given duration - /// we should error out - InactiveClient(tokio::time::Duration), -} - -/// Receiver Channel where users of the public API can listen to events broadcasted by the client. -#[derive(Debug)] -pub struct ClientEventsReceiver(pub(super) broadcast::Receiver); - -impl ClientEventsReceiver { - /// Receive a new event, meant to be used by the user of the public API. - pub async fn recv(&mut self) -> std::result::Result { - self.0.recv().await - } -} diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs deleted file mode 100644 index b3ccaace78..0000000000 --- a/sn_client/src/faucet.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::send, Client, Error, Result}; -use sn_transfers::{load_genesis_wallet, HotWallet, NanoTokens, FOUNDATION_PK}; - -const INITIAL_FAUCET_BALANCE: NanoTokens = NanoTokens::from(900000000000000000); - -/// Use the client to load the faucet wallet from the genesis Wallet. -/// With all balance transferred from the genesis_wallet to the faucet_wallet. -pub async fn fund_faucet_from_genesis_wallet( - client: &Client, - faucet_wallet: &mut HotWallet, -) -> Result<()> { - faucet_wallet.try_load_cash_notes()?; - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - println!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - debug!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - - return Ok(()); - } - - info!("funding faucet from genesis..."); - - // Confirm Genesis not used yet - if client.is_genesis_spend_present().await { - warn!("Faucet can't get funded from genesis, genesis is already spent!"); - println!("Faucet can't get funded from genesis, genesis is already spent!"); - // Try loading cash notes up to 100 times, waiting 1 second between attempts - for attempt in 1..=100 { - println!("Attempt {attempt} to load cash notes"); - debug!("Attempt {attempt} to load cash notes"); - faucet_wallet.try_load_cash_notes()?; - if !faucet_wallet.balance().is_zero() { - println!("Successfully loaded cash notes on attempt {attempt}"); - debug!("Successfully loaded cash notes on attempt {attempt}"); - return Ok(()); - } - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - - // If we've tried 100 times and still have zero balance, return an error - return Err(Error::FaucetDisbursement); - } - - println!("Initiating genesis..."); - debug!("Initiating genesis..."); - let genesis_wallet = load_genesis_wallet()?; - let genesis_balance = genesis_wallet.balance(); - - let (foundation_cashnote, faucet_cashnote) = { - println!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - debug!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet 
wallet balance: {}", faucet_wallet.balance()); - let faucet_cashnote = send( - genesis_wallet, - INITIAL_FAUCET_BALANCE, - faucet_wallet.address(), - client, - true, - ) - .await?; - - faucet_wallet - .deposit_and_store_to_disk(&vec![faucet_cashnote.clone()]) - .expect("Faucet wallet shall be stored successfully."); - - // now send the money to the foundation - let foundation_balance = genesis_balance - .checked_sub(INITIAL_FAUCET_BALANCE) - .ok_or(Error::GenesisDisbursement)?; - - println!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - debug!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - - let genesis_wallet = load_genesis_wallet()?; - - let foundation_cashnote = send( - genesis_wallet, - foundation_balance, - *FOUNDATION_PK, - client, - true, - ) - .await?; - - (foundation_cashnote, faucet_cashnote) - }; - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet wallet balance: {}", faucet_wallet.balance()); - - println!("Verifying the transfer from genesis..."); - debug!("Verifying the transfer from genesis..."); - if let Err(error) = client.verify_cashnote(&foundation_cashnote).await { - error!("Could not verify the transfer from genesis to foundation: {error}. Panicking."); - panic!("Could not verify the transfer from genesis to foundation: {error}"); - } else { - println!( - "Successfully verified the transfer from genesis to foundation on the second try." - ); - - #[cfg(not(target_arch = "wasm32"))] - { - // write the foundation cashnote to disk - let root_dir = faucet_wallet.api().wallet_dir(); - - let foundation_transfer_path = root_dir.join("foundation_disbursement.transfer"); - - debug!("Writing cash note to: {foundation_transfer_path:?}"); - - let transfer = - sn_transfers::Transfer::transfer_from_cash_note(&foundation_cashnote)?.to_hex()?; - - if let Err(error) = std::fs::write(foundation_transfer_path, transfer) { - error!("Could not write the foundation transfer to disk: {error}."); - return Err(Error::from(error)); - } - } - - info!("Successfully verified the transfer from genesis to foundation on the second try."); - } - - if let Err(error) = client.verify_cashnote(&faucet_cashnote).await { - error!("Could not verify the transfer from genesis to faucet: {error}. Panicking."); - panic!("Could not verify the transfer from genesis to faucet: {error}"); - } else { - println!("Successfully verified the transfer from genesis to faucet on the second try."); - info!("Successfully verified the transfer from genesis to faucet on the second try."); - } - - Ok(()) -} diff --git a/sn_client/src/files.rs b/sn_client/src/files.rs deleted file mode 100644 index 8643b71961..0000000000 --- a/sn_client/src/files.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
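-
-// A minimal sketch of the genesis disbursement arithmetic above, using only
-// the `NanoTokens` API already in play (`checked_sub`): the faucet takes a
-// fixed share and the foundation receives the remainder, with underflow
-// mapped to the `Error::GenesisDisbursement` path. The function name is
-// illustrative only.
-fn split_genesis_sketch(
-    genesis_balance: sn_transfers::NanoTokens,
-    faucet_share: sn_transfers::NanoTokens,
-) -> Option<(sn_transfers::NanoTokens, sn_transfers::NanoTokens)> {
-    // `checked_sub` returns `None` on underflow instead of panicking.
-    let foundation_share = genesis_balance.checked_sub(faucet_share)?;
-    Some((faucet_share, foundation_share))
-}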
-
-pub(crate) mod download;
-
-use crate::{
-    acc_packet::load_account_wallet_or_create_with_mnemonic, chunks::Error as ChunksError,
-    error::Result, wallet::StoragePaymentResult, Client, Error, WalletClient,
-};
-use bytes::Bytes;
-use self_encryption::{self, MIN_ENCRYPTABLE_BYTES};
-use sn_protocol::{
-    storage::{Chunk, ChunkAddress, RetryStrategy},
-    NetworkAddress,
-};
-
-use std::{
-    fs::{self, create_dir_all, File},
-    io::Write,
-    path::{Path, PathBuf},
-};
-use tempfile::tempdir;
-use tracing::trace;
-use xor_name::XorName;
-
-/// `BATCH_SIZE` determines the number of chunks that are processed in parallel during the payment and upload process.
-pub const BATCH_SIZE: usize = 16;
-
-/// File APIs.
-#[derive(Clone)]
-pub struct FilesApi {
-    pub(crate) client: Client,
-    pub(crate) wallet_dir: PathBuf,
-}
-
-/// The result of chunking a file: the file's xorname (head address), the data map chunk, the file size, and the chunk names/paths.
-/// If the DataMapChunk exists but is not stored on the network, the content will not be accessible at this ChunkAddress(XorName).
-type ChunkFileResult = Result<(ChunkAddress, Chunk, u64, Vec<(XorName, PathBuf)>)>;
-
-impl FilesApi {
-    /// Create a file APIs instance.
-    pub fn new(client: Client, wallet_dir: PathBuf) -> Self {
-        Self { client, wallet_dir }
-    }
-
-    pub fn build(client: Client, wallet_dir: PathBuf) -> Result<Self> {
-        let wallet = load_account_wallet_or_create_with_mnemonic(&wallet_dir, None)?;
-
-        if wallet.balance().is_zero() {
-            Err(Error::AmountIsZero)
-        } else {
-            Ok(FilesApi::new(client, wallet_dir))
-        }
-    }
-
-    /// Return the client instance
-    pub fn client(&self) -> &Client {
-        &self.client
-    }
-
-    /// Create a new WalletClient for a given root directory.
-    pub fn wallet(&self) -> Result<WalletClient> {
-        let path = self.wallet_dir.as_path();
-
-        let wallet = load_account_wallet_or_create_with_mnemonic(path, None)?;
-
-        Ok(WalletClient::new(self.client.clone(), wallet))
-    }
-
-    /// Tries to chunk the file, returning `(head_address, data_map_chunk, file_size, chunk_names)`
-    /// and writes encrypted chunks to disk.
-    pub fn chunk_file(
-        file_path: &Path,
-        chunk_dir: &Path,
-        include_data_map_in_chunks: bool,
-    ) -> ChunkFileResult {
-        let file = File::open(file_path)?;
-        let metadata = file.metadata()?;
-        let file_size = metadata.len();
-
-        let (head_address, data_map_chunk, mut chunks_paths) =
-            if file_size < MIN_ENCRYPTABLE_BYTES as u64 {
-                Err(ChunksError::FileTooSmall)?
-            } else {
-                let (data_map_chunk, chunks) = encrypt_large(file_path, chunk_dir)?;
-                (*data_map_chunk.name(), data_map_chunk, chunks)
-            };
-
-        debug!("include_data_map_in_chunks {include_data_map_in_chunks:?}");
-
-        if include_data_map_in_chunks {
-            info!("Data_map_chunk to be written!");
-            let data_map_path = chunk_dir.join(hex::encode(*data_map_chunk.name()));
-
-            trace!("Data_map_chunk being written to {data_map_path:?}");
-            let mut output_file = File::create(data_map_path.clone())?;
-            output_file.write_all(&data_map_chunk.value)?;
-
-            chunks_paths.push((*data_map_chunk.name(), data_map_path))
-        }
-
-        Ok((
-            ChunkAddress::new(head_address),
-            data_map_chunk,
-            file_size,
-            chunks_paths,
-        ))
-    }
-
-    /// Directly writes Chunks to the network in the
-    /// form of immutable self encrypted chunks.
-    ///
-    /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default
-    pub async fn get_local_payment_and_upload_chunk(
-        &self,
-        chunk: Chunk,
-        verify_store: bool,
-        retry_strategy: Option<RetryStrategy>,
-    ) -> Result<()> {
-        let chunk_addr = chunk.network_address();
-        trace!("Client upload started for chunk: {chunk_addr:?}");
-
-        let wallet_client = self.wallet()?;
-        let (payment, payee) = wallet_client.get_recent_payment_for_addr(&chunk_addr)?;
-
-        debug!("Payments for chunk: {chunk_addr:?} to {payee:?}: {payment:?}");
-
-        self.client
-            .store_chunk(chunk, payee, payment, verify_store, retry_strategy)
-            .await?;
-
-        wallet_client.remove_payment_for_addr(&chunk_addr)?;
-
-        trace!("Client upload completed for chunk: {chunk_addr:?}");
-        Ok(())
-    }
-
-    /// Pay for a given set of chunks.
-    ///
-    /// Returns the cost and the resulting new balance of the local wallet.
-    pub async fn pay_for_chunks(&self, chunks: Vec<XorName>) -> Result<StoragePaymentResult> {
-        let mut wallet_client = self.wallet()?;
-        info!("Paying for and uploading {:?} chunks", chunks.len());
-
-        let res = wallet_client
-            .pay_for_storage(
-                chunks
-                    .iter()
-                    .map(|name| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))),
-            )
-            .await?;
-
-        wallet_client.store_local_wallet()?;
-        Ok(res)
-    }
-
-    // --------------------------------------------
-    // ---------- Private helpers -----------------
-    // --------------------------------------------
-
-    /// Used for testing
-    pub async fn upload_test_bytes(&self, bytes: Bytes, verify: bool) -> Result<NetworkAddress> {
-        let temp_dir = tempdir()?;
-        let file_path = temp_dir.path().join("tempfile");
-        let mut file = File::create(&file_path)?;
-        file.write_all(&bytes)?;
-
-        let chunk_path = temp_dir.path().join("chunk_path");
-        create_dir_all(chunk_path.clone())?;
-
-        let (head_address, _data_map, _file_size, chunks_paths) =
-            Self::chunk_file(&file_path, &chunk_path, true)?;
-
-        for (_chunk_name, chunk_path) in chunks_paths {
-            let chunk = Chunk::new(Bytes::from(fs::read(chunk_path)?));
-            self.get_local_payment_and_upload_chunk(chunk, verify, None)
-                .await?;
-        }
-
-        Ok(NetworkAddress::ChunkAddress(head_address))
-    }
-}
-
-/// Encrypts a [`LargeFile`] and returns the resulting address and all chunk names.
-/// Corresponding encrypted chunks are written in the specified output folder.
-/// Does not store anything to the network.
-///
-/// Returns the data map as a chunk, and the resulting chunks
-fn encrypt_large(file_path: &Path, output_dir: &Path) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> {
-    Ok(crate::chunks::encrypt_large(file_path, output_dir)?)
-}
diff --git a/sn_client/src/files/download.rs b/sn_client/src/files/download.rs
deleted file mode 100644
index 4444fab023..0000000000
--- a/sn_client/src/files/download.rs
+++ /dev/null
@@ -1,532 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
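-
-// A minimal sketch of the `FilesApi` flow defined above, assuming a connected
-// `client` and a funded wallet in `wallet_dir` (placeholder values): chunk the
-// file to disk, pay for all chunk addresses in one batch, then upload each
-// chunk with the payment stored in the local wallet. The function name is
-// illustrative only; it mirrors the `upload_test_bytes` helper above.
-async fn upload_one_file_sketch(
-    client: Client,
-    wallet_dir: PathBuf,
-    file_path: &std::path::Path,
-    chunk_dir: &std::path::Path,
-) -> Result<sn_protocol::NetworkAddress> {
-    let files_api = FilesApi::new(client, wallet_dir);
-    // Chunk the file, including the data map chunk so the file is self-contained.
-    let (head_address, _data_map, _file_size, chunks_paths) =
-        FilesApi::chunk_file(file_path, chunk_dir, true)?;
-    // Pay for every chunk name in a single storage payment.
-    let names: Vec<XorName> = chunks_paths.iter().map(|(name, _)| *name).collect();
-    let _payment_result = files_api.pay_for_chunks(names).await?;
-    // Upload each encrypted chunk, verifying that it was stored.
-    for (_name, path) in chunks_paths {
-        let chunk = Chunk::new(Bytes::from(fs::read(path)?));
-        files_api
-            .get_local_payment_and_upload_chunk(chunk, true, None)
-            .await?;
-    }
-    Ok(sn_protocol::NetworkAddress::ChunkAddress(head_address))
-}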
-
-use crate::{
-    chunks::{DataMapLevel, Error as ChunksError},
-    error::{Error as ClientError, Result},
-    Client, FilesApi, BATCH_SIZE,
-};
-use bytes::Bytes;
-use futures::StreamExt;
-use itertools::Itertools;
-use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk, StreamSelfDecryptor};
-use sn_networking::target_arch::Instant;
-use sn_protocol::storage::{Chunk, ChunkAddress, RetryStrategy};
-
-use std::{collections::HashMap, fs, path::PathBuf};
-use tokio::sync::mpsc::{self};
-use xor_name::XorName;
-
-/// The events emitted from the download process.
-pub enum FilesDownloadEvent {
-    /// Downloaded a Chunk from the network
-    Downloaded(ChunkAddress),
-    /// The total number of chunks we are about to download.
-    /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk.
-    ChunksCount(usize),
-    /// The total number of data map chunks that we are about to download. This happens if the datamap file is
-    /// very large.
-    /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk.
-    DatamapCount(usize),
-    /// The download process has terminated with an error.
-    Error,
-}
-
-// Internally used to differentiate between the various ways that the downloaded chunks are returned.
-enum DownloadReturnType {
-    EncryptedChunks(Vec<EncryptedChunk>),
-    DecryptedBytes(Bytes),
-    WrittenToFileSystem,
-}
-
-/// `FilesDownload` provides functionality for downloading chunks with support for retries and queuing.
-/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function.
-/// To modify the configuration, use the provided setter methods (`set_...` functions).
-pub struct FilesDownload {
-    // Configurations
-    batch_size: usize,
-    show_holders: bool,
-    retry_strategy: RetryStrategy,
-    // API
-    api: FilesApi,
-    // Events
-    event_sender: Option<mpsc::Sender<FilesDownloadEvent>>,
-    logged_event_sender_absence: bool,
-}
-
-impl FilesDownload {
-    /// Creates a new instance of `FilesDownload` with the default configuration.
-    /// To modify the configuration, use the provided setter methods (`set_...` functions).
-    pub fn new(files_api: FilesApi) -> Self {
-        Self {
-            batch_size: BATCH_SIZE,
-            show_holders: false,
-            retry_strategy: RetryStrategy::Quick,
-            api: files_api,
-            event_sender: None,
-            logged_event_sender_absence: false,
-        }
-    }
-
-    /// Sets the default batch size that determines the number of chunks that are downloaded in parallel.
-    ///
-    /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`.
-    pub fn set_batch_size(mut self, batch_size: usize) -> Self {
-        self.batch_size = batch_size;
-        self
-    }
-
-    /// Sets the option to display the holders that are expected to be holding a chunk during verification.
-    ///
-    /// By default, this option is set to false.
-    pub fn set_show_holders(mut self, show_holders: bool) -> Self {
-        self.show_holders = show_holders;
-        self
-    }
-
-    /// Sets the RetryStrategy to control the retry attempts on failure.
-    ///
-    /// By default, this option is set to RetryStrategy::Quick
-    pub fn set_retry_strategy(mut self, retry_strategy: RetryStrategy) -> Self {
-        self.retry_strategy = retry_strategy;
-        self
-    }
-
-    /// Returns a receiver for file download events.
-    /// This method is optional and the download process can be performed without it.
-    pub fn get_events(&mut self) -> mpsc::Receiver<FilesDownloadEvent> {
-        let (event_sender, event_receiver) = mpsc::channel(10);
-        // should we return an error if a sender is already set?
-        self.event_sender = Some(event_sender);
-
-        event_receiver
-    }
-
-    /// Download bytes from the network. The contents are spread across
-    /// multiple chunks in the network. This function invokes the self-encryptor and returns
-    /// the data that was initially stored.
-    ///
-    /// Takes `position` and `length` arguments which specify the start position
-    /// and the length of bytes to be read.
-    /// Passing `0` to position reads the data from the beginning,
-    /// and the `length` is just an upper limit.
-    pub async fn download_from(
-        &mut self,
-        address: ChunkAddress,
-        position: usize,
-        length: usize,
-    ) -> Result<Bytes> {
-        // clean up the trackers/stats
-        self.logged_event_sender_absence = false;
-
-        let result = self.download_from_inner(address, position, length).await;
-
-        // send an event indicating that the download process completed with an error
-        if result.is_err() {
-            self.send_event(FilesDownloadEvent::Error).await?;
-        }
-
-        // drop the sender to close the channel.
-        let sender = self.event_sender.take();
-        drop(sender);
-
-        result
-    }
-
-    pub async fn download_from_inner(
-        &mut self,
-        address: ChunkAddress,
-        position: usize,
-        length: usize,
-    ) -> Result<Bytes> {
-        debug!("Reading {length} bytes at: {address:?}, starting from position: {position}");
-        let chunk = self
-            .api
-            .client
-            .get_chunk(address, false, Some(self.retry_strategy))
-            .await?;
-
-        // First try to deserialize a LargeFile, if it works, we go and seek it.
-        // If an error occurs, we consider it to be a SmallFile.
-        if let Ok(data_map) = self.unpack_chunk(chunk.clone()).await {
-            let info = self_encryption::seek_info(data_map.file_size(), position, length);
-            let range = &info.index_range;
-            let all_infos = data_map.infos();
-
-            let to_download = (range.start..range.end + 1)
-                .clone()
-                .map(|i| all_infos[i].clone())
-                .collect_vec();
-            let to_download = DataMap::new(to_download);
-
-            // not written to file and return the encrypted chunks
-            if let DownloadReturnType::EncryptedChunks(encrypted_chunks) =
-                self.read(to_download, None, true, false).await?
-            {
-                let bytes = self_encryption::decrypt_range(
-                    &data_map,
-                    &encrypted_chunks,
-                    info.relative_pos,
-                    length,
-                )
-                .map_err(ChunksError::SelfEncryption)?;
-                return Ok(bytes);
-            } else {
-                error!("IncorrectDownloadOption: expected to get the encrypted chunks back");
-                return Err(ClientError::IncorrectDownloadOption);
-            }
-        }
-
-        // The error above is ignored to avoid leaking the storage format detail of SmallFiles and LargeFiles.
-        // The basic idea is that we're trying to deserialize as one, and then the other.
-        // The cost of it is that some errors will not be seen without a refactor.
-        let mut bytes = chunk.value().clone();
-
-        let _ = bytes.split_to(position);
-        bytes.truncate(length);
-
-        Ok(bytes)
-    }
-
-    /// Download a file from the network and get the decrypted bytes.
-    /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address.
-    pub async fn download_file(
-        &mut self,
-        address: ChunkAddress,
-        data_map_chunk: Option<Chunk>,
-    ) -> Result<Bytes> {
-        if let Some(bytes) = self
-            .download_entire_file(address, data_map_chunk, None)
-            .await?
-        {
-            Ok(bytes)
-        } else {
-            error!("IncorrectDownloadOption: expected to get decrypted bytes, but we got None");
-            Err(ClientError::IncorrectDownloadOption)
-        }
-    }
-
-    /// Download a file from the network and write it to the provided path.
-    /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address.
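-    ///
-    /// # Example
-    ///
-    /// A brief sketch mirroring this crate's other doc examples; the address
-    /// and output path below are placeholders.
-    /// ```no_run
-    /// # use sn_client::{Client, Error, FilesApi, FilesDownload};
-    /// # use sn_protocol::storage::ChunkAddress;
-    /// # use bls::SecretKey;
-    /// # use std::path::PathBuf;
-    /// # use xor_name::XorName;
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(), Error> {
-    /// # let mut rng = rand::thread_rng();
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// let files_api = FilesApi::new(client, PathBuf::from("wallet-dir"));
-    /// let mut files_download = FilesDownload::new(files_api);
-    /// // No local datamap chunk is supplied, so it is fetched from the network.
-    /// let address = ChunkAddress::new(XorName::random(&mut rng));
-    /// files_download
-    ///     .download_file_to_path(address, None, PathBuf::from("downloaded-file"))
-    ///     .await?;
-    /// # Ok(())
-    /// # }
-    /// ```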
-    pub async fn download_file_to_path(
-        &mut self,
-        address: ChunkAddress,
-        data_map_chunk: Option<Chunk>,
-        path: PathBuf,
-    ) -> Result<()> {
-        if self
-            .download_entire_file(address, data_map_chunk, Some(path))
-            .await?
-            .is_none()
-        {
-            Ok(())
-        } else {
-            error!(
-                "IncorrectDownloadOption: expected to not get any decrypted bytes, but got Some"
-            );
-            Err(ClientError::IncorrectDownloadOption)
-        }
-    }
-
-    /// Download a file from the network.
-    /// If you want to track the download progress, use the `get_events` method.
-    async fn download_entire_file(
-        &mut self,
-        address: ChunkAddress,
-        data_map_chunk: Option<Chunk>,
-        downloaded_file_path: Option<PathBuf>,
-    ) -> Result<Option<Bytes>> {
-        // clean up the trackers/stats
-        self.logged_event_sender_absence = false;
-
-        let result = self
-            .download_entire_file_inner(address, data_map_chunk, downloaded_file_path)
-            .await;
-
-        // send an event indicating that the download process completed with an error
-        if result.is_err() {
-            self.send_event(FilesDownloadEvent::Error).await?;
-        }
-
-        // drop the sender to close the channel.
-        let sender = self.event_sender.take();
-        drop(sender);
-
-        result
-    }
-
-    async fn download_entire_file_inner(
-        &mut self,
-        address: ChunkAddress,
-        data_map_chunk: Option<Chunk>,
-        downloaded_file_path: Option<PathBuf>,
-    ) -> Result<Option<Bytes>> {
-        let head_chunk = if let Some(chunk) = data_map_chunk {
-            info!("Downloading via supplied local datamap");
-            chunk
-        } else {
-            match self
-                .api
-                .client
-                .get_chunk(address, self.show_holders, Some(self.retry_strategy))
-                .await
-            {
-                Ok(chunk) => chunk,
-                Err(err) => {
-                    error!("Failed to fetch head chunk {address:?}");
-                    return Err(err);
-                }
-            }
-        };
-
-        // first try to deserialize a LargeFile, if it works, we go and seek it
-        match self.unpack_chunk(head_chunk.clone()).await {
-            Ok(data_map) => {
-                // read the whole datamap; this emits download events along the way
-                match self
-                    .read(data_map, downloaded_file_path, false, false)
-                    .await?
-                {
-                    DownloadReturnType::EncryptedChunks(_) => {
-                        error!("IncorrectDownloadOption: we should not be getting the encrypted chunks back as it is set to false.");
-                        Err(ClientError::IncorrectDownloadOption)
-                    }
-                    DownloadReturnType::DecryptedBytes(bytes) => Ok(Some(bytes)),
-                    DownloadReturnType::WrittenToFileSystem => Ok(None),
-                }
-            }
-            Err(ClientError::Chunks(ChunksError::Deserialisation(_))) => {
-                // Only in the case of a deserialisation error shall we consider
-                // the head chunk to be a SmallFile.
-                // With the min-size now set to 3 bytes, such a case should be rare.
-                // Hence we raise a warning for it.
-                warn!("Considering head chunk {address:?} as a SmallFile");
-                println!("Considering head chunk {address:?} as a SmallFile");
-
-                self.send_event(FilesDownloadEvent::ChunksCount(1)).await?;
-                self.send_event(FilesDownloadEvent::Downloaded(address))
-                    .await?;
-                if let Some(path) = downloaded_file_path {
-                    fs::write(path, head_chunk.value().clone())?;
-                    Ok(None)
-                } else {
-                    Ok(Some(head_chunk.value().clone()))
-                }
-            }
-            Err(err) => {
-                // For a large data_map that consists of multiple chunks,
-                // the `unpack_chunk` function will try to fetch those chunks from the network.
-                // During the process, any chunk could fail to download,
-                // which triggers an error to be raised.
-                error!("Encountered an error when unpacking head_chunk {address:?}: {err:?}");
-                println!("Encountered an error when unpacking head_chunk {address:?}: {err:?}");
-                Err(err)
-            }
-        }
-    }
-
-    /// The internal logic to download the provided chunks inside the datamap.
-    /// If the decrypted_file_path is provided, we return DownloadReturnType::WrittenToFileSystem
-    /// If return_encrypted_chunks is true, we return DownloadReturnType::EncryptedChunks
-    /// Else we return DownloadReturnType::DecryptedBytes
-    ///
-    /// Set we_are_downloading_a_datamap if we want to emit the DatamapCount else we emit ChunksCount
-    async fn read(
-        &mut self,
-        data_map: DataMap,
-        decrypted_file_path: Option<PathBuf>,
-        return_encrypted_chunks: bool,
-        we_are_downloading_a_datamap: bool,
-    ) -> Result<DownloadReturnType> {
-        // used internally
-        enum DownloadKind {
-            FileSystem(StreamSelfDecryptor),
-            Memory(Vec<EncryptedChunk>),
-        }
-
-        let mut download_kind = {
-            if let Some(path) = decrypted_file_path {
-                DownloadKind::FileSystem(StreamSelfDecryptor::decrypt_to_file(path, &data_map)?)
-            } else {
-                DownloadKind::Memory(Vec::new())
-            }
-        };
-        let chunk_infos = data_map.infos();
-        let expected_count = chunk_infos.len();
-
-        if we_are_downloading_a_datamap {
-            self.send_event(FilesDownloadEvent::ChunksCount(expected_count))
-                .await?;
-        } else {
-            // we're downloading the chunks related to a huge datamap
-            self.send_event(FilesDownloadEvent::DatamapCount(expected_count))
-                .await?;
-        }
-
-        let now = Instant::now();
-
-        let client_clone = self.api.client.clone();
-        let show_holders = self.show_holders;
-        let retry_strategy = self.retry_strategy;
-        // the initial index is not always 0 as we might seek a range of bytes. So fetch the first index
-        let mut current_index = chunk_infos
-            .first()
-            .ok_or_else(|| ClientError::EmptyDataMap)?
-            .index;
-        let mut stream = futures::stream::iter(chunk_infos.into_iter())
-            .map(|chunk_info| {
-                Self::get_chunk(
-                    client_clone.clone(),
-                    chunk_info.dst_hash,
-                    chunk_info.index,
-                    show_holders,
-                    retry_strategy,
-                )
-            })
-            .buffer_unordered(self.batch_size);
-
-        let mut chunk_download_cache = HashMap::new();
-
-        while let Some(result) = stream.next().await {
-            let (chunk_address, index, encrypted_chunk) = result?;
-            // notify about the download
-            self.send_event(FilesDownloadEvent::Downloaded(chunk_address))
-                .await?;
-            info!("Downloaded chunk of index {index:?}. We are at current_index {current_index:?}");
-
-            // check if current_index is present in the cache before comparing the fetched index.
-            // try to keep removing from the cache until we run out of sequential chunks to insert.
-            while let Some(encrypted_chunk) = chunk_download_cache.remove(&current_index) {
-                debug!("Got current_index {current_index:?} from the download cache. Incrementing current index");
-                match &mut download_kind {
-                    DownloadKind::FileSystem(decryptor) => {
-                        let _ = decryptor.next_encrypted(encrypted_chunk)?;
-                    }
-                    DownloadKind::Memory(collector) => collector.push(encrypted_chunk),
-                }
-                current_index += 1;
-            }
-            // now check if we can process the fetched index, else cache it.
-            if index == current_index {
-                debug!("The downloaded chunk's index {index:?} matches the current index {current_index}. Processing it");
-                match &mut download_kind {
-                    DownloadKind::FileSystem(decryptor) => {
-                        let _ = decryptor.next_encrypted(encrypted_chunk)?;
-                    }
-                    DownloadKind::Memory(collector) => collector.push(encrypted_chunk),
-                }
-                current_index += 1;
-            } else {
-                // since we download the chunks concurrently without order, we cache the results for an index that
-                // finished earlier
-                debug!("The downloaded chunk's index {index:?} does not match with the current_index {current_index}. Inserting into cache");
-                let _ = chunk_download_cache.insert(index, encrypted_chunk);
-            }
-        }
-
-        // finally empty out the cache.
-        debug!("Finally emptying out the download cache");
-        while let Some(encrypted_chunk) = chunk_download_cache.remove(&current_index) {
-            debug!("Got current_index {current_index:?} from the download cache. Incrementing current index");
-            match &mut download_kind {
-                DownloadKind::FileSystem(decryptor) => {
-                    let _ = decryptor.next_encrypted(encrypted_chunk)?;
-                }
-                DownloadKind::Memory(collector) => collector.push(encrypted_chunk),
-            }
-            current_index += 1;
-        }
-        if !chunk_download_cache.is_empty() {
-            error!(
-                "The chunk download cache is not empty. Current index {current_index:?}. The indices inside the cache: {:?}",
-                chunk_download_cache.keys()
-            );
-            return Err(ClientError::FailedToAssembleDownloadedChunks);
-        }
-
-        let elapsed = now.elapsed();
-        info!("Client downloaded file in {elapsed:?}");
-
-        match download_kind {
-            DownloadKind::FileSystem(_) => Ok(DownloadReturnType::WrittenToFileSystem),
-            DownloadKind::Memory(collector) => {
-                let result = if return_encrypted_chunks {
-                    DownloadReturnType::EncryptedChunks(collector)
-                } else {
-                    let bytes = decrypt_full_set(&data_map, &collector)
-                        .map_err(ChunksError::SelfEncryption)?;
-                    DownloadReturnType::DecryptedBytes(bytes)
-                };
-
-                Ok(result)
-            }
-        }
-    }
-
-    /// Extracts a file DataMapLevel from a chunk.
-    /// If the DataMapLevel is not the first level mapping directly to the user's contents,
-    /// the process repeats itself until it obtains the first level DataMapLevel.
-    pub async fn unpack_chunk(&mut self, mut chunk: Chunk) -> Result<DataMap> {
-        loop {
-            match rmp_serde::from_slice(chunk.value()).map_err(ChunksError::Deserialisation)? {
-                DataMapLevel::First(data_map) => {
-                    return Ok(data_map);
-                }
-                DataMapLevel::Additional(data_map) => {
-                    if let DownloadReturnType::DecryptedBytes(serialized_chunk) =
-                        self.read(data_map, None, false, true).await?
-                    {
-                        chunk = rmp_serde::from_slice(&serialized_chunk)
-                            .map_err(ChunksError::Deserialisation)?;
-                    } else {
-                        error!("IncorrectDownloadOption: we should be getting the decrypted bytes back.");
-                        return Err(ClientError::IncorrectDownloadOption);
-                    }
-                }
-            }
-        }
-    }
-
-    async fn send_event(&mut self, event: FilesDownloadEvent) -> Result<()> {
-        if let Some(sender) = self.event_sender.as_ref() {
-            sender.send(event).await.map_err(|err| {
-                error!("Could not send files download event due to {err:?}");
-                ClientError::CouldNotSendFilesEvent
-            })?;
-        } else if !self.logged_event_sender_absence {
-            info!("Files download event sender is not set. Use get_events() if you need to keep track of the progress");
-            self.logged_event_sender_absence = true;
-        }
-        Ok(())
-    }
-
-    async fn get_chunk(
-        client: Client,
-        address: XorName,
-        index: usize,
-        show_holders: bool,
-        retry_strategy: RetryStrategy,
-    ) -> std::result::Result<(ChunkAddress, usize, EncryptedChunk), ChunksError> {
-        let chunk = client
-            .get_chunk(
-                ChunkAddress::new(address),
-                show_holders,
-                Some(retry_strategy),
-            )
-            .await
-            .map_err(|err| {
-                error!("Chunk missing {address:?} with {err:?}",);
-                ChunksError::ChunkMissing(address)
-            })?;
-        let encrypted_chunk = EncryptedChunk {
-            index,
-            content: chunk.value,
-        };
-        Ok((chunk.address, index, encrypted_chunk))
-    }
-}
diff --git a/sn_client/src/folders.rs b/sn_client/src/folders.rs
deleted file mode 100644
index e2c94ef929..0000000000
--- a/sn_client/src/folders.rs
+++ /dev/null
@@ -1,344 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use super::{error::Result, Client, ClientRegister, WalletClient};
-use crate::{acc_packet::load_account_wallet_or_create_with_mnemonic, Error, FilesApi, UploadCfg};
-use bls::{Ciphertext, PublicKey};
-use bytes::{BufMut, BytesMut};
-use self_encryption::MAX_CHUNK_SIZE;
-use serde::{Deserialize, Serialize};
-use sn_protocol::{
-    storage::{Chunk, ChunkAddress, RegisterAddress},
-    NetworkAddress,
-};
-use sn_registers::{Entry, EntryHash};
-
-use std::{
-    collections::{BTreeMap, BTreeSet},
-    ffi::OsString,
-    path::{Path, PathBuf},
-};
-use xor_name::{XorName, XOR_NAME_LEN};
-
-/// Folder Entry representing either a file or subfolder.
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub enum FolderEntry {
-    File(Chunk),
-    Folder(RegisterAddress),
-}
-
-/// Metadata to be stored on a Chunk, linked from and belonging to Registers' entries.
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-pub struct Metadata {
-    pub name: String,
-    pub content: FolderEntry,
-}
-
-// This is the entry value used in Folders to mark a removed file/folder.
-const REMOVED_ENTRY_MARK: XorName = XorName([0; XOR_NAME_LEN]);
-
-/// Folders APIs.
-#[derive(Clone)]
-pub struct FoldersApi {
-    client: Client,
-    wallet_dir: PathBuf,
-    register: ClientRegister,
-    files_api: FilesApi,
-    // Cache of metadata chunks. We keep the Chunk itself till we upload it to the network.
-    metadata: BTreeMap<XorName, (Metadata, Option<Chunk>)>,
-}
-
-impl FoldersApi {
-    /// Create FoldersApi instance.
-    pub fn new(
-        client: Client,
-        wallet_dir: &Path,
-        address: Option<RegisterAddress>,
-    ) -> Result<Self> {
-        let register = if let Some(addr) = address {
-            ClientRegister::create_with_addr(client.clone(), addr)
-        } else {
-            let mut rng = rand::thread_rng();
-            ClientRegister::create(client.clone(), XorName::random(&mut rng))
-        };
-
-        Self::create(client, wallet_dir, register)
-    }
-
-    /// Clones the register instance. Any change made to one instance will not be reflected on the other register.
-    pub fn register(&self) -> ClientRegister {
-        self.register.clone()
-    }
-
-    /// Return the address of the Folder (Register address) on the network
-    pub fn address(&self) -> &RegisterAddress {
-        self.register.address()
-    }
-
-    /// Return the address of the Folder (Register address) as a NetworkAddress
-    pub fn as_net_addr(&self) -> NetworkAddress {
-        NetworkAddress::RegisterAddress(*self.address())
-    }
-
-    /// Return the list of metadata chunk addresses that need to be paid for in order to be
-    /// able to then store all data on the network upon calling the `sync` method.
-    pub fn meta_addrs_to_pay(&self) -> BTreeSet<NetworkAddress> {
-        self.metadata
-            .iter()
-            .filter_map(|(meta_xorname, (_, chunk))| {
-                chunk
-                    .as_ref()
-                    .map(|_| NetworkAddress::ChunkAddress(ChunkAddress::new(*meta_xorname)))
-            })
-            .collect()
-    }
-
-    /// Return the list of metadata chunks.
-    pub fn meta_chunks(&self) -> BTreeSet<Chunk> {
-        self.metadata
-            .iter()
-            .filter_map(|(_, (_, chunk))| chunk.clone())
-            .collect()
-    }
-
-    /// Create a new WalletClient from the directory set.
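-    ///
-    /// # Example
-    ///
-    /// A brief sketch, assuming a wallet already exists (or can be created
-    /// from a mnemonic) in the configured wallet directory; the balance call
-    /// is illustrative.
-    /// ```no_run
-    /// # use sn_client::{Client, Error, FoldersApi};
-    /// # use bls::SecretKey;
-    /// # use std::path::Path;
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(), Error> {
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// let folders_api = FoldersApi::new(client, Path::new("wallet-dir"), None)?;
-    /// let wallet_client = folders_api.wallet()?;
-    /// println!("Wallet balance: {}", wallet_client.balance());
-    /// # Ok(())
-    /// # }
-    /// ```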
-    pub fn wallet(&self) -> Result<WalletClient> {
-        let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?;
-        Ok(WalletClient::new(self.client.clone(), wallet))
-    }
-
-    /// Add provided file as entry of this Folder (locally).
-    /// The new file's metadata chunk will be encrypted if a key has been provided.
-    pub fn add_file(
-        &mut self,
-        file_name: OsString,
-        data_map_chunk: Chunk,
-        encryption_pk: Option<PublicKey>,
-    ) -> Result<(EntryHash, XorName, Metadata)> {
-        // create metadata Chunk for this entry
-        let metadata = Metadata {
-            name: file_name.to_str().unwrap_or("unknown").to_string(),
-            content: FolderEntry::File(data_map_chunk),
-        };
-
-        self.add_entry(metadata, &BTreeSet::default(), encryption_pk)
-    }
-
-    /// Add subfolder as entry of this Folder (locally).
-    /// The new folder's metadata chunk will be encrypted if a key has been provided.
-    pub fn add_folder(
-        &mut self,
-        folder_name: OsString,
-        address: RegisterAddress,
-        encryption_pk: Option<PublicKey>,
-    ) -> Result<(EntryHash, XorName, Metadata)> {
-        // create metadata Chunk for this entry
-        let metadata = Metadata {
-            name: folder_name.to_str().unwrap_or("unknown").to_string(),
-            content: FolderEntry::Folder(address),
-        };
-
-        self.add_entry(metadata, &BTreeSet::default(), encryption_pk)
-    }
-
-    /// Replace an existing file with the provided one (locally).
-    /// The new file's metadata chunk will be encrypted if a key has been provided.
-    pub fn replace_file(
-        &mut self,
-        existing_entry: EntryHash,
-        file_name: OsString,
-        data_map_chunk: Chunk,
-        encryption_pk: Option<PublicKey>,
-    ) -> Result<(EntryHash, XorName, Metadata)> {
-        // create metadata Chunk for this entry
-        let metadata = Metadata {
-            name: file_name.to_str().unwrap_or("unknown").to_string(),
-            content: FolderEntry::File(data_map_chunk),
-        };
-
-        self.add_entry(
-            metadata,
-            &vec![existing_entry].into_iter().collect(),
-            encryption_pk,
-        )
-    }
-
-    /// Remove a file/folder item from this Folder (locally).
-    pub fn remove_item(&mut self, existing_entry: EntryHash) -> Result<()> {
-        let _ = self.register.write_atop(
-            &REMOVED_ENTRY_MARK,
-            &vec![existing_entry].into_iter().collect(),
-        )?;
-        Ok(())
-    }
-
-    /// Sync local Folder with the network.
-    pub async fn sync(&mut self, upload_cfg: UploadCfg) -> Result<()> {
-        let mut wallet_client = self.wallet()?;
-
-        // First upload any newly created metadata chunk
-        for (_, meta_chunk) in self.metadata.values_mut() {
-            if let Some(chunk) = meta_chunk.take() {
-                self.files_api
-                    .get_local_payment_and_upload_chunk(
-                        chunk.clone(),
-                        upload_cfg.verify_store,
-                        Some(upload_cfg.retry_strategy),
-                    )
-                    .await?;
-            }
-        }
-
-        let payment_info = wallet_client.get_recent_payment_for_addr(&self.as_net_addr())?;
-
-        self.register
-            .sync(
-                &mut wallet_client,
-                upload_cfg.verify_store,
-                Some(payment_info),
-            )
-            .await?;
-
-        Ok(())
-    }
-
-    /// Download a copy of the Folder from the network.
-    pub async fn retrieve(
-        client: Client,
-        wallet_dir: &Path,
-        address: RegisterAddress,
-    ) -> Result<Self> {
-        let register = ClientRegister::retrieve(client.clone(), address).await?;
-        Self::create(client, wallet_dir, register)
-    }
-
-    /// Returns true if there is a file/folder which matches the given entry hash
-    pub fn contains(&self, entry_hash: &EntryHash) -> bool {
-        self.register
-            .read()
-            .iter()
-            .any(|(hash, _)| hash == entry_hash)
-    }
-
-    /// Find file/folder in this Folder by its name, returning metadata chunk xorname and metadata itself.
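-    ///
-    /// # Example
-    ///
-    /// A brief sketch, assuming a connected `Client` and a wallet directory
-    /// (set up as in this crate's other doc examples); the folder contents
-    /// and file name are placeholders.
-    /// ```no_run
-    /// # use sn_client::{Client, Error, FoldersApi};
-    /// # use bls::SecretKey;
-    /// # use std::path::Path;
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(), Error> {
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// let folders_api = FoldersApi::new(client, Path::new("wallet-dir"), None)?;
-    /// if let Some((meta_xorname, metadata)) = folders_api.find_by_name("readme.md") {
-    ///     println!("{} -> metadata chunk {meta_xorname:?}", metadata.name);
-    /// }
-    /// # Ok(())
-    /// # }
-    /// ```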
-    pub fn find_by_name(&self, name: &str) -> Option<(&XorName, &Metadata)> {
-        // let's get the list of metadata xornames of non-removed entries
-        let non_removed_items: BTreeSet<XorName> = self
-            .register
-            .read()
-            .iter()
-            .map(|(_, meta_xorname_entry)| xorname_from_entry(meta_xorname_entry))
-            .collect();
-
-        self.metadata
-            .iter()
-            .find_map(|(meta_xorname, (metadata, _))| {
-                if metadata.name == name && non_removed_items.contains(meta_xorname) {
-                    Some((meta_xorname, metadata))
-                } else {
-                    None
-                }
-            })
-    }
-
-    /// Returns the list of entries of this Folder, including their entry hash,
-    /// metadata chunk xorname, and metadata itself.
-    pub async fn entries(&mut self) -> Result<BTreeMap<EntryHash, (XorName, Metadata)>> {
-        let mut entries = BTreeMap::new();
-        for (entry_hash, entry) in self.register.read() {
-            let meta_xorname = xorname_from_entry(&entry);
-            if meta_xorname == REMOVED_ENTRY_MARK {
-                continue;
-            }
-
-            let metadata = match self.metadata.get(&meta_xorname) {
-                Some((metadata, _)) => metadata.clone(),
-                None => {
-                    // retrieve metadata Chunk from network
-                    let chunk = self
-                        .client
-                        .get_chunk(ChunkAddress::new(meta_xorname), false, None)
-                        .await?;
-
-                    // let's first assume it's unencrypted
-                    let metadata: Metadata = match rmp_serde::from_slice(chunk.value()) {
-                        Ok(metadata) => metadata,
-                        Err(err) => {
-                            // let's try to decrypt it then
-                            let cipher = Ciphertext::from_bytes(chunk.value()).map_err(|_| err)?;
-                            let data = self
-                                .client
-                                .signer()
-                                .decrypt(&cipher)
-                                .ok_or(Error::FolderEntryDecryption(entry_hash))?;
-
-                            // if this fails, it's either the wrong key or unexpected data
-                            rmp_serde::from_slice(&data)
-                                .map_err(|_| Error::FolderEntryDecryption(entry_hash))?
-                        }
-                    };
-                    self.metadata.insert(meta_xorname, (metadata.clone(), None));
-                    metadata
-                }
-            };
-            entries.insert(entry_hash, (meta_xorname, metadata));
-        }
-        Ok(entries)
-    }
-
-    // Private helpers
-
-    // Create a new FoldersApi instance with given register.
-    fn create(client: Client, wallet_dir: &Path, register: ClientRegister) -> Result<Self> {
-        let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf());
-
-        Ok(Self {
-            client,
-            wallet_dir: wallet_dir.to_path_buf(),
-            register,
-            files_api,
-            metadata: BTreeMap::new(),
-        })
-    }
-
-    // Add the given entry to the underlying Register as well as creating the metadata Chunk.
-    // If an encryption key is given, the metadata chunk will be encrypted with it.
-    fn add_entry(
-        &mut self,
-        metadata: Metadata,
-        children: &BTreeSet<EntryHash>,
-        encryption_pk: Option<PublicKey>,
-    ) -> Result<(EntryHash, XorName, Metadata)> {
-        let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE);
-        let serialised_metadata = rmp_serde::to_vec(&metadata)?;
-        if let Some(pk) = encryption_pk {
-            bytes.put(
-                pk.encrypt(serialised_metadata.as_slice())
-                    .to_bytes()
-                    .as_slice(),
-            );
-        } else {
-            bytes.put(serialised_metadata.as_slice());
-        }
-        let meta_chunk = Chunk::new(bytes.freeze());
-        let meta_xorname = *meta_chunk.name();
-
-        self.metadata
-            .insert(meta_xorname, (metadata.clone(), Some(meta_chunk)));
-        let entry_hash = self.register.write_atop(&meta_xorname, children)?;
-
-        Ok((entry_hash, meta_xorname, metadata))
-    }
-}
-
-// Helper to convert a Register/Folder entry into a XorName
-fn xorname_from_entry(entry: &Entry) -> XorName {
-    let mut xorname = [0; XOR_NAME_LEN];
-    xorname.copy_from_slice(entry);
-    XorName(xorname)
-}
diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs
deleted file mode 100644
index 27594bfa4a..0000000000
--- a/sn_client/src/lib.rs
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-//! > **Core functionalities for interacting with the SAFE Network**
-//!
-//! The `sn_client` crate is a part of the [Safe Network](https://safenetwork.tech/) (SN),
-//! and plays a crucial role in this ecosystem by serving as the client library that allows
-//! applications and users to interact with the Safe Network and build applications that
-//! leverage its capabilities, providing a high-level API that simplifies development.
-//!
-//! Here are the key functionalities provided by this crate:
-//!
-//! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to
-//!    send and receive messages from the decentralized nodes that make up the network.
-//!
-//! 2. **Data Storage and Retrieval**: It provides APIs to store and retrieve data on the Safe Network.
-//!    This includes both private and public data, ensuring privacy and security.
-//!
-//! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and
-//!    managing access to data, ensuring that only authorized users can access sensitive information.
-//!
-//! 4. **File Management**: The crate supports operations related to file management, such as uploading,
-//!    downloading, and managing files and directories on the Safe Network.
-//!
-//! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be
-//!    used for various purposes within the network, including paying for storage and services.
-//!
-//! ## Quick links
-//! - [Crates.io](https://crates.io/crates/sn_client)
-//! - [Forum](https://forum.autonomi.community/)
-//! - [Issues on GitHub](https://github.com/maidsafe/safe_network/issues)
-//!
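-//! ## Quick start
-//!
-//! A minimal sketch of connecting and creating a local register, assuming a
-//! reachable (e.g. local test) network; the setup mirrors the doc examples
-//! in this crate:
-//! ```no_run
-//! # use sn_client::{Client, ClientRegister, Error};
-//! # use bls::SecretKey;
-//! # use xor_name::XorName;
-//! # #[tokio::main]
-//! # async fn main() -> Result<(), Error> {
-//! let mut rng = rand::thread_rng();
-//! let client = Client::new(SecretKey::random(), None, None, None).await?;
-//! let register = ClientRegister::create(client, XorName::random(&mut rng));
-//! println!("REGISTER_ADDRESS={}", register.address().to_hex());
-//! # Ok(())
-//! # }
-//! ```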
- -#[macro_use] -extern crate tracing; - -pub mod acc_packet; -pub mod api; -mod audit; -mod chunks; -mod error; -mod event; -mod faucet; -mod files; -mod folders; -mod register; -mod uploader; -mod wallet; - -/// Test utils -#[cfg(feature = "test-utils")] -pub mod test_utils; - -// re-export used crates to make them available to app builders -// this ensures the version of the crates used by the app builders are the same as the ones used by the client -// so they don't run into issues with incompatible types due to different versions of the same crate -pub use sn_networking as networking; -pub use sn_protocol as protocol; -pub use sn_registers as registers; -pub use sn_transfers as transfers; - -const MAX_CONCURRENT_TASKS: usize = 4096; - -pub use self::{ - audit::{DagError, SpendDag, SpendDagGet, SpendFault}, - error::Error, - event::{ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}, - faucet::fund_faucet_from_genesis_wallet, - files::{ - download::{FilesDownload, FilesDownloadEvent}, - FilesApi, BATCH_SIZE, - }, - folders::{FolderEntry, FoldersApi, Metadata}, - register::ClientRegister, - uploader::{UploadCfg, UploadEvent, UploadSummary, Uploader}, - wallet::{send, StoragePaymentResult, WalletClient}, -}; -pub(crate) use error::Result; - -use sn_networking::Network; -use std::sync::Arc; - -#[cfg(target_arch = "wasm32")] -use console_error_panic_hook; -#[cfg(target_arch = "wasm32")] -use wasm_bindgen::prelude::*; -#[cfg(target_arch = "wasm32")] -use web_sys::console; - -// This is like the `main` function, except for JavaScript. -#[cfg(target_arch = "wasm32")] -#[wasm_bindgen(start)] -pub async fn main_js() -> std::result::Result<(), JsValue> { - // This provides better error messages in debug mode. - // It's disabled in release mode so it doesn't bloat up the file size. - // #[cfg(debug_assertions)] - console_error_panic_hook::set_once(); - - console::log_1(&JsValue::from_str("Hello safe world!")); - - // Tracing - // TODO: dont log _everything_ - // right now it logs all libp2p entirely. - tracing_wasm::set_as_global_default(); - - Ok(()) -} - -/// A quick client that only takes some peers to connect to -#[wasm_bindgen] -#[cfg(target_arch = "wasm32")] -pub async fn get_data(peer: &str, data_address: &str) -> std::result::Result<(), JsError> { - let bytes = hex::decode(&data_address).expect("Input address is not a hex string"); - let xor_name = xor_name::XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - - use sn_protocol::storage::ChunkAddress; - console::log_1(&JsValue::from_str(peer)); - - let the_peer = sn_peers_acquisition::parse_peer_addr(peer)?; - - console::log_1(&JsValue::from_str(&format!( - "Provided Peer was {the_peer:?}" - ))); - - // TODO: We need to tidy this up, the client loops forever in the browser, and eventually crashes - // it does _do things_ but errors surface, and even after getting data, it continues... - let client = Client::quick_start(Some(vec![the_peer])) - .await - .map_err(|e| JsError::new(&format!("Client could not start: {e:?}")))?; - - console::log_1(&JsValue::from_str("Client started {chunk:?}")); - - let chunk = client - .get_chunk(ChunkAddress::new(xor_name), false, None) - .await - .map_err(|e| JsError::new(&format!("Client get data failed: {e:?}")))?; - - console::log_1(&JsValue::from_str(&format!("Data found {chunk:?}"))); - - Ok(()) -} - -/// Client API implementation to store and get data. 
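-///
-/// A brief construction sketch (the same pattern used by the doc examples in
-/// this crate):
-/// ```no_run
-/// # use sn_client::{Client, Error};
-/// # use bls::SecretKey;
-/// # #[tokio::main]
-/// # async fn main() -> Result<(), Error> {
-/// let client = Client::new(SecretKey::random(), None, None, None).await?;
-/// # Ok(())
-/// # }
-/// ```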
-#[derive(Clone, Debug)] -pub struct Client { - network: Network, - events_broadcaster: ClientEventsBroadcaster, - signer: Arc, -} diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs deleted file mode 100644 index f657898bf6..0000000000 --- a/sn_client/src/register.rs +++ /dev/null @@ -1,833 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::StoragePaymentResult, Client, Error, Result, WalletClient}; -use bls::PublicKey; -use crdts::merkle_reg::MerkleReg; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{ - Entry, EntryHash, Error as RegisterError, Permissions, Register, RegisterAddress, RegisterCrdt, - RegisterOp, SignedRegister, -}; -use sn_transfers::{NanoTokens, Payment}; -use std::collections::{BTreeSet, HashSet}; -use xor_name::XorName; - -/// Cached operations made to an offline RegisterCrdt instance are applied locally only, -/// and accumulated until the user explicitly calls 'sync'. The user can -/// switch back to sync with the network for every op by invoking `online` API. -#[derive(Clone, custom_debug::Debug)] -pub struct ClientRegister { - #[debug(skip)] - client: Client, - register: Register, - /// CRDT data of the Register - crdt: RegisterCrdt, - /// Cached operations. - ops: BTreeSet, -} - -impl ClientRegister { - /// Create with specified meta and permission - pub fn create_register(client: Client, meta: XorName, perms: Permissions) -> Self { - let register = Register::new(client.signer_pk(), meta, perms); - let crdt = RegisterCrdt::new(*register.address()); - Self { - client, - register, - crdt, - ops: BTreeSet::new(), - } - } - - /// Create a new Register Locally. - /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create(client: Client, meta: XorName) -> Self { - Self::create_register(client, meta, Permissions::default()) - } - - /// Create a new Register locally with a specific address. 
- /// # Arguments - /// * 'client' - [Client] - /// * 'addr' - [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use sn_protocol::storage::RegisterAddress; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = RegisterAddress::new(XorName::random(&mut rng), client.signer_pk()); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create_with_addr(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create_with_addr(client: Client, addr: RegisterAddress) -> Self { - let register = Register::new(addr.owner(), addr.meta(), Permissions::default()); - let crdt = RegisterCrdt::new(addr); - Self { - client, - register, - crdt, - ops: BTreeSet::new(), - } - } - - /// Create a new Register and send it to the Network. - /// - /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// * 'wallet_client' - A borrowed mutable [WalletClient] - /// * `verify_store` - A boolean to verify store. Set this to true for mandatory verification. - /// * 'perms' - [Permissions] - /// - /// Return type: Result<(Self, [NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a new Register replica from a predefined address. - /// // The create_online function runs a [sync](ClientRegister::sync) internally. - /// let (client_register, mut total_cost, mut total_royalties) = ClientRegister::create_online( - /// client, - /// address, - /// &mut wallet_client, - /// false, - /// permissions, - /// ).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create_online( - client: Client, - meta: XorName, - wallet_client: &mut WalletClient, - verify_store: bool, - perms: Permissions, - ) -> Result<(Self, NanoTokens, NanoTokens)> { - let mut reg = Self::create_register(client, meta, perms); - let (storage_cost, royalties_fees) = reg.sync(wallet_client, verify_store, None).await?; - Ok((reg, storage_cost, royalties_fees)) - } - - /// Retrieve a Register from the network to work on it offline. 
- pub(super) async fn retrieve(client: Client, address: RegisterAddress) -> Result { - let signed_register = Self::get_register_from_network(&client, address).await?; - - let mut register = Self::create_with_addr(client, address); - register.merge(&signed_register); - - Ok(register) - } - - /// Return type: [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the address. In this example, we print it out: - /// println!("REGISTER_ADDRESS={}", client_register.address().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn address(&self) -> &RegisterAddress { - self.register.address() - } - - /// Returns the Owner of the Register. - /// - /// Return type: [PublicKey] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the owner. In this example, we print it out: - /// println!("REGISTER_OWNER={}", client_register.owner().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn owner(&self) -> PublicKey { - self.register.owner() - } - - /// Returns the Permissions of the Register. 
- /// - /// Return type: [Permissions] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the permissions. In this example, we print it out: - /// let permissions = client_register.permissions(); - /// println!("REGISTER_PERMS={:?}",permissions); - /// # Ok(()) - /// # } - /// ``` - pub fn permissions(&self) -> &Permissions { - self.register.permissions() - } - - /// Return the number of items held in the register. - /// - /// Return type: u64 - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can see the size. In this example, we print it out: - /// println!("REGISTER_SIZE={}", client_register.size()); - /// # Ok(()) - /// # } - /// ``` - pub fn size(&self) -> u64 { - self.crdt.size() - } - - /// Return a value corresponding to the provided 'hash', if present. - // No usages found in All Places - pub fn get(&self, hash: EntryHash) -> Result<&Entry> { - if let Some(entry) = self.crdt.get(hash) { - Ok(entry) - } else { - Err(RegisterError::NoSuchEntry(hash).into()) - } - } - - /// Read the last entry, or entries when there are branches, if the register is not empty. 
- /// - /// Return type: [BTreeSet]<([EntryHash], [Entry])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Read as bytes into the ClientRegister instance - /// let register = ClientRegister::create(client.clone(), address).read(); - /// # Ok(()) - /// # } - /// ``` - pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { - self.crdt.read() - } - - /// Write a new value onto the Register atop latest value. - /// It returns an error if it finds branches in the content/entries; if it is - /// required to merge/resolve the branches, invoke the `write_merging_branches` API. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "Register entry"; - /// // Write as bytes into the ClientRegister instance - /// let mut register = ClientRegister::create(client.clone(), address).write(entry.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write(&mut self, entry: &[u8]) -> Result { - let children = self.crdt.read(); - if children.len() > 1 { - return Err(Error::ContentBranchDetected(children)); - } - - self.write_atop(entry, &children.into_iter().map(|(hash, _)| hash).collect()) - } - - /// Write a new value onto the Register atop of the latest value. - /// If there are any branches of content or entries, it automatically merges them. - /// Leaving the new value as a single latest value on the Register. - /// Note you can use the `write` API if you need to handle - /// content/entries branches in a different way. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "entry_input_here"; - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let message = "Register entry"; - /// let register = mutable_register.write_merging_branches(message.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result { - let children: BTreeSet = - self.crdt.read().into_iter().map(|(hash, _)| hash).collect(); - - self.write_atop(entry, &children) - } - - /// Write a new value onto the Register atop the set of branches/entries - /// referenced by the provided list of their corresponding entry hash. - /// Note you can use `write_merging_branches` API instead if you - /// want to write atop all exiting branches/entries. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'children' - [BTreeSet]<[EntryHash]> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let meta = "Register entry".as_bytes(); - /// let register = mutable_register.write_atop(meta, &BTreeSet::default()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_atop( - &mut self, - entry: &[u8], - children: &BTreeSet, - ) -> Result { - // check permissions first - let public_key = self.client.signer_pk(); - self.register.check_user_permissions(public_key)?; - - let (hash, address, crdt_op) = self.crdt.write(entry.to_vec(), children)?; - - let op = RegisterOp::new(address, crdt_op, self.client.signer()); - - let _ = self.ops.insert(op); - - Ok(hash) - } - - // ********* Online methods ********* - - /// Sync this Register with the replicas on the network. - /// This will optionally verify the stored Register on the network is the same as the local one. - /// If payment info is provided it won't try to make the payment. - /// - /// # Arguments - /// * 'wallet_client' - WalletClient - /// * 'verify_store' - Boolean - /// - /// Return type: - /// Result<([NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// // Run sync of a Client Register instance - /// let mut register = - /// ClientRegister::create(client, address).sync(&mut wallet_client, true, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn sync( - &mut self, - wallet_client: &mut WalletClient, - verify_store: bool, - mut payment_info: Option<(Payment, PeerId)>, - ) -> Result<(NanoTokens, NanoTokens)> { - let addr = *self.address(); - debug!("Syncing Register at {addr:?}!"); - let mut storage_cost = NanoTokens::zero(); - let mut royalties_fees = NanoTokens::zero(); - let reg_result = if verify_store { - debug!("VERIFYING REGISTER STORED {:?}", self.address()); - if payment_info.is_some() { - // we expect this to be a _fresh_ register. - // It still could have been PUT previously, but we'll do a quick verification - // instead of thorough one. 
- self.client - .quickly_check_if_register_stored(*self.address()) - .await - } else { - self.client.verify_register_stored(*self.address()).await - } - } else { - Self::get_register_from_network(&self.client, addr).await - }; - - match reg_result { - Ok(remote_replica) => { - self.merge(&remote_replica); - self.push(verify_store).await?; - } - // any error here will result in a repayment of the register - // TODO: be smart about this and only pay for storage if we need to - Err(err) => { - debug!("Failed to get register: {err:?}"); - debug!("Creating Register as it doesn't exist at {addr:?}!"); - - // Let's check if the user has already paid for this address first - if payment_info.is_none() { - let net_addr = NetworkAddress::RegisterAddress(addr); - let payment_result = self.make_payment(wallet_client, &net_addr).await?; - storage_cost = payment_result.storage_cost; - royalties_fees = payment_result.royalty_fees; - - // Get payment proofs needed to publish the Register - let (payment, payee) = wallet_client.get_recent_payment_for_addr(&net_addr)?; - debug!("payments found: {payment:?}"); - payment_info = Some((payment, payee)); - } - - // The `creation register` has to come with `payment`. - // Hence it needs to be `published` to network separately. - self.publish_register(payment_info, verify_store).await?; - } - } - - Ok((storage_cost, royalties_fees)) - } - - /// Push all operations made locally to the replicas of this Register on the network. - /// This optionally verifies that the stored Register is the same as our local register. - /// - /// # Arguments - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Pass the boolean value to the Client Register instance via .Push() - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.push(false); - /// # Ok(()) - /// # } - /// ``` - pub async fn push(&mut self, verify_store: bool) -> Result<()> { - let ops_len = self.ops.len(); - let address = *self.address(); - if ops_len > 0 { - if let Err(err) = self.publish_register(None, verify_store).await { - warn!("Failed to push register {address:?} to network!: {err}"); - return Err(err); - } - - debug!("Successfully pushed register {address:?} to network!"); - } - - Ok(()) - } - - /// Write a new value onto the Register atop of the latest value. - /// It returns an error if it finds branches in the content / entries. If so, then it's - /// required to merge or resolve the branches. In that case, invoke the `write_merging_branches` API. 
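// Minimal offline-edit-then-publish sketch (assumes `register` was created
// or synced earlier): local writes only queue ops, and `push` is what sends
// the queued ops to the network, optionally verifying the stored replica.
register.write_merging_branches(b"new entry")?;
register.push(true).await?;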
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Register entry".as_bytes(); - /// // Use of the 'write_online' example: - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_online(&mut self, entry: &[u8], verify_store: bool) -> Result<()> { - self.write(entry)?; - self.push(verify_store).await - } - - /// Write a new value onto the Register atop of the latest value. - /// If there are branches of content/entries, it will automatically merge them. - /// This will leave a single new value as the latest entry into the Register. - /// Note that you can use the `write` API if you need to handle content/entries branches in a different way. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Entry".as_bytes(); - /// // Use of the 'write_merging_branches_online': - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_merging_branches_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_merging_branches_online( - &mut self, - entry: &[u8], - verify_store: bool, - ) -> Result<()> { - self.write_merging_branches(entry)?; - self.push(verify_store).await - } - - /// Access the underlying MerkleReg (e.g. 
for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.crdt.merkle_reg() - } - - /// Returns the local ops list - pub fn ops_list(&self) -> &BTreeSet { - &self.ops - } - - /// Log the crdt DAG in tree structured view - pub fn log_update_history(&self) -> String { - self.crdt.log_update_history() - } - - // ********* Private helpers ********* - - // Make a storage payment for the provided network address - async fn make_payment( - &self, - wallet_client: &mut WalletClient, - net_addr: &NetworkAddress, - ) -> Result { - // Let's make the storage payment - let payment_result = wallet_client - .pay_for_storage(std::iter::once(net_addr.clone())) - .await?; - let cost = payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - .ok_or(Error::TotalPriceTooHigh)?; - - println!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - info!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - - if let Err(err) = wallet_client.store_local_wallet() { - warn!("Failed to store wallet with cached payment proofs: {err:?}"); - println!("Failed to store wallet with cached payment proofs: {err:?}"); - } else { - println!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - info!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - } - - Ok(payment_result) - } - - /// Publish a `Register` command on the network. - /// If `verify_store` is true, it will verify the Register was stored on the network. - /// Optionally contains the Payment and the PeerId that we paid to. - pub async fn publish_register( - &self, - payment: Option<(Payment, PeerId)>, - verify_store: bool, - ) -> Result<()> { - let client = self.client.clone(); - let signed_reg = self.get_signed_reg()?; - - let network_address = NetworkAddress::from_register_address(*self.register.address()); - let key = network_address.to_record_key(); - let (record, payee) = match payment { - Some((payment, payee)) => { - let record = Record { - key: key.clone(), - value: try_serialize_record( - &(payment, &signed_reg), - RecordKind::RegisterWithPayment, - )? - .to_vec(), - publisher: None, - expires: None, - }; - (record, Some(vec![payee])) - } - None => { - let record = Record { - key: key.clone(), - value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - (record, None) - } - }; - - let (record_to_verify, expected_holders) = if verify_store { - let expected_holders: HashSet<_> = client - .network - .get_closest_peers(&network_address, true) - .await? 
- .iter() - .cloned() - .collect(); - ( - Some(Record { - key, - value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }), - expected_holders, - ) - } else { - (None, Default::default()) - }; - - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Quick), - target_record: record_to_verify, - expected_holders, - is_register: true, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: payee, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - - // Register edits might exist, so we cannot be sure that just because we get a record back that this should fail - Ok(client.network.put_record(record, &put_cfg).await?) - } - - /// Retrieve a `Register` from the Network. - pub async fn get_register_from_network( - client: &Client, - address: RegisterAddress, - ) -> Result { - debug!("Retrieving Register from: {address}"); - let signed_reg = client.get_signed_register_from_network(address).await?; - signed_reg.verify_with_address(address)?; - Ok(signed_reg) - } - - /// Merge a network fetched copy with the local one. - /// Note the `get_register_from_network` already verified - /// * the fetched register is the same (address) as to the local one - /// * the ops of the fetched copy are all signed by the owner - pub fn merge(&mut self, signed_reg: &SignedRegister) { - debug!("Merging Register of: {:?}", self.register.address()); - - // Take out the difference between local ops and fetched ops - // note the `difference` functions gives entry that: in a but not in b - let diff: Vec<_> = signed_reg.ops().difference(&self.ops).cloned().collect(); - - // Apply the new ops to local - for op in diff { - // in case of deploying error, record then continue to next - if let Err(err) = self.crdt.apply_op(op.clone()) { - error!( - "Apply op to local Register {:?} failed with {err:?}", - self.register.address() - ); - } else { - let _ = self.ops.insert(op); - } - } - } - - /// Generate SignedRegister from local copy, so that can be published to network - fn get_signed_reg(&self) -> Result { - let signature = self.client.sign(self.register.bytes()?); - Ok(SignedRegister::new( - self.register.clone(), - signature, - self.ops.clone(), - )) - } -} diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs deleted file mode 100644 index 5e0485e543..0000000000 --- a/sn_client/src/test_utils.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
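// Illustrative round-trip for the merge flow defined above (assumes `reg` is
// a local ClientRegister and `client` its Client): fetch the remote replica,
// fold in any ops we have not seen yet, then publish our local-only ops.
let remote = ClientRegister::get_register_from_network(&client, *reg.address()).await?;
reg.merge(&remote);
reg.push(true).await?;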
-
-use crate::{
-    acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic},
-    send, Client, WalletClient,
-};
-use sn_peers_acquisition::parse_peer_addr;
-use sn_protocol::{storage::Chunk, NetworkAddress};
-use sn_transfers::{HotWallet, NanoTokens};
-
-use bls::SecretKey;
-use bytes::Bytes;
-use eyre::{bail, Result};
-use rand::distributions::{Distribution, Standard};
-use std::path::Path;
-use tokio::{
-    sync::Mutex,
-    time::{Duration, Instant},
-};
-use tracing::{info, warn};
-
-/// 100 SNT is the amount `get_funded_wallet` funds the created wallet with.
-pub const AMOUNT_TO_FUND_WALLETS: u64 = 100 * 1_000_000_000;
-
-// The number of times to try to load the faucet wallet
-const LOAD_FAUCET_WALLET_RETRIES: usize = 10;
-
-// Mutex to restrict access to the faucet wallet from concurrent tests
-static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(());
-
-/// Get a new Client for testing
-pub async fn get_new_client(owner_sk: SecretKey) -> Result<Client> {
-    let bootstrap_peers = if cfg!(feature = "local") {
-        None
-    } else {
-        match std::env::var("SAFE_PEERS") {
-            Ok(str) => match parse_peer_addr(&str) {
-                Ok(peer) => Some(vec![peer]),
-                Err(err) => bail!("Can't parse SAFE_PEERS {str:?} with error {err:?}"),
-            },
-            Err(err) => bail!("Can't get env var SAFE_PEERS with error {err:?}"),
-        }
-    };
-
-    println!("Client bootstrap with peer {bootstrap_peers:?}");
-    let client = Client::new(owner_sk, bootstrap_peers, None, None).await?;
-    Ok(client)
-}
-
-/// Generate a Chunk with random bytes
-pub fn random_file_chunk() -> Chunk {
-    let mut rng = rand::thread_rng();
-    let random_content: Vec<u8> = <Standard as Distribution<u8>>::sample_iter(Standard, &mut rng)
-        .take(100)
-        .collect();
-    Chunk::new(Bytes::from(random_content))
-}
-
-/// Creates and funds a new hot-wallet at the provided path
-pub async fn get_funded_wallet(client: &Client, wallet_dir: &Path) -> Result<HotWallet> {
-    let wallet_balance = NanoTokens::from(AMOUNT_TO_FUND_WALLETS);
-    let _guard = FAUCET_WALLET_MUTEX.lock().await;
-    let from_faucet_wallet = load_faucet_wallet().await?;
-
-    let mut local_wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None)
-        .expect("Wallet shall be successfully created.");
-
-    println!("Getting {wallet_balance} tokens from the faucet...");
-    info!("Getting {wallet_balance} tokens from the faucet...");
-    let tokens = send(
-        from_faucet_wallet,
-        wallet_balance,
-        local_wallet.address(),
-        client,
-        true,
-    )
-    .await?;
-
-    println!("Verifying the transfer from faucet...");
-    info!("Verifying the transfer from faucet...");
-    client.verify_cashnote(&tokens).await?;
-    local_wallet.deposit_and_store_to_disk(&vec![tokens])?;
-    assert_eq!(local_wallet.balance(), wallet_balance);
-    println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}.");
-    info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}.");
-
-    Ok(local_wallet)
-}
-
-/// Pay the network for the provided list of storage addresses.
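// Typical test bootstrap built from the helpers above (a sketch; it assumes
// a reachable network and a running faucet for `get_funded_wallet`):
let client = get_new_client(SecretKey::random()).await?;
let wallet_dir = tempfile::tempdir()?;
let wallet = get_funded_wallet(&client, wallet_dir.path()).await?;
assert_eq!(wallet.balance(), NanoTokens::from(AMOUNT_TO_FUND_WALLETS));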
-pub async fn pay_for_storage( - client: &Client, - wallet_dir: &Path, - addrs2pay: Vec, -) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - let _ = wallet_client.pay_for_storage(addrs2pay.into_iter()).await?; - Ok(()) -} - -async fn load_faucet_wallet() -> Result { - info!("Loading faucet wallet..."); - let now = Instant::now(); - for attempt in 1..LOAD_FAUCET_WALLET_RETRIES + 1 { - let faucet_wallet = create_faucet_account_and_wallet(); - - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - info!("Loaded faucet wallet after {:?}", now.elapsed()); - return Ok(faucet_wallet); - } - tokio::time::sleep(Duration::from_secs(1)).await; - warn!("The faucet wallet is empty. Attempts: {attempt}/{LOAD_FAUCET_WALLET_RETRIES}") - } - bail!("The faucet wallet is empty even after {LOAD_FAUCET_WALLET_RETRIES} retries. Bailing after {:?}. Check the faucet_server logs.", now.elapsed()); -} diff --git a/sn_client/src/uploader/mod.rs b/sn_client/src/uploader/mod.rs deleted file mode 100644 index c3495b99ab..0000000000 --- a/sn_client/src/uploader/mod.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -#[cfg(test)] -mod tests; -mod upload; - -use self::upload::{start_upload, InnerUploader, MAX_REPAYMENTS_PER_FAILED_ITEM}; -use crate::{Client, ClientRegister, Error, Result, BATCH_SIZE}; -use itertools::Either; -use sn_networking::PayeeQuote; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{RegisterAddress, SignedRegister}; -use sn_transfers::{NanoTokens, WalletApi}; -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt::Debug, - path::PathBuf, -}; -use tokio::sync::mpsc; -use xor_name::XorName; - -/// The set of options to pass into the `Uploader` -#[derive(Debug, Clone, Copy)] -pub struct UploadCfg { - pub batch_size: usize, - pub verify_store: bool, - pub show_holders: bool, - pub retry_strategy: RetryStrategy, - pub max_repayments_for_failed_data: usize, // we want people to specify an explicit limit here. - pub collect_registers: bool, -} - -impl Default for UploadCfg { - fn default() -> Self { - Self { - batch_size: BATCH_SIZE, - verify_store: true, - show_holders: false, - retry_strategy: RetryStrategy::Balanced, - max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM, - collect_registers: false, - } - } -} - -/// The result of a successful upload. -#[derive(Debug, Clone)] -pub struct UploadSummary { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub final_balance: NanoTokens, - pub uploaded_addresses: BTreeSet, - pub uploaded_registers: BTreeMap, - pub uploaded_count: usize, - pub skipped_count: usize, -} - -impl UploadSummary { - /// Merge two UploadSummary together. 
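// Configuration sketch (the overridden values are illustrative, not
// recommendations): struct-update syntax keeps every other field at the
// UploadCfg::default() shown above.
let cfg = UploadCfg {
    batch_size: 32,
    retry_strategy: RetryStrategy::Quick,
    ..Default::default()
};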
- pub fn merge(mut self, other: Self) -> Result { - self.uploaded_addresses.extend(other.uploaded_addresses); - self.uploaded_registers.extend(other.uploaded_registers); - - let summary = Self { - storage_cost: self - .storage_cost - .checked_add(other.storage_cost) - .ok_or(Error::NumericOverflow)?, - royalty_fees: self - .royalty_fees - .checked_add(other.royalty_fees) - .ok_or(Error::NumericOverflow)?, - final_balance: self - .final_balance - .checked_add(other.final_balance) - .ok_or(Error::NumericOverflow)?, - uploaded_addresses: self.uploaded_addresses, - uploaded_registers: self.uploaded_registers, - uploaded_count: self.uploaded_count + other.uploaded_count, - skipped_count: self.skipped_count + other.skipped_count, - }; - Ok(summary) - } -} - -#[derive(Debug, Clone)] -/// The events emitted from the upload process. -pub enum UploadEvent { - /// Uploaded a record to the network. - ChunkUploaded(ChunkAddress), - /// Uploaded a Register to the network. - /// The returned register is just the passed in register. - RegisterUploaded(ClientRegister), - /// - /// The Chunk already exists in the network. No payments were made. - ChunkAlreadyExistsInNetwork(ChunkAddress), - /// The Register already exists in the network. The locally register changes were pushed to the network. - /// No payments were made. - /// The returned register contains the remote replica merged with the passed in register. - RegisterUpdated(ClientRegister), - /// Payment for a batch of records has been made. - PaymentMade { - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - /// The upload process has terminated with an error. - // Note: We cannot send the Error enum as it does not implement Clone. So we cannot even do Result if - // we also want to return this error from the function. - Error, -} - -pub struct Uploader { - // Has to be stored as an Option as we have to take ownership of inner during the upload. - inner: Option, -} - -impl Uploader { - /// Start the upload process. - pub async fn start_upload(mut self) -> Result { - let event_sender = self - .inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .event_sender - .clone(); - match start_upload(Box::new(self)).await { - Err(err) => { - if let Some(event_sender) = event_sender { - if let Err(err) = event_sender.send(UploadEvent::Error).await { - error!("Error while emitting event: {err:?}"); - } - } - Err(err) - } - Ok(summary) => Ok(summary), - } - } - - /// Creates a new instance of `Uploader` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - // NOTE: Self has to be constructed only using this method. We expect `Self::inner` is present everywhere. - pub fn new(client: Client, root_dir: PathBuf) -> Self { - Self { - inner: Some(InnerUploader::new(client, root_dir)), - } - } - - /// Update all the configurations by passing the `UploadCfg` struct - pub fn set_upload_cfg(&mut self, cfg: UploadCfg) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_cfg(cfg); - } - - /// Sets the default batch size that determines the number of data that are processed in parallel. - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`. 
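// End-to-end usage sketch under stated assumptions (`client`, `root_dir`
// and `chunks` prepared elsewhere): configure, queue items, optionally
// watch events, then drive the upload to completion.
let mut uploader = Uploader::new(client, root_dir);
uploader.set_batch_size(32);
uploader.insert_chunks(chunks);
let mut events = uploader.get_event_receiver();
tokio::spawn(async move {
    while let Some(event) = events.recv().await {
        println!("upload event: {event:?}");
    }
});
let summary = uploader.start_upload().await?;
println!("uploaded {}, skipped {}", summary.uploaded_count, summary.skipped_count);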
- pub fn set_batch_size(&mut self, batch_size: usize) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_batch_size(batch_size); - } - - /// Sets the option to verify the data after they have been uploaded. - /// - /// By default, this option is set to true. - pub fn set_verify_store(&mut self, verify_store: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_verify_store(verify_store); - } - - /// Sets the option to display the holders that are expected to be holding the data during verification. - /// - /// By default, this option is set to false. - pub fn set_show_holders(&mut self, show_holders: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_show_holders(show_holders); - } - - /// Sets the RetryStrategy to increase the re-try during the GetStoreCost & Upload tasks. - /// This does not affect the retries during the Payment task. Use `set_max_repayments_for_failed_data` to - /// configure the re-payment attempts. - /// - /// By default, this option is set to RetryStrategy::Quick - pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_retry_strategy(retry_strategy); - } - - /// Sets the maximum number of repayments to perform if the initial payment failed. - /// NOTE: This creates an extra Spend and uses the wallet funds. - /// - /// By default, this option is set to 1 retry. - pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_max_repayments_for_failed_data(retries); - } - - /// Enables the uploader to return all the registers that were Uploaded or Updated. - /// The registers are emitted through the event channel whenever they're completed, but this returns them - /// through the UploadSummary when the whole upload process completes. - /// - /// By default, this option is set to False - pub fn set_collect_registers(&mut self, collect_registers: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_collect_registers(collect_registers); - } - - /// Returns a receiver for UploadEvent. - /// This method is optional and the upload process can be performed without it. - pub fn get_event_receiver(&mut self) -> mpsc::Receiver { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .get_event_receiver() - } - - /// Insert a list of chunk paths to upload to upload. - pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunk_paths(chunks); - } - - /// Insert a list of chunks to upload to upload. - pub fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunks(chunks); - } - - /// Insert a list of registers to upload. - pub fn insert_register(&mut self, registers: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_register(registers); - } -} - -// ======= Private ======== - -/// An interface to make the testing easier by not interacting with the network. 
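// Illustrative contrast between the two chunk inputs above (`xorname` and
// the path are hypothetical): chunk paths defer reading the bytes until
// upload time, while `Chunk` values are held in memory from the start.
uploader.insert_chunk_paths(vec![(xorname, PathBuf::from("/tmp/chunks/0"))]);
uploader.insert_chunks(vec![Chunk::new(Bytes::from_static(b"in-memory chunk"))]);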
-trait UploaderInterface: Send + Sync { - fn take_inner_uploader(&mut self) -> InnerUploader; - - // Mutable reference is used in tests. - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ); - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ); - - #[expect(clippy::too_many_arguments)] - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ); - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ); - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ); -} - -// Configuration functions are used in tests. So these are defined here and re-used inside `Uploader` -impl InnerUploader { - pub(super) fn set_cfg(&mut self, cfg: UploadCfg) { - self.cfg = cfg; - } - - pub(super) fn set_batch_size(&mut self, batch_size: usize) { - self.cfg.batch_size = batch_size; - } - - pub(super) fn set_verify_store(&mut self, verify_store: bool) { - self.cfg.verify_store = verify_store; - } - - pub(super) fn set_show_holders(&mut self, show_holders: bool) { - self.cfg.show_holders = show_holders; - } - - pub(super) fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.cfg.retry_strategy = retry_strategy; - } - - pub(super) fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.cfg.max_repayments_for_failed_data = retries; - } - - pub(super) fn set_collect_registers(&mut self, collect_registers: bool) { - self.cfg.collect_registers = collect_registers; - } - - pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver { - let (tx, rx) = mpsc::channel(100); - self.event_sender = Some(tx); - rx - } - - pub(super) fn insert_chunk_paths( - &mut self, - chunks: impl IntoIterator, - ) { - self.all_upload_items - .extend(chunks.into_iter().map(|(xorname, path)| { - let item = UploadItem::Chunk { - address: ChunkAddress::new(xorname), - chunk: Either::Right(path), - }; - (xorname, item) - })); - } - - pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.all_upload_items - .extend(chunks.into_iter().map(|chunk| { - let xorname = *chunk.name(); - let item = UploadItem::Chunk { - address: *chunk.address(), - chunk: Either::Left(chunk), - }; - (xorname, item) - })); - } - - pub(super) fn insert_register(&mut self, registers: impl IntoIterator) { - self.all_upload_items - .extend(registers.into_iter().map(|reg| { - let address = *reg.address(); - let item = UploadItem::Register { address, reg }; - (address.xorname(), item) - })); - } -} - -#[derive(Debug, Clone)] -enum UploadItem { - Chunk { - address: ChunkAddress, - // Either the actual chunk or the path to the chunk. - chunk: Either, - }, - Register { - address: RegisterAddress, - reg: ClientRegister, - }, -} - -impl UploadItem { - fn address(&self) -> NetworkAddress { - match self { - Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address), - Self::Register { address, .. 
} => NetworkAddress::from_register_address(*address), - } - } - - fn xorname(&self) -> XorName { - match self { - UploadItem::Chunk { address, .. } => *address.xorname(), - UploadItem::Register { address, .. } => address.xorname(), - } - } -} - -#[derive(Debug)] -enum TaskResult { - GetRegisterFromNetworkOk { - remote_register: SignedRegister, - }, - GetRegisterFromNetworkErr(XorName), - PushRegisterOk { - updated_register: ClientRegister, - }, - PushRegisterErr(XorName), - GetStoreCostOk { - xorname: XorName, - quote: Box, - }, - GetStoreCostErr { - xorname: XorName, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_reached: bool, - }, - MakePaymentsOk { - paid_xornames: Vec, - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - MakePaymentsErr { - failed_xornames: Vec<(XorName, Box)>, - insufficient_balance: Option<(NanoTokens, NanoTokens)>, - }, - UploadOk(XorName), - UploadErr { - xorname: XorName, - }, -} - -#[derive(Debug, Clone)] -enum GetStoreCostStrategy { - /// Selects the PeerId with the lowest quote - Cheapest, - /// Selects the cheapest PeerId that we have not made payment to. - SelectDifferentPayee, -} diff --git a/sn_client/src/uploader/tests/mod.rs b/sn_client/src/uploader/tests/mod.rs deleted file mode 100644 index 75916bbb97..0000000000 --- a/sn_client/src/uploader/tests/mod.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod setup; - -use crate::{ - uploader::tests::setup::{ - get_dummy_chunk_paths, get_dummy_registers, get_inner_uploader, start_uploading_with_steps, - TestSteps, - }, - Error as ClientError, UploadEvent, -}; -use assert_matches::assert_matches; -use eyre::Result; -use sn_logging::LogBuilder; -use std::collections::VecDeque; -use tempfile::tempdir; - -// ===== HAPPY PATH ======= - -/// 1. Chunk: if cost =0, then chunk is present in the network. -#[tokio::test] -async fn chunk_that_already_exists_in_the_network_should_return_zero_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![TestSteps::GetStoreCostOk { - trigger_zero_cost: true, - assert_select_different_payee: false, - }]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::ChunkAlreadyExistsInNetwork(_)); - Ok(()) -} - -/// 2. Chunk: if cost !=0, then make payment upload to the network. 
-#[tokio::test] -async fn chunk_should_be_paid_for_and_uploaded_if_cost_is_not_zero() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 3. Register: if GET register = ok, then merge and push the register. -#[tokio::test] -async fn register_should_be_merged_and_pushed_if_it_already_exists_in_the_network() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![TestSteps::GetRegisterOk, TestSteps::PushRegisterOk]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::RegisterUpdated { .. }); - Ok(()) -} - -/// 4. Register: if Get register = err, then get store cost and upload. -#[tokio::test] -async fn register_should_be_paid_and_uploaded_if_it_does_not_exists() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - // todo: what if cost = 0 even after GetRegister returns error. check that - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== REPAYMENTS ====== - -/// 1. Chunks: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. 
-#[tokio::test] -async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 2. Register: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. -#[tokio::test] -async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== ERRORS ======= -/// 1. 
Registers: Multiple PushRegisterErr should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_upload_should_error_out_if_there_are_multiple_push_failures() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterOk, - TestSteps::PushRegisterErr, - TestSteps::PushRegisterErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 2. Chunk: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 3. Register: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 4. 
Chunk: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 5. Register: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -// 6: Chunks + Registers: if the number of repayments exceed a threshold, it should return MaximumRepaymentsReached error. -#[tokio::test] -async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - // initial payment done - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // first repayment - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // thus after reaching max repayments, we should error out during get store cost. 
- TestSteps::GetStoreCostErr { - assert_select_different_payee: true, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::UploadFailedWithMaximumRepaymentsReached { .. }) - ); - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - Ok(()) -} diff --git a/sn_client/src/uploader/tests/setup.rs b/sn_client/src/uploader/tests/setup.rs deleted file mode 100644 index 59f9005c4a..0000000000 --- a/sn_client/src/uploader/tests/setup.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - uploader::{ - upload::{start_upload, InnerUploader}, - GetStoreCostStrategy, TaskResult, UploadItem, UploaderInterface, - }, - ClientRegister, UploadEvent, -}; -use crate::{Client, Result as ClientResult, UploadSummary}; -use assert_matches::assert_matches; -use bls::SecretKey; -use eyre::Result; -use libp2p::PeerId; -use libp2p_identity::Keypair; -use rand::thread_rng; -use sn_networking::{NetworkBuilder, PayeeQuote}; -use sn_protocol::{storage::RetryStrategy, NetworkAddress}; -use sn_registers::{Permissions, RegisterAddress, SignedRegister}; -use sn_transfers::{MainSecretKey, NanoTokens, PaymentQuote, WalletApi}; -use std::{ - collections::{BTreeMap, VecDeque}, - path::PathBuf, - sync::Arc, -}; -use tokio::{runtime::Handle, sync::mpsc, task::JoinHandle}; -use xor_name::XorName; - -struct TestUploader { - inner: Option, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, - - // test states - make_payment_collector: Vec<(XorName, Box)>, - payments_made_per_xorname: BTreeMap, - batch_size: usize, -} - -impl UploaderInterface for TestUploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner.take().unwrap() - } - - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - _task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_register called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_get_register called for: {xorname:?}. 
Step to execute: {step:?}"); - match step { - TestSteps::GetRegisterOk => { - handle.spawn(async move { - let remote_register = - SignedRegister::test_new_from_address(reg_addr, client.signer()); - task_result_sender - .send(TaskResult::GetRegisterFromNetworkOk { remote_register }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetRegisterFromNetworkErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetRegister step. Got: {con:?}"), - } - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - _verify_store: bool, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a PushRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::PushRegisterOk => { - handle.spawn(async move { - let updated_register = match upload_item { - UploadItem::Register { reg, .. } => reg, - _ => panic!("Expected UploadItem::Register"), - }; - task_result_sender - .send(TaskResult::PushRegisterOk { - // this register is just used for returning. - updated_register, - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::PushRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::PushRegisterErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected PushRegister step. Got: {con:?}"), - } - } - - fn submit_get_store_cost_task( - &mut self, - _client: Client, - _wallet_api: WalletApi, - xorname: XorName, - _address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - _task_result_sender: mpsc::Sender, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetStoreCost step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - - let has_max_payments_reached_closure = - |get_store_cost_strategy: &GetStoreCostStrategy| -> bool { - match get_store_cost_strategy { - GetStoreCostStrategy::SelectDifferentPayee => { - if let Some(n_payments) = self.payments_made_per_xorname.get(&xorname) { - InnerUploader::have_we_reached_max_repayments( - *n_payments, - max_repayments_for_failed_data, - ) - } else { - false - } - } - _ => false, - } - }; - - // if select different payee, then it can possibly error out if max_repayments have been reached. - // then the step should've been a GetStoreCostErr. - if has_max_payments_reached_closure(&get_store_cost_strategy) { - assert_matches!(step, TestSteps::GetStoreCostErr { .. }, "Max repayments have been reached, so we expect a GetStoreCostErr, not GetStoreCostOk"); - } - - match step { - TestSteps::GetStoreCostOk { - trigger_zero_cost, - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. 
- assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. } => - assert_select_different_payee, - }); - - let mut quote = PaymentQuote::zero(); - if !trigger_zero_cost { - quote.cost = NanoTokens::from(10); - } - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(( - PeerId::random(), - MainSecretKey::random().main_pubkey(), - quote, - )), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetStoreCostErr { - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. - assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. } => - assert_select_different_payee, - }); - let max_repayments_reached = - has_max_payments_reached_closure(&get_store_cost_strategy); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetStoreCost step. Got: {con:?}"), - } - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - _make_payment_sender: mpsc::Sender)>>, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a MakePayment step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - match &to_send { - Some((upload_item, quote)) => { - let xorname = upload_item.xorname(); - println!("spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}"); - info!( - "TEST: spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}" - ); - - self.make_payment_collector - .push((upload_item.xorname(), quote.clone())); - } - None => { - println!( - "spawn_make_payment called with force make payment. Step to execute: {step:?}" - ); - info!("TEST: spawn_make_payment called with force make payment. Step to execute: {step:?}"); - } - } - - // gotta collect batch size before sending task result. 
- let _make_payment = self.make_payment_collector.len() >= self.batch_size - || (to_send.is_none() && !self.make_payment_collector.is_empty()); - - match step { - // TestSteps::MakePaymentJustCollectItem => { - // // The test expected for us to just collect item, but if the logic wants us to make payment, then it as - // // error - // assert!(!make_payment); - // } - TestSteps::MakePaymentOk => { - let paid_xornames = std::mem::take(&mut self.make_payment_collector) - .into_iter() - .map(|(xorname, _)| xorname) - .collect::>(); - // track the payments per xorname - for xorname in paid_xornames.iter() { - let entry = self.payments_made_per_xorname.entry(*xorname).or_insert(0); - *entry += 1; - } - let batch_size = self.batch_size; - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost: NanoTokens::from(batch_size as u64 * 10), - royalty_fees: NanoTokens::from(batch_size as u64 * 3), - new_balance: NanoTokens::from(batch_size as u64 * 1000), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::MakePaymentErr => { - let failed_xornames = std::mem::take(&mut self.make_payment_collector); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: None, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected MakePayment step. Got: {con:?}"), - } - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - _client: Client, - _wallet_api: WalletApi, - _verify_store: bool, - _retry_strategy: RetryStrategy, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a UploadItem step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::UploadItemOk => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadOk(xorname)) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::UploadItemErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected UploadItem step. Got: {con:?}"), - } - } -} - -#[derive(Debug, Clone)] -pub enum TestSteps { - GetRegisterOk, - GetRegisterErr, - PushRegisterOk, - PushRegisterErr, - GetStoreCostOk { - trigger_zero_cost: bool, - assert_select_different_payee: bool, - }, - GetStoreCostErr { - assert_select_different_payee: bool, - }, - // MakePaymentJustCollectItem, - MakePaymentOk, - MakePaymentErr, - UploadItemOk, - UploadItemErr, -} - -pub fn get_inner_uploader(root_dir: PathBuf) -> Result<(InnerUploader, mpsc::Sender)> { - let client = build_unconnected_client(root_dir.clone())?; - - let mut inner = InnerUploader::new(client, root_dir); - let (task_result_sender, task_result_receiver) = mpsc::channel(100); - inner.testing_task_channels = Some((task_result_sender.clone(), task_result_receiver)); - - Ok((inner, task_result_sender)) -} - -// Spawns two tasks. One is the actual upload task that will return an UploadStat when completed. -// The other is a one to collect all the UploadEvent emitted by the previous task. 
-pub fn start_uploading_with_steps( - mut inner_uploader: InnerUploader, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, -) -> ( - JoinHandle>, - JoinHandle>, -) { - let batch_size = inner_uploader.cfg.batch_size; - let mut upload_event_rx = inner_uploader.get_event_receiver(); - - let upload_handle = tokio::spawn(start_upload(Box::new(TestUploader { - inner: Some(inner_uploader), - test_steps, - task_result_sender, - make_payment_collector: Default::default(), - payments_made_per_xorname: Default::default(), - batch_size, - }))); - - let event_handle = tokio::spawn(async move { - let mut events = vec![]; - while let Some(event) = upload_event_rx.recv().await { - events.push(event); - } - events - }); - - (upload_handle, event_handle) -} - -// Collect all the upload events into a list - -// Build a very simple client struct for testing. This does not connect to any network. -// The UploaderInterface eliminates the need for direct networking in tests. -pub fn build_unconnected_client(root_dir: PathBuf) -> Result { - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), true, root_dir); - let (network, ..) = network_builder.build_client()?; - let client = Client { - network, - events_broadcaster: Default::default(), - signer: Arc::new(SecretKey::random()), - }; - Ok(client) -} - -// We don't perform any networking, so the paths can be dummy ones. -pub fn get_dummy_chunk_paths(num: usize, temp_dir: PathBuf) -> Vec<(XorName, PathBuf)> { - let mut rng = thread_rng(); - let mut chunks = Vec::with_capacity(num); - for _ in 0..num { - chunks.push((XorName::random(&mut rng), temp_dir.clone())); - } - chunks -} - -pub fn get_dummy_registers(num: usize, client: Client) -> Vec { - let mut rng = thread_rng(); - let mut registers = Vec::with_capacity(num); - for _ in 0..num { - // test_new_from_address that is used during get_register, - // uses AnyoneCanWrite permission, so use the same here - let client_reg = ClientRegister::create_register( - client.clone(), - XorName::random(&mut rng), - Permissions::AnyoneCanWrite, - ); - - registers.push(client_reg); - } - registers -} diff --git a/sn_client/src/uploader/upload.rs b/sn_client/src/uploader/upload.rs deleted file mode 100644 index 857c9fc31c..0000000000 --- a/sn_client/src/uploader/upload.rs +++ /dev/null @@ -1,1084 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
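-
-// Editor's overview (illustrative commentary, not part of the original file): items
-// move through the uploader's queues roughly as follows. The names match the fields
-// on `InnerUploader` defined further below, and each transition is driven by a
-// `TaskResult` received in the main loop of `start_upload`:
-//
-//   pending_to_get_register   -> on_going_get_register   -> pending_to_push_register
-//   pending_to_push_register  -> on_going_push_register  -> done (register merged + pushed)
-//   pending_to_get_store_cost -> on_going_get_cost       -> pending_to_pay
-//   pending_to_pay            -> on_going_payments       -> pending_to_upload
-//   pending_to_upload         -> on_going_uploads        -> done (chunk/register stored)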
-
-use super::{
-    GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary,
-    UploaderInterface,
-};
-use crate::{
-    acc_packet::load_account_wallet_or_create_with_mnemonic,
-    transfers::{TransferError, WalletError},
-    Client, ClientRegister, Error as ClientError, Result, Uploader, WalletClient,
-};
-use bytes::Bytes;
-use itertools::Either;
-use libp2p::PeerId;
-use sn_networking::PayeeQuote;
-use sn_protocol::{
-    storage::{Chunk, RetryStrategy},
-    NetworkAddress,
-};
-use sn_registers::{RegisterAddress, SignedRegister};
-use sn_transfers::{NanoTokens, WalletApi};
-use std::{
-    collections::{BTreeMap, BTreeSet, HashMap},
-    path::{Path, PathBuf},
-};
-use tiny_keccak::{Hasher, Sha3};
-use tokio::sync::mpsc;
-use xor_name::XorName;
-
-/// The number of repayments to attempt for a failed item before returning an error.
-/// If value = 1, we make an initial payment & 1 repayment, i.e., a maximum of 2 payments per data item.
-#[cfg(not(test))]
-pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 3;
-#[cfg(test)]
-pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1;
-
-/// The maximum number of sequential payment failures before aborting the upload process.
-#[cfg(not(test))]
-const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 3;
-#[cfg(test)]
-const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 1;
-
-/// The maximum number of sequential network failures before aborting the upload process.
-// todo: use uploader.retry_strategy.get_count() instead.
-#[cfg(not(test))]
-const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 32;
-#[cfg(test)]
-const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 1;
-
-/// The number of upload failures for a single data item before we select a different payee for it.
-#[cfg(not(test))]
-const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 3;
-#[cfg(test)]
-const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1;
-
-// TODO:
-// 1. log whenever we insert/remove items, i.e., don't ignore values with `let _`
-
-/// The main loop that performs the upload process.
-/// An interface is passed here for easy testing.
-pub(super) async fn start_upload(
-    mut interface: Box<dyn UploaderInterface>,
-) -> Result<UploadSummary> {
-    let mut uploader = interface.take_inner_uploader();
-    // Take out the testing task senders if any. This is only set for tests.
-    let (task_result_sender, mut task_result_receiver) =
-        if let Some(channels) = uploader.testing_task_channels.take() {
-            channels
-        } else {
-            // 6 because of the 6 pipelines, plus 1 for redundancy.
-            mpsc::channel(uploader.cfg.batch_size * 6 + 1)
-        };
-    let (make_payment_sender, make_payment_receiver) = mpsc::channel(uploader.cfg.batch_size);
-
-    uploader.start_make_payment_processing_loop(
-        make_payment_receiver,
-        task_result_sender.clone(),
-        uploader.cfg.batch_size,
-    )?;
-
-    // chunks can be pushed to pending_to_get_store_cost directly
-    uploader.pending_to_get_store_cost = uploader
-        .all_upload_items
-        .iter()
-        .filter_map(|(xorname, item)| {
-            if let UploadItem::Chunk { .. } = item {
-                Some((*xorname, GetStoreCostStrategy::Cheapest))
-            } else {
-                None
-            }
-        })
-        .collect();
-
-    // registers have to be verified + merged with the remote replica, so we have to fetch them first.
-    uploader.pending_to_get_register = uploader
-        .all_upload_items
-        .iter()
-        .filter_map(|(_xorname, item)| {
-            if let UploadItem::Register { address, .. } = item {
-                Some(*address)
-            } else {
-                None
-            }
-        })
-        .collect();
-
-    loop {
-        // Break if we have uploaded all the items.
-        // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times.
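-        // Illustrative note (editor's sketch, not original code): the abort conditions
-        // mentioned above are enforced via the error counters further down, roughly:
-        //
-        //   if push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS  { return Err(SequentialNetworkErrors) }
-        //   if get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { return Err(SequentialNetworkErrors) }
-        //   if make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS  { return Err(SequentialUploadPaymentError) }
-        //
-        // A success resets the matching counter (e.g. `get_store_cost_errors = 0`),
-        // so only *sequential* failures can abort the upload.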
- if uploader.all_upload_items.is_empty() { - debug!("Upload items are empty, exiting main upload loop."); - // To avoid empty final_balance when all items are skipped. - uploader.upload_final_balance = - InnerUploader::load_wallet_client(uploader.client.clone(), &uploader.root_dir)? - .balance(); - #[cfg(test)] - trace!("UPLOADER STATE: finished uploading all items {uploader:?}"); - let summary = UploadSummary { - storage_cost: uploader.upload_storage_cost, - royalty_fees: uploader.upload_royalty_fees, - final_balance: uploader.upload_final_balance, - uploaded_addresses: uploader.uploaded_addresses, - uploaded_count: uploader.uploaded_count, - skipped_count: uploader.skipped_count, - uploaded_registers: uploader.uploaded_registers, - }; - - if !uploader.max_repayments_reached.is_empty() { - error!( - "The maximum repayments were reached for these addresses: {:?}", - uploader.max_repayments_reached - ); - return Err(ClientError::UploadFailedWithMaximumRepaymentsReached { - items: uploader.max_repayments_reached.into_iter().collect(), - summary, - }); - } - - return Ok(summary); - } - - // try to GET register if we have enough buffer. - // The results of the get & push register steps are used to fill up `pending_to_get_store` cost - // Since the get store cost list is the init state, we don't have to check if it is not full. - while !uploader.pending_to_get_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - if let Some(reg_addr) = uploader.pending_to_get_register.pop() { - trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); - let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); - interface.submit_get_register_task( - uploader.client.clone(), - reg_addr, - task_result_sender.clone(), - ); - } - } - - // try to push register if we have enough buffer. - // No other checks for the same reason as the above step. - while !uploader.pending_to_push_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - let upload_item = uploader.pop_item_for_push_register()?; - trace!( - "Conditions met for push registers {:?}", - upload_item.xorname() - ); - let _ = uploader - .on_going_push_register - .insert(upload_item.xorname()); - interface.submit_push_register_task( - upload_item, - uploader.cfg.verify_store, - task_result_sender.clone(), - ); - } - - // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer. - while !uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.len() < uploader.cfg.batch_size - && uploader.pending_to_pay.len() < uploader.cfg.batch_size - { - let (xorname, address, get_store_cost_strategy) = - uploader.pop_item_for_get_store_cost()?; - trace!("Conditions met for get store cost. {xorname:?} {get_store_cost_strategy:?}",); - - let _ = uploader.on_going_get_cost.insert(xorname); - interface.submit_get_store_cost_task( - uploader.client.clone(), - uploader.wallet_api.clone(), - xorname, - address, - get_store_cost_strategy, - uploader.cfg.max_repayments_for_failed_data, - task_result_sender.clone(), - ); - } - - // try to make payment for an item if pending_to_upload needs items & if we have enough buffer. - while !uploader.pending_to_pay.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - && uploader.pending_to_upload.len() < uploader.cfg.batch_size - { - let (upload_item, quote) = uploader.pop_item_for_make_payment()?; - trace!( - "Conditions met for making payments. 
{:?} {quote:?}", - upload_item.xorname() - ); - let _ = uploader.on_going_payments.insert(upload_item.xorname()); - - interface - .submit_make_payment_task(Some((upload_item, quote)), make_payment_sender.clone()); - } - - // try to upload if we have enough buffer to upload. - while !uploader.pending_to_upload.is_empty() - && uploader.on_going_uploads.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: upload_item : {uploader:?}"); - let upload_item = uploader.pop_item_for_upload_item()?; - - trace!("Conditions met for uploading. {:?}", upload_item.xorname()); - let _ = uploader.on_going_uploads.insert(upload_item.xorname()); - interface.submit_upload_item_task( - upload_item, - uploader.client.clone(), - uploader.wallet_api.clone(), - uploader.cfg.verify_store, - uploader.cfg.retry_strategy, - task_result_sender.clone(), - ); - } - - // Fire None to trigger a forced round of making leftover payments, if there are not enough store cost tasks - // to fill up the buffer. - if uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.is_empty() - && !uploader.on_going_payments.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: make_payment (forced): {uploader:?}"); - - debug!("There are not enough on going payments to trigger a batch Payment and no get_store_costs to fill the batch. Triggering forced round of payment"); - interface.submit_make_payment_task(None, make_payment_sender.clone()); - } - - #[cfg(test)] - trace!("UPLOADER STATE: before await task result: {uploader:?}"); - - trace!("Fetching task result"); - let task_result = task_result_receiver - .recv() - .await - .ok_or(ClientError::InternalTaskChannelDropped)?; - trace!("Received task result: {task_result:?}"); - match task_result { - TaskResult::GetRegisterFromNetworkOk { remote_register } => { - // if we got back the register, then merge & PUT it. - let xorname = remote_register.address().xorname(); - trace!("TaskResult::GetRegisterFromNetworkOk for remote register: {xorname:?} \n{remote_register:?}"); - let _ = uploader.on_going_get_register.remove(&xorname); - - let reg = uploader - .all_upload_items - .get_mut(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - if let UploadItem::Register { reg, .. } = reg { - reg.merge(&remote_register); - uploader.pending_to_push_register.push(xorname); - } - } - TaskResult::GetRegisterFromNetworkErr(xorname) => { - // then the register is a new one. It can follow the same flow as chunks now. 
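-                // Editor's summary (illustrative, not original code) of the two register
-                // paths handled by these task results:
-                //
-                //   GetRegisterFromNetworkOk  => merge the remote copy into ours, then push it
-                //   GetRegisterFromNetworkErr => the register is new: get store cost -> pay -> upload,
-                //                                exactly like a chunk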
-                let _ = uploader.on_going_get_register.remove(&xorname);
-
-                uploader
-                    .pending_to_get_store_cost
-                    .push((xorname, GetStoreCostStrategy::Cheapest));
-            }
-            TaskResult::PushRegisterOk { updated_register } => {
-                // push modifies the register, so we return this instead of the one from all_upload_items
-                let xorname = updated_register.address().xorname();
-                let _ = uploader.on_going_push_register.remove(&xorname);
-                uploader.skipped_count += 1;
-                let _ = uploader
-                    .uploaded_addresses
-                    .insert(NetworkAddress::from_register_address(
-                        *updated_register.address(),
-                    ));
-
-                let _old_register = uploader
-                    .all_upload_items
-                    .remove(&xorname)
-                    .ok_or(ClientError::UploadableItemNotFound(xorname))?;
-
-                if uploader.cfg.collect_registers {
-                    let _ = uploader
-                        .uploaded_registers
-                        .insert(*updated_register.address(), updated_register.clone());
-                }
-                uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register));
-            }
-            TaskResult::PushRegisterErr(xorname) => {
-                // the register failed to be pushed. Retry until the max failure count is reached.
-                let _ = uploader.on_going_push_register.remove(&xorname);
-                uploader.pending_to_push_register.push(xorname);
-
-                uploader.push_register_errors += 1;
-                if uploader.push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS {
-                    error!("Max sequential network failures reached during PushRegisterErr.");
-                    return Err(ClientError::SequentialNetworkErrors);
-                }
-            }
-            TaskResult::GetStoreCostOk { xorname, quote } => {
-                let _ = uploader.on_going_get_cost.remove(&xorname);
-                uploader.get_store_cost_errors = 0; // reset the error count on Ok; we only error out after 'n' sequential errors
-
-                trace!("GetStoreCostOk for {xorname:?}'s store_cost {:?}", quote.2);
-
-                if quote.2.cost != NanoTokens::zero() {
-                    uploader.pending_to_pay.push((xorname, quote));
-                }
-                // if the cost is 0, then it is already in the network.
-                else {
-                    // remove the item since we have uploaded it.
-                    let removed_item = uploader
-                        .all_upload_items
-                        .remove(&xorname)
-                        .ok_or(ClientError::UploadableItemNotFound(xorname))?;
-                    let _ = uploader.uploaded_addresses.insert(removed_item.address());
-                    trace!("{xorname:?} has store cost of 0 and it already exists on the network");
-                    uploader.skipped_count += 1;
-
-                    // if during the first try we skip the item, then it is already present in the network.
-                    match removed_item {
-                        UploadItem::Chunk { address, .. } => {
-                            uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork(
-                                address,
-                            ));
-                        }
-
-                        UploadItem::Register { reg, .. } => {
-                            if uploader.cfg.collect_registers {
-                                let _ = uploader
-                                    .uploaded_registers
-                                    .insert(*reg.address(), reg.clone());
-                            }
-                            uploader.emit_upload_event(UploadEvent::RegisterUpdated(reg));
-                        }
-                    }
-                }
-            }
-            TaskResult::GetStoreCostErr {
-                xorname,
-                get_store_cost_strategy,
-                max_repayments_reached,
-            } => {
-                let _ = uploader.on_going_get_cost.remove(&xorname);
-                trace!("GetStoreCostErr for {xorname:?}, get_store_cost_strategy: {get_store_cost_strategy:?}, max_repayments_reached: {max_repayments_reached:?}");
-
-                // If max repayments reached, track it separately. Else retry get_store_cost.
-                if max_repayments_reached {
-                    error!("Max repayments reached for {xorname:?}. Skipping upload for it");
-                    uploader.max_repayments_reached.insert(xorname);
-                    uploader.all_upload_items.remove(&xorname);
-                } else {
-                    // Use the same strategy. SelectDifferentPayee is set only if an upload fails.
- uploader - .pending_to_get_store_cost - .push((xorname, get_store_cost_strategy.clone())); - } - uploader.get_store_cost_errors += 1; - if uploader.get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { - error!("Max sequential network failures reached during GetStoreCostErr."); - return Err(ClientError::SequentialNetworkErrors); - } - } - TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost, - royalty_fees, - new_balance, - } => { - trace!("MakePaymentsOk for {} items: hash({:?}), with {storage_cost:?} store_cost and {royalty_fees:?} royalty_fees, and new_balance is {new_balance:?}", - paid_xornames.len(), InnerUploader::hash_of_xornames(paid_xornames.iter())); - for xorname in paid_xornames.iter() { - let _ = uploader.on_going_payments.remove(xorname); - } - uploader.pending_to_upload.extend(paid_xornames); - uploader.make_payments_errors = 0; - uploader.upload_final_balance = new_balance; - uploader.upload_storage_cost = uploader - .upload_storage_cost - .checked_add(storage_cost) - .ok_or(ClientError::TotalPriceTooHigh)?; - uploader.upload_royalty_fees = uploader - .upload_royalty_fees - .checked_add(royalty_fees) - .ok_or(ClientError::TotalPriceTooHigh)?; - - // reset sequential payment fail error if ok. We throw error if payment fails continuously more than - // MAX_SEQUENTIAL_PAYMENT_FAILS errors. - uploader.emit_upload_event(UploadEvent::PaymentMade { - storage_cost, - royalty_fees, - new_balance, - }); - } - TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance, - } => { - trace!( - "MakePaymentsErr for {:?} items: hash({:?})", - failed_xornames.len(), - InnerUploader::hash_of_xornames(failed_xornames.iter().map(|(name, _)| name)) - ); - if let Some((available, required)) = insufficient_balance { - error!("Wallet does not have enough funds. This error is not recoverable"); - return Err(ClientError::Wallet(WalletError::Transfer( - TransferError::NotEnoughBalance(available, required), - ))); - } - - for (xorname, quote) in failed_xornames { - let _ = uploader.on_going_payments.remove(&xorname); - uploader.pending_to_pay.push((xorname, quote)); - } - uploader.make_payments_errors += 1; - - if uploader.make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS { - error!("Max sequential upload failures reached during MakePaymentsErr."); - // Too many sequential overall payment failure indicating - // unrecoverable failure of spend tx continuously rejected by network. - // The entire upload process shall be terminated. - return Err(ClientError::SequentialUploadPaymentError); - } - } - TaskResult::UploadOk(xorname) => { - let _ = uploader.on_going_uploads.remove(&xorname); - uploader.uploaded_count += 1; - trace!("UploadOk for {xorname:?}"); - // remove the item since we have uploaded it. - let removed_item = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - let _ = uploader.uploaded_addresses.insert(removed_item.address()); - - match removed_item { - UploadItem::Chunk { address, .. } => { - uploader.emit_upload_event(UploadEvent::ChunkUploaded(address)); - } - UploadItem::Register { reg, .. 
} => { - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*reg.address(), reg.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUploaded(reg)); - } - } - } - TaskResult::UploadErr { xorname } => { - let _ = uploader.on_going_uploads.remove(&xorname); - trace!("UploadErr for {xorname:?}"); - - // keep track of the failure - let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0); - *n_errors += 1; - - // if quote has expired, don't retry the upload again. Instead get the cheapest quote again. - if *n_errors > UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE { - // if error > threshold, then select different payee. else retry again - // Also reset n_errors as we want to enable retries for the new payee. - *n_errors = 0; - debug!("Max error during upload reached for {xorname:?}. Selecting a different payee."); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::SelectDifferentPayee)); - } else { - uploader.pending_to_upload.push(xorname); - } - } - } - } -} - -impl UploaderInterface for Uploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner - .take() - .expect("Uploader::new makes sure inner is present") - } - - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning get_store_cost for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_store_cost( - client, - wallet_api, - xorname, - address, - get_store_cost_strategy.clone(), - max_repayments_for_failed_data, - ) - .await - { - Ok(quote) => { - debug!("StoreCosts retrieved for {xorname:?} quote: {quote:?}"); - TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(quote), - } - } - Err(err) => { - error!("Encountered error {err:?} when getting store_cost for {xorname:?}",); - - let max_repayments_reached = - matches!(&err, ClientError::MaximumRepaymentsReached(_)); - - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } - } - }; - - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - trace!("Spawning get_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_register(client, reg_addr).await { - Ok(register) => { - debug!("Register retrieved for {xorname:?}"); - TaskResult::GetRegisterFromNetworkOk { - remote_register: register, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - warn!("Encountered error {err:?} during get_register. 
The register has to be PUT as it is a new one."); - TaskResult::GetRegisterFromNetworkErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - trace!("Spawning push_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::push_register(upload_item, verify_store).await { - Ok(reg) => { - debug!("Register pushed: {xorname:?}"); - TaskResult::PushRegisterOk { - updated_register: reg, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - error!("Encountered error {err:?} during push_register. The register might not be present in the network"); - TaskResult::PushRegisterErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ) { - let _handle = tokio::spawn(async move { - let _ = make_payment_sender.send(to_send).await; - }); - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning upload item task for {:?}", upload_item.xorname()); - - let _handle = tokio::spawn(async move { - let xorname = upload_item.xorname(); - let result = InnerUploader::upload_item( - client, - wallet_api, - upload_item, - verify_store, - retry_strategy, - ) - .await; - - trace!("Upload item {xorname:?} uploaded with result {result:?}"); - match result { - Ok(_) => { - let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; - } - Err(_) => { - let _ = task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await; - } - }; - }); - } -} - -/// `Uploader` provides functionality for uploading both Chunks and Registers with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). 
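-// Editor's sketch of how the public `Uploader` might be driven (illustrative only:
-// `set_batch_size` and `set_verify_store` are hypothetical stand-ins for the real
-// `set_...` methods, and the `new`/start signatures are assumed, not taken from this file):
-//
-//   let mut uploader = Uploader::new(client.clone(), root_dir.clone());
-//   uploader.set_batch_size(16);     // hypothetical setter
-//   uploader.set_verify_store(true); // hypothetical setter
-//   let summary = uploader.start_upload().await?; // however the upload is kicked off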
-#[derive(custom_debug::Debug)]
-pub(super) struct InnerUploader {
-    pub(super) cfg: UploadCfg,
-    #[debug(skip)]
-    pub(super) client: Client,
-    #[debug(skip)]
-    pub(super) wallet_api: WalletApi,
-    pub(super) root_dir: PathBuf,
-
-    // states
-    pub(super) all_upload_items: HashMap<XorName, UploadItem>,
-    pub(super) pending_to_get_register: Vec<RegisterAddress>,
-    pub(super) pending_to_push_register: Vec<XorName>,
-    pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>,
-    pub(super) pending_to_pay: Vec<(XorName, Box<PayeeQuote>)>,
-    pub(super) pending_to_upload: Vec<XorName>,
-
-    // trackers
-    pub(super) on_going_get_register: BTreeSet<XorName>,
-    pub(super) on_going_push_register: BTreeSet<XorName>,
-    pub(super) on_going_get_cost: BTreeSet<XorName>,
-    pub(super) on_going_payments: BTreeSet<XorName>,
-    pub(super) on_going_uploads: BTreeSet<XorName>,
-
-    // error trackers
-    pub(super) n_errors_during_uploads: BTreeMap<XorName, usize>,
-    pub(super) push_register_errors: usize,
-    pub(super) get_store_cost_errors: usize,
-    pub(super) make_payments_errors: usize,
-
-    // Upload summary
-    pub(super) upload_storage_cost: NanoTokens,
-    pub(super) upload_royalty_fees: NanoTokens,
-    pub(super) upload_final_balance: NanoTokens,
-    pub(super) max_repayments_reached: BTreeSet<XorName>,
-    pub(super) uploaded_addresses: BTreeSet<NetworkAddress>,
-    pub(super) uploaded_registers: BTreeMap<RegisterAddress, ClientRegister>,
-    pub(super) uploaded_count: usize,
-    pub(super) skipped_count: usize,
-
-    // Task channels for testing. Not used in actual code.
-    pub(super) testing_task_channels:
-        Option<(mpsc::Sender<TaskResult>, mpsc::Receiver<TaskResult>)>,
-
-    // Public events
-    #[debug(skip)]
-    pub(super) logged_event_sender_absence: bool,
-    #[debug(skip)]
-    pub(super) event_sender: Option<mpsc::Sender<UploadEvent>>,
-}
-
-impl InnerUploader {
-    pub(super) fn new(client: Client, root_dir: PathBuf) -> Self {
-        Self {
-            cfg: Default::default(),
-            client,
-            wallet_api: WalletApi::new_from_root_dir(&root_dir),
-            root_dir,
-
-            all_upload_items: Default::default(),
-            pending_to_get_register: Default::default(),
-            pending_to_push_register: Default::default(),
-            pending_to_get_store_cost: Default::default(),
-            pending_to_pay: Default::default(),
-            pending_to_upload: Default::default(),
-
-            on_going_get_register: Default::default(),
-            on_going_push_register: Default::default(),
-            on_going_get_cost: Default::default(),
-            on_going_payments: Default::default(),
-            on_going_uploads: Default::default(),
-
-            n_errors_during_uploads: Default::default(),
-            push_register_errors: Default::default(),
-            get_store_cost_errors: Default::default(),
-            max_repayments_reached: Default::default(),
-            make_payments_errors: Default::default(),
-
-            upload_storage_cost: NanoTokens::zero(),
-            upload_royalty_fees: NanoTokens::zero(),
-            upload_final_balance: NanoTokens::zero(),
-            uploaded_addresses: Default::default(),
-            uploaded_registers: Default::default(),
-            uploaded_count: Default::default(),
-            skipped_count: Default::default(),
-
-            testing_task_channels: None,
-            logged_event_sender_absence: Default::default(),
-            event_sender: Default::default(),
-        }
-    }
-
-    // ====== Pop items ======
-
-    fn pop_item_for_push_register(&mut self) -> Result<UploadItem> {
-        if let Some(name) = self.pending_to_push_register.pop() {
-            let upload_item = self
-                .all_upload_items
-                .get(&name)
-                .cloned()
-                .ok_or(ClientError::UploadableItemNotFound(name))?;
-            Ok(upload_item)
-        } else {
-            // the caller will be making sure this does not happen.
-            Err(ClientError::UploadStateTrackerIsEmpty)
-        }
-    }
-
-    fn pop_item_for_get_store_cost(
-        &mut self,
-    ) -> Result<(XorName, NetworkAddress, GetStoreCostStrategy)> {
-        let (xorname, strategy) = self
-            .pending_to_get_store_cost
-            .pop()
-            .ok_or(ClientError::UploadStateTrackerIsEmpty)?;
-        let address = self
-            .all_upload_items
-            .get(&xorname)
-            .map(|item| item.address())
-            .ok_or(ClientError::UploadableItemNotFound(xorname))?;
-        Ok((xorname, address, strategy))
-    }
-
-    fn pop_item_for_make_payment(&mut self) -> Result<(UploadItem, Box<PayeeQuote>)> {
-        if let Some((name, quote)) = self.pending_to_pay.pop() {
-            let upload_item = self
-                .all_upload_items
-                .get(&name)
-                .cloned()
-                .ok_or(ClientError::UploadableItemNotFound(name))?;
-            Ok((upload_item, quote))
-        } else {
-            // the caller will be making sure this does not happen.
-            Err(ClientError::UploadStateTrackerIsEmpty)
-        }
-    }
-
-    fn pop_item_for_upload_item(&mut self) -> Result<UploadItem> {
-        if let Some(name) = self.pending_to_upload.pop() {
-            let upload_item = self
-                .all_upload_items
-                .get(&name)
-                .cloned()
-                .ok_or(ClientError::UploadableItemNotFound(name))?;
-            Ok(upload_item)
-        } else {
-            // the caller will be making sure this does not happen.
-            Err(ClientError::UploadStateTrackerIsEmpty)
-        }
-    }
-
-    // ====== Processing Loop ======
-
-    // This is spawned as a long running task to prevent us from reading the wallet files
-    // each time we have to make a payment.
-    fn start_make_payment_processing_loop(
-        &self,
-        mut make_payment_receiver: mpsc::Receiver<Option<(UploadItem, Box<PayeeQuote>)>>,
-        task_result_sender: mpsc::Sender<TaskResult>,
-        batch_size: usize,
-    ) -> Result<()> {
-        let mut wallet_client = Self::load_wallet_client(self.client.clone(), &self.root_dir)?;
-
-        let verify_store = self.cfg.verify_store;
-        let _handle = tokio::spawn(async move {
-            debug!("Spawning the long running make payment processing loop.");
-
-            let mut cost_map = BTreeMap::new();
-            let mut current_batch = vec![];
-
-            let mut got_a_previous_force_payment = false;
-            while let Some(payment) = make_payment_receiver.recv().await {
-                let make_payments = if let Some((item, quote)) = payment {
-                    let xorname = item.xorname();
-                    trace!("Inserted {xorname:?} into cost_map");
-
-                    current_batch.push((xorname, quote.clone()));
-                    let _ = cost_map.insert(xorname, (quote.1, quote.2, quote.0.to_bytes()));
-                    cost_map.len() >= batch_size || got_a_previous_force_payment
-                } else {
-                    // A `None` instructs us to pay for everything collected so far.
-                    let make_payments = !cost_map.is_empty();
-                    trace!("Got a forced round of make payment.");
-                    // Note: There can be a mismatch of ordering between the main loop and the make payment loop,
-                    // because the instructions are sent via a spawned task (`channel.send().await`) and there is
-                    // no guarantee that they arrive in the same order as they were sent.
-                    //
-                    // We cannot just disobey the instruction inside the child loop, as the main loop would be
-                    // expecting a result back for a particular instruction.
-                    if !make_payments {
-                        got_a_previous_force_payment = true;
-                        warn!(
-                            "We were told to force make payment, but cost_map is empty, so we can't do that \
-                             just yet. Waiting for a task to insert a quote into cost_map"
-                        )
-                    }
-
-                    make_payments
-                };
-
-                if make_payments {
-                    // reset force_make_payment
-                    if got_a_previous_force_payment {
-                        info!("A task inserted a quote into cost_map, so we can now make a forced round of payment!");
-                        got_a_previous_force_payment = false;
-                    }
-
-                    let _ = wallet_client
-                        .resend_pending_transaction_blocking_loop()
-                        .await;
-
-                    let mut terminate_process = false;
-
-                    let result = match wallet_client.pay_for_records(&cost_map, verify_store).await
-                    {
-                        Ok((storage_cost, royalty_fees)) => {
-                            let paid_xornames = std::mem::take(&mut current_batch);
-                            let paid_xornames = paid_xornames
-                                .into_iter()
-                                .map(|(xorname, _)| xorname)
-                                .collect::<Vec<_>>();
-                            trace!(
-                                "Made payments for {} records: hash({:?})",
-                                cost_map.len(),
-                                Self::hash_of_xornames(paid_xornames.iter())
-                            );
-                            TaskResult::MakePaymentsOk {
-                                paid_xornames,
-                                storage_cost,
-                                royalty_fees,
-                                new_balance: wallet_client.balance(),
-                            }
-                        }
-                        Err(err) => {
-                            let failed_xornames = std::mem::take(&mut current_batch);
-                            error!(
-                                "When paying {} data: hash({:?}) got error {err:?}",
-                                failed_xornames.len(),
-                                Self::hash_of_xornames(
-                                    failed_xornames.iter().map(|(name, _)| name)
-                                )
-                            );
-                            match err {
-                                WalletError::Transfer(TransferError::NotEnoughBalance(
-                                    available,
-                                    required,
-                                )) => {
-                                    terminate_process = true;
-                                    TaskResult::MakePaymentsErr {
-                                        failed_xornames,
-                                        insufficient_balance: Some((available, required)),
-                                    }
-                                }
-                                _ => TaskResult::MakePaymentsErr {
-                                    failed_xornames,
-                                    insufficient_balance: None,
-                                },
-                            }
-                        }
-                    };
-                    let pay_for_chunk_sender_clone = task_result_sender.clone();
-                    let _handle = tokio::spawn(async move {
-                        let _ = pay_for_chunk_sender_clone.send(result).await;
-                    });
-
-                    cost_map = BTreeMap::new();
-
-                    if terminate_process {
-                        // The error will trigger the entire upload process to be terminated.
-                        // Hence we terminate the inner loop first here, to avoid the wallet
-                        // potentially becoming corrupted by going any further.
-                        warn!(
-                            "Terminating make payment processing loop due to an unrecoverable error."
-                        );
-                        break;
-                    }
-                }
-            }
-            debug!("Make payment processing loop terminated.");
-        });
-        Ok(())
-    }
-
-    // ====== Logic ======
-
-    async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result<SignedRegister> {
-        client.verify_register_stored(reg_addr).await
-    }
-
-    async fn push_register(upload_item: UploadItem, verify_store: bool) -> Result<ClientRegister> {
-        let mut reg = if let UploadItem::Register { reg, .. } = upload_item {
-            reg
-        } else {
-            return Err(ClientError::InvalidUploadItemFound);
-        };
-        reg.push(verify_store).await?;
-        Ok(reg)
-    }
-
-    async fn get_store_cost(
-        client: Client,
-        wallet_api: WalletApi,
-        xorname: XorName,
-        address: NetworkAddress,
-        get_store_cost_strategy: GetStoreCostStrategy,
-        max_repayments_for_failed_data: usize,
-    ) -> Result<PayeeQuote> {
-        let filter_list = match get_store_cost_strategy {
-            GetStoreCostStrategy::Cheapest => vec![],
-            GetStoreCostStrategy::SelectDifferentPayee => {
-                // Check if we have already made payment for the provided xorname. If so, filter out those payees.
-                let filter_list = wallet_api
-                    .get_all_payments(&xorname)?
-                    .into_iter()
-                    .map(|details| {
-                        PeerId::from_bytes(&details.peer_id_bytes).map_err(|_| {
-                            ClientError::Wallet(WalletError::NoPaymentForAddress(xorname))
-                        })
-                    })
-                    .collect::<Result<Vec<_>>>()?;
-
-                // if we have already made the initial payment + max_repayments, then we should error out.
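-                // Worked example (editor's note, not original code): with
-                // max_repayments_for_failed_data = 3, an item can be paid for at most
-                // 4 times in total (1 initial payment + 3 repayments). `filter_list`
-                // holds one payee per payment already made, so:
-                //
-                //   have_we_reached_max_repayments(4, 3) == true  => error out below
-                //   have_we_reached_max_repayments(3, 3) == false => one more repayment allowed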
- if Self::have_we_reached_max_repayments( - filter_list.len(), - max_repayments_for_failed_data, - ) { - // error is used by the caller. - return Err(ClientError::MaximumRepaymentsReached(xorname)); - } - - debug!("Filtering out payments from {filter_list:?} during get_store_cost for {xorname:?}"); - filter_list - } - }; - let quote = client - .network - .get_store_costs_from_network(address, filter_list) - .await?; - Ok(quote) - } - - async fn upload_item( - client: Client, - wallet_api: WalletApi, - upload_item: UploadItem, - verify_store: bool, - retry_strategy: RetryStrategy, - ) -> Result<()> { - let xorname = upload_item.xorname(); - - let payment_details = wallet_api.get_recent_payment(&xorname)?; - let payment = payment_details.to_payment(); - let payee = PeerId::from_bytes(&payment_details.peer_id_bytes) - .map_err(|_| ClientError::Wallet(WalletError::NoPaymentForAddress(xorname)))?; - - debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment:?}"); - - match upload_item { - UploadItem::Chunk { address: _, chunk } => { - let chunk = match chunk { - Either::Left(chunk) => chunk, - Either::Right(path) => { - let bytes = std::fs::read(path)?; - Chunk::new(Bytes::from(bytes)) - } - }; - - trace!("Client upload started for chunk: {xorname:?}"); - client - .store_chunk(chunk, payee, payment, verify_store, Some(retry_strategy)) - .await?; - trace!("Client upload completed for chunk: {xorname:?}"); - } - UploadItem::Register { address: _, reg } => { - reg.publish_register(Some((payment, payee)), verify_store) - .await?; - trace!("Client upload completed for register: {xorname:?}"); - } - } - // remove the payment if the upload is successful. - wallet_api.remove_payment_transaction(&xorname); - - Ok(()) - } - - // ====== Misc ====== - - fn emit_upload_event(&mut self, event: UploadEvent) { - if let Some(sender) = self.event_sender.as_ref() { - let sender_clone = sender.clone(); - let _handle = tokio::spawn(async move { - if let Err(err) = sender_clone.send(event).await { - error!("Error emitting upload event: {err:?}"); - } - }); - } else if !self.logged_event_sender_absence { - info!("FilesUpload upload event sender is not set. Use get_upload_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - } - - /// If we have already made initial + max_repayments_allowed, then we should error out. - // separate function as it is used in test. - pub(super) fn have_we_reached_max_repayments( - payments_made: usize, - max_repayments_allowed: usize, - ) -> bool { - // if max_repayments_allowed = 1, then we have reached capacity = true if 2 payments have been made. i.e., - // i.e., 1 initial + 1 repayment. - payments_made > max_repayments_allowed - } - - /// Create a new WalletClient for a given root directory. - fn load_wallet_client(client: Client, root_dir: &Path) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - Ok(WalletClient::new(client, wallet)) - } - - // Used to debug a list of xornames. - fn hash_of_xornames<'a>(xornames: impl Iterator) -> String { - let mut output = [0; 32]; - let mut hasher = Sha3::v256(); - for xorname in xornames { - hasher.update(xorname); - } - hasher.finalize(&mut output); - - hex::encode(output) - } -} diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs deleted file mode 100644 index 9a32382142..0000000000 --- a/sn_client/src/wallet.rs +++ /dev/null @@ -1,1175 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::Error; - -use super::{error::Result, Client}; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::{future::join_all, TryFutureExt}; -use libp2p::PeerId; -use sn_networking::target_arch::Instant; -use sn_networking::{GetRecordError, PayeeQuote}; -use sn_protocol::NetworkAddress; -use sn_transfers::{ - CashNote, HotWallet, MainPubkey, NanoTokens, Payment, PaymentQuote, SignedSpend, SpendAddress, - Transfer, WalletError, WalletResult, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - iter::Iterator, -}; -use tokio::{ - task::JoinSet, - time::{sleep, Duration}, -}; -use xor_name::XorName; - -const MAX_RESEND_PENDING_TX_ATTEMPTS: usize = 10; - -/// A wallet client can be used to send and receive tokens to and from other wallets. -pub struct WalletClient { - client: Client, - wallet: HotWallet, -} - -/// The result of the payment made for a set of Content Addresses -pub struct StoragePaymentResult { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub skipped_chunks: Vec, -} - -impl WalletClient { - /// Create a new wallet client. - /// - /// # Arguments - /// * `client` - A instance of the struct [`sn_client::Client`](Client) - /// * `wallet` - An instance of the struct [`HotWallet`] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// # Ok(()) - /// # } - /// ``` - pub fn new(client: Client, wallet: HotWallet) -> Self { - Self { client, wallet } - } - - /// Stores the wallet to the local wallet directory. 
- /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// wallet_client.store_local_wallet()?; - /// # Ok(()) - /// # } - pub fn store_local_wallet(&mut self) -> WalletResult<()> { - self.wallet.deposit_and_store_to_disk(&vec![]) - } - - /// Display the wallet balance - /// # Example - /// ```no_run - /// // Display the wallet balance in the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// println!("{}" ,wallet_client.balance()); - /// # Ok(()) - /// # } - pub fn balance(&self) -> NanoTokens { - self.wallet.balance() - } - - /// See if any unconfirmed transactions exist. - /// # Example - /// ```no_run - /// // Print unconfirmed spends to the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// if wallet_client.unconfirmed_spend_requests_exist() {println!("Unconfirmed spends exist!")}; - /// # Ok(()) - /// # } - pub fn unconfirmed_spend_requests_exist(&self) -> bool { - self.wallet.unconfirmed_spend_requests_exist() - } - - /// Returns the most recent cached Payment for a provided NetworkAddress. This function does not check if the - /// quote has expired or not. Use get_non_expired_payment_for_addr if you want to get a non expired one. - /// - /// If multiple payments have been made to the same address, then we pick the last one as it is the most recent. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.get_recent_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_recent_payment_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult<(Payment, PeerId)> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_detail = self.wallet.api().get_recent_payment(&xorname)?; - - let payment = payment_detail.to_payment(); - trace!("Payment retrieved for {xorname:?} from wallet: {payment:?}"); - let peer_id = PeerId::from_bytes(&payment_detail.peer_id_bytes) - .map_err(|_| WalletError::NoPaymentForAddress(xorname))?; - - Ok((payment, peer_id)) - } - - /// Returns the all cached Payment for a provided NetworkAddress. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. - /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payments = wallet_client.get_all_payments_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_all_payments_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_details = self.wallet.api().get_all_payments(&xorname)?; - - let payments = payment_details - .into_iter() - .map(|details| { - let payment = details.to_payment(); - - match PeerId::from_bytes(&details.peer_id_bytes) { - Ok(peer_id) => Ok((payment, peer_id)), - Err(_) => Err(WalletError::NoPaymentForAddress(xorname)), - } - }) - .collect::>>()?; - - trace!( - "{} Payment retrieved for {xorname:?} from wallet: {payments:?}", - payments.len() - ); - - Ok(payments) - } - - /// Remove the payment for a given network address from disk. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Removing a payment address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.remove_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn remove_payment_for_addr(&self, address: &NetworkAddress) -> WalletResult<()> { - match &address.as_xorname() { - Some(xorname) => { - self.wallet.api().remove_payment_transaction(xorname); - Ok(()) - } - None => Err(WalletError::InvalidAddressType), - } - } - - /// Send tokens to another wallet. Can also verify the store has been successful. - /// Verification will be attempted via GET request through a Spend on the network. - /// - /// # Arguments - /// * `amount` - [`NanoTokens`]. - /// * `to` - [`MainPubkey`]. - /// * `verify_store` - A boolean to verify store. Set this to true for mandatory verification. - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use sn_transfers::NanoTokens; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let nano = NanoTokens::from(10); - /// let main_pub_key = MainSecretKey::random().main_pubkey(); - /// let payment = wallet_client.send_cash_note(nano,main_pub_key, true); - /// # Ok(()) - /// # } - /// ``` - pub async fn send_cash_note( - &mut self, - amount: NanoTokens, - to: MainPubkey, - verify_store: bool, - ) -> WalletResult { - let created_cash_notes = self.wallet.local_send(vec![(amount, to)], None)?; - - // send to network - if let Err(error) = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - { - return Err(WalletError::CouldNotSendMoney(format!( - "The transfer was not successfully registered in the network: {error:?}" - ))); - } else { - // clear unconfirmed txs - self.wallet.clear_confirmed_spend_requests(); - } - - // return the first CashNote (assuming there is only one because we only sent to one recipient) - match &created_cash_notes[..] { - [cashnote] => Ok(cashnote.clone()), - [_multiple, ..] => Err(WalletError::CouldNotSendMoney( - "Multiple CashNotes were returned from the transaction when only one was expected. This is a BUG." 
- .into(), - )), - [] => Err(WalletError::CouldNotSendMoney( - "No CashNotes were returned from the wallet.".into(), - )), - } - } - - /// Get storecost from the network - /// Returns the MainPubkey of the node to pay and the price in NanoTokens - /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use libp2p_identity::PeerId; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// # let mut rng = rand::thread_rng(); - /// # let xor_name = XorName::random(&mut rng); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let mut wallet_client = WalletClient::new(client, wallet); - /// // Use get_store_cost_at_address(network_address) to get a storecost from the network. - /// let cost = wallet_client.get_store_cost_at_address(network_address).await?.2.cost.as_nano(); - /// # Ok(()) - /// # } - pub async fn get_store_cost_at_address( - &self, - address: NetworkAddress, - ) -> WalletResult { - self.client - .network - .get_store_costs_from_network(address, vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())) - } - - /// Send tokens to nodes closest to the data we want to make storage payment for. Runs mandatory verification. - /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let mut rng = rand::thread_rng(); - /// let xor_name = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xor_name, client.signer_pk()); - /// let net_addr = NetworkAddress::from_register_address(address); - /// - /// // Paying for a random Register Address - /// let cost = wallet_client.pay_for_storage(std::iter::once(net_addr)).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_storage( - &mut self, - content_addrs: impl Iterator, - ) -> WalletResult { - let verify_store = true; - let c: Vec<_> = content_addrs.collect(); - // Using default ExponentialBackoff doesn't make sense, - // as it will just fail after the first payment failure. 
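-        // Editor's note (illustrative, not original code): if a bounded retry budget
-        // were wanted instead of the crate defaults, the `backoff` crate allows e.g.:
-        //
-        //   let backoff = ExponentialBackoff {
-        //       max_elapsed_time: Some(Duration::from_secs(60)), // assumed 60s budget
-        //       ..Default::default()
-        //   };
-        //
-        // `next_backoff()` returns `None` once the budget is exhausted, which ends
-        // the retry loop below.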
- let mut backoff = ExponentialBackoff::default(); - let mut last_err = "No retries".to_string(); - - while let Some(delay) = backoff.next_backoff() { - trace!("Paying for storage (w/backoff retries) for: {:?}", c); - match self - .pay_for_storage_once(c.clone().into_iter(), verify_store) - .await - { - Ok(payment_result) => return Ok(payment_result), - Err(WalletError::CouldNotSendMoney(err)) => { - warn!("Attempt to pay for data failed: {err:?}"); - last_err = err; - sleep(delay).await; - } - Err(err) => return Err(err), - } - } - Err(WalletError::CouldNotSendMoney(last_err)) - } - - /// Existing chunks will have the store cost set to Zero. - /// The payment procedure shall be skipped, and the chunk upload as well. - /// Hence the list of existing chunks will be returned. - async fn pay_for_storage_once( - &mut self, - content_addrs: impl Iterator, - verify_store: bool, - ) -> WalletResult { - // get store cost from network in parallel - let mut tasks = JoinSet::new(); - for content_addr in content_addrs { - let client = self.client.clone(); - tasks.spawn(async move { - let cost = client - .network - .get_store_costs_from_network(content_addr.clone(), vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())); - - debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - (content_addr, cost) - }); - } - debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if let Some(xorname) = content_addr.as_xorname() { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(xorname); - debug!("Skipped existing chunk {content_addr:?}"); - } else { - debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(xorname, (cost.1, cost.2, cost.0.to_bytes())); - } - } else { - warn!("Cannot get store cost for a content that is not a data type: {content_addr:?}"); - } - } - Ok((content_addr, Err(err))) => { - warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(WalletError::CouldNotSendMoney(format!( - "Storecost get task failed: {e:?}" - ))); - } - } - } - info!("Storecosts retrieved for all the provided content addrs"); - - // pay for records - let (storage_cost, royalty_fees) = self.pay_for_records(&cost_map, verify_store).await?; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) - } - - /// Send tokens to nodes closest to the data that we want to make storage payments for. - /// # Returns: - /// - /// * [WalletResult]<([NanoTokens], [NanoTokens])> - /// - /// This return contains the amount paid for storage. Including the network royalties fee paid. - /// - /// # Params: - /// * cost_map - [BTreeMap]([XorName],([MainPubkey], [PaymentQuote])) - /// * verify_store - This optional check can verify if the store has been successful. - /// - /// Verification will be attempted via GET request through a Spend on the network. 
- /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeMap; - /// use xor_name::XorName; - /// use sn_transfers::{MainPubkey, Payment, PaymentQuote}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let mut cost_map:BTreeMap)> = BTreeMap::new(); - /// wallet_client.pay_for_records(&cost_map,true).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - verify_store: bool, - ) -> WalletResult<(NanoTokens, NanoTokens)> { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transaction_until_success(verify_store) - .await?; - let start = Instant::now(); - let total_cost = self.wallet.local_send_storage_payment(cost_map)?; - - trace!( - "local_send_storage_payment of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // send to network - trace!("Sending storage payment transfer to the network"); - let start = Instant::now(); - let spend_attempt_result = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await; - - trace!( - "send_spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. - let start = Instant::now(); - if let Err(error) = spend_attempt_result { - warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later."); - - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let WalletError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - warn!("Removing double spends CashNote from wallet: {cash_note_key:?}"); - self.wallet.mark_notes_as_spent([cash_note_key]); - self.wallet.clear_specific_spend_request(*cash_note_key); - } - } - - self.wallet.store_unconfirmed_spend_requests()?; - - return Err(WalletError::CouldNotSendMoney(format!( - "The storage payment transfer was not successfully registered in the network: {error:?}" - ))); - } else { - info!("Spend has completed: {:?}", spend_attempt_result); - self.wallet.clear_confirmed_spend_requests(); - } - trace!( - "clear up spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - Ok(total_cost) - } - - /// Resend failed transactions. This can optionally verify the store has been successful. - /// This will attempt to GET the cash_note from the network. - async fn resend_pending_transactions(&mut self, verify_store: bool) { - if self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - .is_ok() - { - self.wallet.clear_confirmed_spend_requests(); - } - } - - /// Resend previous confirmed spend. 
-    async fn resend_confirmed_spend(&mut self, spend_addr: &SpendAddress) {
-        if let Ok(Some(spend)) = self.wallet.get_confirmed_spend(*spend_addr) {
-            let spend_vec = vec![spend];
-            let _ = self.client.send_spends(spend_vec.iter(), true).await;
-        } else {
-            warn!("Can't find confirmed spend of {spend_addr:?}");
-            println!("Can't find confirmed spend of {spend_addr:?}");
-        }
-    }
-
-    /// This is a blocking loop for the case where there are pending transactions.
-    /// It will keep resending the unconfirmed spends indefinitely, but explicitly.
-    /// The function only returns on success (all unconfirmed spends uploaded),
-    /// or when the user chooses to manually, but safely, terminate the procedure.
-    pub async fn resend_pending_transaction_blocking_loop(&mut self) -> WalletResult<()> {
-        if !self.wallet.unconfirmed_spend_requests_exist() {
-            return Ok(());
-        }
-        // Wallet shall be all clear to progress forward.
-        while self.wallet.unconfirmed_spend_requests_exist() {
-            info!("Pre-unconfirmed transactions detected, sending again after 30 seconds...");
-            println!("Pre-unconfirmed transactions exist, sending again after 30 seconds...");
-            println!("It's safe to terminate the work, but do remember to retain the unconfirmed_spend file during any wallet update.");
-            println!("Otherwise, you risk corrupting the wallet.");
-            // Wait longer here, as the network will already be under heavy load;
-            // try not to add further burden with re-puts at short intervals.
-            sleep(Duration::from_secs(30)).await;
-
-            // Before re-sending, take a peek at the unconfirmed spends first,
-            // to give the user a better view of what's happening.
-            let spends_to_check: BTreeMap<SpendAddress, BTreeSet<SpendAddress>> = self
-                .wallet
-                .unconfirmed_spend_requests()
-                .iter()
-                .map(|s| {
-                    info!(
-                        "Unconfirmed spend {:?} of amount {}",
-                        s.spend.unique_pubkey,
-                        s.spend.amount()
-                    );
-                    info!("====== descendants : {:?} ", s.spend.descendants);
-                    info!("====== ancestors : {:?} ", s.spend.ancestors);
-                    println!(
-                        "Unconfirmed spend {:?} of amount {}",
-                        s.spend.unique_pubkey,
-                        s.spend.amount()
-                    );
-                    println!("====== descendants : {:?} ", s.spend.descendants);
-                    println!("====== ancestors : {:?} ", s.spend.ancestors);
-
-                    let parent_spends: BTreeSet<_> = s
-                        .spend
-                        .ancestors
-                        .iter()
-                        .map(SpendAddress::from_unique_pubkey)
-                        .collect();
-                    (s.address(), parent_spends)
-                })
-                .collect();
-            let unconfirmed_spends_addrs: Vec<_> = spends_to_check.keys().copied().collect();
-
-            for addr in unconfirmed_spends_addrs {
-                match self.client.peek_a_spend(addr).await {
-                    Ok(_) => {
-                        info!("Unconfirmed Spend {addr:?} was found to have at least one copy in the network!");
-                        println!(
-                            "Unconfirmed Spend {addr:?} was found to have at least one copy in the network!"
-                        );
-                    }
-                    Err(err) => {
-                        info!(
-                            "Unconfirmed Spend {addr:?} has no copy in the network yet: {err:?}"
-                        );
-                        println!(
-                            "Unconfirmed Spend {addr:?} has no copy in the network yet: {err:?}"
-                        );
-                        // For those that don't yet have even one copy in the network,
-                        // check their parents' status in the network.
-                        if let Some(parent_spends) = spends_to_check.get(&addr) {
-                            for parent_addr in parent_spends.iter() {
-                                match self.client.peek_a_spend(*parent_addr).await {
-                                    Ok(s) => {
-                                        info!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} was found to have at least one copy in the network!");
-                                        println!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} was found to have at least one copy in the network!");
-                                        info!(
-                                            "Parent spend {:?} of amount {}",
-                                            s.spend.unique_pubkey,
-                                            s.spend.amount()
-                                        );
-                                        info!("====== descendants : {:?} ", s.spend.descendants);
-                                        info!("====== ancestors : {:?} ", s.spend.ancestors);
-                                        println!(
-                                            "Parent spend {:?} of amount {}",
-                                            s.spend.unique_pubkey,
-                                            s.spend.amount()
-                                        );
-                                        println!("====== descendants : {:?} ", s.spend.descendants);
-                                        println!("====== ancestors : {:?} ", s.spend.ancestors);
-                                    }
-                                    Err(err) => {
-                                        warn!(
-                                            "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet: {err:?}"
-                                        );
-                                        println!(
-                                            "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet: {err:?}"
-                                        );
-                                        // In theory, we should traverse back and re-send all ancestors.
-                                        // In practice, however, tracking back one generation is enough.
-                                        self.resend_confirmed_spend(parent_addr).await;
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-
-            self.resend_pending_transactions(true).await;
-        }
-        info!("Wallet is now all clear; OK to progress further.");
-        println!("Wallet is now all clear; OK to progress further.");
-        eprintln!("WARNING: Closing the client now could corrupt the wallet!");
-        Ok(())
-    }
-
-    /// Try resending failed transactions multiple times until it succeeds, or until we reach the max number of attempts.
-    async fn resend_pending_transaction_until_success(
-        &mut self,
-        verify_store: bool,
-    ) -> WalletResult<()> {
-        let mut did_error = false;
-        // Wallet shall be all clear to progress forward.
-        let mut attempts = 0;
-        while self.wallet.unconfirmed_spend_requests_exist() {
-            info!("Pre-unconfirmed transactions exist, sending again after 1 second...");
-            sleep(Duration::from_secs(1)).await;
-            self.resend_pending_transactions(verify_store).await;
-
-            if attempts > MAX_RESEND_PENDING_TX_ATTEMPTS {
-                // save the error state, but break out of the loop so we can save
-                did_error = true;
-                break;
-            }
-
-            attempts += 1;
-        }
-
-        if did_error {
-            error!("Wallet has pre-unconfirmed transactions, can't progress further.");
-            Err(WalletError::UnconfirmedTxAfterRetries)
-        } else {
-            Ok(())
-        }
-    }
-
-    /// Returns the wallet.
-    ///
-    /// Return type: [HotWallet]
-    ///
-    /// # Example
-    /// ```no_run
-    /// # use sn_client::{Client, WalletClient, Error};
-    /// # use tempfile::TempDir;
-    /// # use bls::SecretKey;
-    /// # use sn_transfers::{HotWallet, MainSecretKey};
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(),Error>{
-    /// # let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// # let tmp_path = TempDir::new()?.path().to_owned();
-    /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-    /// let mut wallet_client = WalletClient::new(client, wallet);
-    /// let paying_wallet = wallet_client.into_wallet();
-    /// // Display the wallet balance in the terminal
-    /// println!("{}", paying_wallet.balance());
-    /// # Ok(())
-    /// # }
-    /// ```
-    pub fn into_wallet(self) -> HotWallet {
-        self.wallet
-    }
-
-    /// Returns a mutable wallet instance.
-    ///
-    /// Return type: [HotWallet]
-    ///
-    /// # Example
-    /// ```no_run
-    /// # use sn_client::{Client, WalletClient, Error};
-    /// # use tempfile::TempDir;
-    /// # use bls::SecretKey;
-    /// # use sn_transfers::{HotWallet, MainSecretKey};
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(),Error>{
-    /// # let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// # let tmp_path = TempDir::new()?.path().to_owned();
-    /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-    /// let mut wallet_client = WalletClient::new(client, wallet);
-    /// let paying_wallet = wallet_client.mut_wallet();
-    /// // Display the mutable wallet balance in the terminal
-    /// println!("{}", paying_wallet.balance());
-    /// # Ok(())
-    /// # }
-    /// ```
-    pub fn mut_wallet(&mut self) -> &mut HotWallet {
-        &mut self.wallet
-    }
-}
-
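`resend_pending_transaction_until_success` above is the bounded counterpart to the blocking loop: a fixed one-second interval with an attempt cap instead of an open-ended retry. A minimal sketch of that shape, again on a Tokio runtime (`MAX_ATTEMPTS`, `has_pending`, and `resend_once` are illustrative stand-ins for `MAX_RESEND_PENDING_TX_ATTEMPTS`, `unconfirmed_spend_requests_exist`, and `resend_pending_transactions`):

```rust
use std::future::Future;
use std::time::Duration;

use tokio::time::sleep;

/// Illustrative cap standing in for `MAX_RESEND_PENDING_TX_ATTEMPTS`.
const MAX_ATTEMPTS: usize = 10;

/// Drive a cleanup step until a condition clears, giving up after a fixed
/// number of fixed-interval attempts so the caller can persist state and
/// report the failure (compare `UnconfirmedTxAfterRetries` above).
async fn drain_with_attempt_cap<Fut>(
    mut has_pending: impl FnMut() -> bool,
    mut resend_once: impl FnMut() -> Fut,
) -> Result<(), &'static str>
where
    Fut: Future<Output = ()>,
{
    let mut attempts = 0;
    while has_pending() {
        sleep(Duration::from_secs(1)).await;
        resend_once().await;

        if attempts > MAX_ATTEMPTS {
            return Err("still pending after max attempts");
        }
        attempts += 1;
    }
    Ok(())
}
```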
-impl Client {
-    /// Send spend requests to the network.
-    /// This can optionally verify the spends have been correctly stored before returning.
-    ///
-    /// # Arguments
-    /// * spend_requests - [Iterator]<[SignedSpend]>
-    /// * verify_store - Boolean. Set to true for mandatory verification via a GET request for a Spend on the network.
-    ///
-    /// # Example
-    /// ```no_run
-    /// use sn_client::{Client, WalletClient, Error};
-    /// # use tempfile::TempDir;
-    /// use bls::SecretKey;
-    /// use sn_transfers::{HotWallet, MainSecretKey};
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(),Error>{
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// # let tmp_path = TempDir::new()?.path().to_owned();
-    /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-    /// // An example of sending storage payment transfers over the network with validation
-    /// client.send_spends(wallet.unconfirmed_spend_requests().iter(), true).await?;
-    /// # Ok(())
-    /// # }
-    /// ```
-    pub async fn send_spends(
-        &self,
-        spend_requests: impl Iterator<Item = &SignedSpend>,
-        verify_store: bool,
-    ) -> WalletResult<()> {
-        let mut tasks = Vec::new();
-
-        // send spends to the network in parallel
-        for spend_request in spend_requests {
-            trace!(
-                "sending spend request to the network: {:?}: {spend_request:#?}",
-                spend_request.unique_pubkey()
-            );
-
-            let the_task = async move {
-                let cash_note_key = spend_request.unique_pubkey();
-                let result = self
-                    .network_store_spend(spend_request.clone(), verify_store)
-                    .await;
-
-                (cash_note_key, result)
-            };
-            tasks.push(the_task);
-        }
-
-        // wait for all the tasks to complete and gather the errors
-        let mut errors = Vec::new();
-        let mut double_spent_keys = BTreeSet::new();
-        for (spend_key, spend_attempt_result) in join_all(tasks).await {
-            match spend_attempt_result {
-                Err(Error::Network(sn_networking::NetworkError::GetRecordError(
-                    GetRecordError::RecordDoesNotMatch(_),
-                )))
-                | Err(Error::Network(sn_networking::NetworkError::GetRecordError(
-                    GetRecordError::SplitRecord { .. },
-                ))) => {
-                    warn!(
-                        "Double spend detected while trying to spend: {:?}",
-                        spend_key
-                    );
-                    double_spent_keys.insert(*spend_key);
-                }
-                Err(e) => {
-                    warn!("Spend request errored out when sent to the network {spend_key:?}: {e}");
-                    errors.push((spend_key, e));
-                }
-                Ok(()) => {
-                    trace!("Spend request was successfully sent to the network: {spend_key:?}");
-                }
-            }
-        }
-
-        // report errors accordingly,
-        // with double spend errors first, as they should be dealt with by the wallet
-        if !double_spent_keys.is_empty() {
-            return Err(WalletError::DoubleSpendAttemptedForCashNotes(
-                double_spent_keys,
-            ));
-        }
-        if !errors.is_empty() {
-            let mut err_report = "Failed to send spend requests to the network:".to_string();
-            for (spend_key, e) in &errors {
-                warn!("Failed to send spend request to the network: {spend_key:?}: {e}");
-                err_report.push_str(&format!(" {spend_key:?}: {e}"));
-            }
-            return Err(WalletError::CouldNotSendMoney(err_report));
-        }
-
-        Ok(())
-    }
-
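`send_spends` fans one borrowed future out per spend and then triages the collected results: outcomes that look like a concurrent double spend are gathered into a set for the wallet to handle first, while other failures are folded into a single error report. A sketch of that fan-out-and-triage shape, assuming the `futures` crate (`SendOutcome` and `fan_out_and_triage` are illustrative, and `u64` stands in for the spend's `UniquePubkey`):

```rust
use std::collections::BTreeSet;
use std::future::Future;

use futures::future::join_all;

/// Per-attempt outcome; the removed code derives these buckets from
/// `NetworkError` variants such as `RecordDoesNotMatch` and `SplitRecord`.
enum SendOutcome {
    Sent,
    LooksDoubleSpent,
    Failed(String),
}

/// Run one attempt per key concurrently (`join_all` polls the futures in
/// place, so no spawning or `'static` bound is needed), then split the
/// results into a priority set and an error list.
async fn fan_out_and_triage<Fut>(
    attempts: Vec<(u64, Fut)>,
) -> (BTreeSet<u64>, Vec<(u64, String)>)
where
    Fut: Future<Output = SendOutcome>,
{
    let tasks = attempts
        .into_iter()
        .map(|(key, fut)| async move { (key, fut.await) });

    let mut double_spent = BTreeSet::new();
    let mut errors = Vec::new();
    for (key, outcome) in join_all(tasks).await {
        match outcome {
            SendOutcome::Sent => {}
            // Reported with priority, like `DoubleSpendAttemptedForCashNotes`.
            SendOutcome::LooksDoubleSpent => {
                let _ = double_spent.insert(key);
            }
            SendOutcome::Failed(reason) => errors.push((key, reason)),
        }
    }
    (double_spent, errors)
}
```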
-    /// Receive a Transfer, verify it, and redeem the CashNotes from the network.
-    ///
-    /// # Arguments
-    /// * transfer: &[Transfer] - Borrowed value for [Transfer]
-    /// * wallet: &[HotWallet] - Borrowed value for [HotWallet]
-    ///
-    /// # Return Value
-    /// * [WalletResult]<[Vec]<[CashNote]>>
-    ///
-    /// # Example
-    /// ```no_run
-    /// use sn_client::{Client, WalletClient, Error};
-    /// # use tempfile::TempDir;
-    /// use bls::SecretKey;
-    /// use sn_transfers::{HotWallet, MainSecretKey};
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(),Error>{
-    /// use tracing::error;
-    /// use sn_transfers::Transfer;
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// # let tmp_path = TempDir::new()?.path().to_owned();
-    /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-    /// let transfer = Transfer::from_hex("13abc").unwrap();
-    /// // An example of using client.receive() for CashNotes
-    /// let cash_notes = match client.receive(&transfer, &wallet).await {
-    ///     Ok(cash_notes) => cash_notes,
-    ///     Err(err) => {
-    ///         println!("Failed to verify and redeem transfer: {err:?}");
-    ///         error!("Failed to verify and redeem transfer: {err:?}");
-    ///         return Err(err.into());
-    ///     }
-    /// };
-    /// # Ok(())
-    ///
-    /// # }
-    /// ```
-    pub async fn receive(
-        &self,
-        transfer: &Transfer,
-        wallet: &HotWallet,
-    ) -> WalletResult<Vec<CashNote>> {
-        let cashnotes = self
-            .network
-            .verify_and_unpack_transfer(transfer, wallet)
-            .map_err(|e| WalletError::CouldNotReceiveMoney(format!("{e:?}")))
-            .await?;
-        let valuable_cashnotes = self.filter_out_already_spend_cash_notes(cashnotes).await?;
-        Ok(valuable_cashnotes)
-    }
-
-    /// Check that the redeemed CashNotes are not already spent.
-    async fn filter_out_already_spend_cash_notes(
-        &self,
-        mut cash_notes: Vec<CashNote>,
-    ) -> WalletResult<Vec<CashNote>> {
-        trace!("Validating CashNotes are not already spent");
-        let mut tasks = JoinSet::new();
-        for cn in &cash_notes {
-            let pk = cn.unique_pubkey();
-            let addr = SpendAddress::from_unique_pubkey(&pk);
-            let self_clone = self.network.clone();
-            let _ = tasks.spawn(async move { self_clone.get_spend(addr).await });
-        }
-        while let Some(result) = tasks.join_next().await {
-            let res = result.map_err(|e| WalletError::FailedToGetSpend(format!("{e}")))?;
-            match res {
-                // if we get a RecordNotFound, it means the CashNote is not spent, which is good
-                Err(sn_networking::NetworkError::GetRecordError(
-                    GetRecordError::RecordNotFound,
-                )) => (),
-                // if we get a spend, it means the CashNote is already spent
-                Ok(s) => {
-                    warn!(
-                        "CashNoteRedemption contains a CashNote that is already spent, skipping it: {:?}",
-                        s.unique_pubkey()
-                    );
-                    cash_notes.retain(|c| &c.unique_pubkey() != s.unique_pubkey());
-                }
-                // report all other errors
-                Err(e) => return Err(WalletError::FailedToGetSpend(format!("{e}"))),
-            }
-        }
-
-        if cash_notes.is_empty() {
-            return Err(WalletError::AllRedeemedCashnotesSpent);
-        }
-
-        Ok(cash_notes)
-    }
-
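Where `send_spends` uses `join_all` over borrowed futures, `filter_out_already_spend_cash_notes` uses a `JoinSet`: each check is a spawned, owned task (hence the handle clone per CashNote), `join_next` yields results in completion order, and a join error (panic or cancellation) surfaces separately from the task's own `Result`. A self-contained sketch of the pattern on a Tokio runtime (`check_all` and the even/odd stand-in check are illustrative):

```rust
use tokio::task::JoinSet;

/// Concurrently check a batch of items with spawned tasks, keeping the ones
/// that pass and failing fast if any task itself panics or is cancelled.
async fn check_all(items: Vec<u64>) -> Result<Vec<u64>, String> {
    let mut tasks = JoinSet::new();
    for item in items {
        // Each task must own its data ('static); the removed code clones the
        // network handle per task for the same reason.
        let _ = tasks.spawn(async move {
            // Stand-in for `network.get_spend(addr)`: even numbers "exist".
            if item % 2 == 0 {
                Ok(item)
            } else {
                Err(format!("{item} not found"))
            }
        });
    }

    let mut found = Vec::new();
    while let Some(joined) = tasks.join_next().await {
        // First layer: did the spawned task run to completion at all?
        let task_result = joined.map_err(|e| format!("task failed: {e}"))?;
        // Second layer: the task's own outcome decides whether we keep it.
        if let Ok(item) = task_result {
            found.push(item);
        }
    }
    Ok(found)
}
```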
-    /// Verify that the spends referred to (in the CashNote) exist on the network.
-    ///
-    /// # Arguments
-    /// * cash_note - [CashNote]
-    ///
-    /// # Return value
-    /// [WalletResult]
-    ///
-    /// # Example
-    /// ```no_run
-    /// use sn_client::{Client, WalletClient, Error};
-    /// # use tempfile::TempDir;
-    /// use bls::SecretKey;
-    /// use sn_transfers::{HotWallet, MainSecretKey};
-    /// # #[tokio::main]
-    /// # async fn main() -> Result<(),Error>{
-    /// use tracing::error;
-    /// use sn_transfers::Transfer;
-    /// let client = Client::new(SecretKey::random(), None, None, None).await?;
-    /// # let tmp_path = TempDir::new()?.path().to_owned();
-    /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-    /// let transfer = Transfer::from_hex("").unwrap();
-    /// let cash_notes = client.receive(&transfer, &wallet).await?;
-    /// // Verification:
-    /// for cash_note in cash_notes {
-    ///     println!("{:?}", client.verify_cashnote(&cash_note).await.unwrap());
-    /// }
-    /// # Ok(())
-    ///
-    /// # }
-    /// ```
-    pub async fn verify_cashnote(&self, cash_note: &CashNote) -> WalletResult<()> {
-        let address = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey());
-
-        // We need to get all the spends in the cash_note from the network,
-        // and compare them to the spends in the cash_note, to know whether the
-        // transfer is considered valid in the network.
-        let mut tasks = Vec::new();
-
-        info!(
-            "parent spends for cn; {address:?}: {:?}",
-            &cash_note.parent_spends.len()
-        );
-
-        for spend in &cash_note.parent_spends {
-            let address = SpendAddress::from_unique_pubkey(spend.unique_pubkey());
-            info!(
-                "Getting parent spend for cn {address:?} pubkey {:?} from network at {address:?}",
-                spend.unique_pubkey()
-            );
-            tasks.push(self.get_spend_from_network(address));
-        }
-
-        let mut received_spends = std::collections::BTreeSet::new();
-        for result in join_all(tasks).await {
-            let network_valid_spend = match result {
-                Ok(spend) => Ok(spend),
-                Err(error) => match error {
-                    Error::Network(sn_networking::NetworkError::DoubleSpendAttempt(spends)) => {
-                        warn!("BurntSpend found with {spends:?}");
-                        Err(WalletError::BurntSpend)
-                    }
-                    err => Err(WalletError::CouldNotVerifyTransfer(format!("{err:?}"))),
-                },
-            }?;
-
-            let _ = received_spends.insert(network_valid_spend);
-        }
-
-        // If all the spends in the cash_note are the same as the ones in the network,
-        // we have successfully verified that the cash_note is globally recognised and therefore valid.
-        if received_spends == cash_note.parent_spends {
-            return Ok(());
-        }
-
-        warn!(
-            "Unexpected parent spends found in CashNote verification at {:?}: {received_spends:?}.",
-            address
-        );
-        Err(WalletError::UnexpectedParentSpends(address))
-    }
-}
-
-/// Use the client to send a CashNote from a local wallet to an address.
-/// This marks the spent CashNote as spent in the network.
-///
-/// # Arguments
-/// * from - [HotWallet]
-/// * amount - [NanoTokens]
-/// * to - [MainPubkey]
-/// * client - [Client]
-/// * verify_store - Boolean. Set to true for mandatory verification via a GET request for a Spend on the network.
-///
-/// # Example
-/// ```no_run
-/// use sn_client::{Client, WalletClient, Error};
-/// # use tempfile::TempDir;
-/// use bls::SecretKey;
-/// use sn_transfers::{HotWallet, MainSecretKey};
-/// # #[tokio::main]
-/// # async fn main() -> Result<(),Error>{
-/// use tracing::error;
-/// use sn_client::send;
-/// use sn_transfers::Transfer;
-/// let client = Client::new(SecretKey::random(), None, None, None).await?;
-/// # let tmp_path = TempDir::new()?.path().to_owned();
-/// let mut first_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-/// let mut second_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?;
-/// let tokens = send(
-///     first_wallet,            // From
-///     second_wallet.balance(), // Amount
-///     second_wallet.address(), // To
-///     &client,                 // Client
-///     true,                    // Verification
-/// ).await?;
-/// # Ok(())
-/// # }
-/// ```
-pub async fn send(
-    from: HotWallet,
-    amount: NanoTokens,
-    to: MainPubkey,
-    client: &Client,
-    verify_store: bool,
-) -> Result<CashNote> {
-    if amount.is_zero() {
-        return Err(Error::AmountIsZero);
-    }
-
-    let mut wallet_client = WalletClient::new(client.clone(), from);
-
-    if let Err(err) = wallet_client
-        .resend_pending_transaction_until_success(verify_store)
-        .await
-    {
-        println!("Wallet has pre-unconfirmed transactions, can't progress further.");
-        warn!("Wallet has pre-unconfirmed transactions, can't progress further.");
-        return Err(err.into());
-    }
-
-    let new_cash_note = wallet_client
-        .send_cash_note(amount, to, verify_store)
-        .await
-        .map_err(|err| {
-            error!("Could not send cash note, err: {err:?}");
-            err
-        })?;
-
-    wallet_client
-        .resend_pending_transaction_until_success(verify_store)
-        .await?;
-
-    wallet_client
-        .into_wallet()
-        .deposit_and_store_to_disk(&vec![new_cash_note.clone()])?;
-
-    Ok(new_cash_note)
-}
diff --git a/sn_client/tests/folders_api.rs b/sn_client/tests/folders_api.rs
deleted file mode 100644
index 8340c3ad32..0000000000
--- a/sn_client/tests/folders_api.rs
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-// All tests require a network running so Clients can be instantiated.
- -use bls::SecretKey; -use eyre::Result; -use sn_client::test_utils::{ - get_funded_wallet, get_new_client, pay_for_storage, random_file_chunk, -}; -use sn_client::{FolderEntry, FoldersApi, Metadata}; -use sn_protocol::{storage::ChunkAddress, NetworkAddress}; -use sn_registers::{EntryHash, RegisterAddress}; -use xor_name::XorName; - -#[tokio::test] -async fn test_folder_basics() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let mut rng = rand::thread_rng(); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let address_subdir = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, Some(address))?; - - let file_chunk = random_file_chunk(); - - let (file_entry_hash, file_meta_xorname, file_metadata) = - folders_api.add_file("file.txt".into(), file_chunk.clone(), None)?; - assert_eq!( - file_metadata, - Metadata { - name: "file.txt".to_string(), - content: FolderEntry::File(file_chunk) - } - ); - - let (subdir_entry_hash, subdir_meta_xorname, subdir_metadata) = - folders_api.add_folder("subdir".into(), address_subdir, None)?; - assert_eq!( - subdir_metadata, - Metadata { - name: "subdir".to_string(), - content: FolderEntry::Folder(address_subdir) - } - ); - - assert_eq!(folders_api.address(), &address); - assert_eq!( - folders_api.as_net_addr(), - NetworkAddress::RegisterAddress(address) - ); - assert_eq!( - folders_api.meta_addrs_to_pay(), - vec![ - NetworkAddress::ChunkAddress(ChunkAddress::new(file_meta_xorname)), - NetworkAddress::ChunkAddress(ChunkAddress::new(subdir_meta_xorname)) - ] - .into_iter() - .collect() - ); - - assert!(folders_api.contains(&file_entry_hash)); - assert!(folders_api.contains(&subdir_entry_hash)); - assert!(!folders_api.contains(&EntryHash::default())); - - assert_eq!( - folders_api.find_by_name("file.txt"), - Some((&file_meta_xorname, &file_metadata)) - ); - assert_eq!( - folders_api.find_by_name("subdir"), - Some((&subdir_meta_xorname, &subdir_metadata)) - ); - assert!(folders_api.find_by_name("inexistent").is_none()); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file_entry_hash, (file_meta_xorname, file_metadata)), - (subdir_entry_hash, (subdir_meta_xorname, subdir_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_remove_replace_entries() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - let file2_chunk = random_file_chunk(); - let file3_chunk = random_file_chunk(); - let file4_chunk = random_file_chunk(); - - let (file1_entry_hash, _, _) = - folders_api.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - folders_api.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - assert_eq!(folders_api.entries().await?.len(), 2); - assert!(folders_api.contains(&file1_entry_hash)); - assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_some()); - assert!(folders_api.find_by_name("file2.txt").is_some()); - - // let's now test removing file1.txt - folders_api.remove_item(file1_entry_hash)?; - 
assert!(!folders_api.contains(&file1_entry_hash)); - assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - // now we test replacing file2.txt with file3.txt - let (file3_entry_hash, file3_meta_xorname, file3_metadata) = - folders_api.replace_file(file2_entry_hash, "file3.txt".into(), file3_chunk, None)?; - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![( - file3_entry_hash, - (file3_meta_xorname, file3_metadata.clone()) - ),] - .into_iter() - .collect() - ); - - // let's add file4.txt, and check that final state is correct - let (file4_entry_hash, file4_meta_xorname, file4_metadata) = - folders_api.add_file("file4.txt".into(), file4_chunk, None)?; - - assert!(!folders_api.contains(&file1_entry_hash)); - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.contains(&file4_entry_hash)); - - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) - ); - assert_eq!( - folders_api.find_by_name("file4.txt"), - Some((&file4_meta_xorname, &file4_metadata)) - ); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file3_entry_hash, (file3_meta_xorname, file3_metadata)), - (file4_entry_hash, (file4_meta_xorname, file4_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_retrieve() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_retrieve", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut folder = FoldersApi::new(client.clone(), wallet_dir, None)?; - let mut subfolder = FoldersApi::new(client.clone(), wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - - let (file1_entry_hash, file1_meta_xorname, file1_metadata) = - folder.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (subfolder_entry_hash, subfolder_meta_xorname, subfolder_metadata) = - folder.add_folder("subfolder".into(), *subfolder.address(), None)?; - - let file2_chunk = random_file_chunk(); - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - subfolder.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder.as_net_addr(), subfolder.as_net_addr()]; - addrs2pay.extend(folder.meta_addrs_to_pay()); - addrs2pay.extend(subfolder.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder.sync(Default::default()).await?; - subfolder.sync(Default::default()).await?; - - let mut retrieved_folder = - FoldersApi::retrieve(client.clone(), wallet_dir, 
*folder.address()).await?; - let mut retrieved_subfolder = - FoldersApi::retrieve(client, wallet_dir, *subfolder.address()).await?; - - assert_eq!(retrieved_folder.entries().await?.len(), 2); - assert!(retrieved_folder.contains(&file1_entry_hash)); - assert!(retrieved_folder.contains(&subfolder_entry_hash)); - assert_eq!( - retrieved_folder.find_by_name("file1.txt"), - Some((&file1_meta_xorname, &file1_metadata)) - ); - assert_eq!( - retrieved_folder.find_by_name("subfolder"), - Some((&subfolder_meta_xorname, &subfolder_metadata)) - ); - - assert_eq!(retrieved_subfolder.entries().await?.len(), 1); - assert!(retrieved_subfolder.contains(&file2_entry_hash)); - assert_eq!( - retrieved_subfolder.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - - assert_eq!( - retrieved_folder.entries().await?, - vec![ - (file1_entry_hash, (file1_meta_xorname, file1_metadata)), - ( - subfolder_entry_hash, - (subfolder_meta_xorname, subfolder_metadata) - ), - ] - .into_iter() - .collect() - ); - assert_eq!( - retrieved_subfolder.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_merge_changes() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_merge_changes", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk.clone()).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut rng = rand::thread_rng(); - let owner_pk = owner_sk.public_key(); - let folder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let subfolder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - - let mut folder_a = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_a = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_a1_chunk = random_file_chunk(); - let file_a2_chunk = random_file_chunk(); - - let (file_a1_entry_hash, file_a1_meta_xorname, file_a1_metadata) = - folder_a.add_file("fileA1.txt".into(), file_a1_chunk.clone(), None)?; - let (subfolder_a_entry_hash, subfolder_a_meta_xorname, subfolder_a_metadata) = - folder_a.add_folder("subfolderA".into(), *subfolder_a.address(), None)?; - let (file_a2_entry_hash, file_a2_meta_xorname, file_a2_metadata) = - subfolder_a.add_file("fileA2.txt".into(), file_a2_chunk.clone(), None)?; - - let mut folder_b = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_b = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_b1_chunk = random_file_chunk(); - let file_b2_chunk = random_file_chunk(); - - let (file_b1_entry_hash, file_b1_meta_xorname, file_b1_metadata) = - folder_b.add_file("fileB1.txt".into(), file_b1_chunk.clone(), None)?; - let (subfolder_b_entry_hash, subfolder_b_meta_xorname, subfolder_b_metadata) = - folder_b.add_folder("subfolderB".into(), *subfolder_b.address(), None)?; - let (file_b2_entry_hash, file_b2_meta_xorname, file_b2_metadata) = - subfolder_b.add_file("fileB2.txt".into(), file_b2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder_a.as_net_addr(), subfolder_a.as_net_addr()]; - addrs2pay.extend(folder_a.meta_addrs_to_pay()); - addrs2pay.extend(subfolder_a.meta_addrs_to_pay()); - addrs2pay.extend(folder_b.meta_addrs_to_pay()); - 
addrs2pay.extend(subfolder_b.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - folder_b.sync(Default::default()).await?; - subfolder_b.sync(Default::default()).await?; - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - - let folder_a_entries = folder_a.entries().await?; - let folder_b_entries = folder_b.entries().await?; - let subfolder_a_entries = subfolder_a.entries().await?; - let subfolder_b_entries = subfolder_b.entries().await?; - - assert_eq!(folder_a_entries.len(), 4); - assert_eq!(folder_b_entries.len(), 4); - assert_eq!(subfolder_a_entries.len(), 2); - assert_eq!(subfolder_b_entries.len(), 2); - - assert!(folder_a.contains(&file_a1_entry_hash)); - assert!(folder_a.contains(&file_b1_entry_hash)); - assert!(folder_a.contains(&subfolder_a_entry_hash)); - assert!(folder_a.contains(&subfolder_b_entry_hash)); - assert!(subfolder_a.contains(&file_a2_entry_hash)); - assert!(subfolder_a.contains(&file_b2_entry_hash)); - - assert!(folder_b.contains(&file_a1_entry_hash)); - assert!(folder_b.contains(&file_b1_entry_hash)); - assert!(folder_b.contains(&subfolder_a_entry_hash)); - assert!(folder_b.contains(&subfolder_b_entry_hash)); - assert!(subfolder_b.contains(&file_a2_entry_hash)); - assert!(subfolder_b.contains(&file_b2_entry_hash)); - - assert_eq!( - folder_a.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!( - folder_b.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!(folder_a_entries, folder_b_entries); - assert_eq!( - folder_a_entries, - vec![ - (file_a1_entry_hash, (file_a1_meta_xorname, file_a1_metadata)), - (file_b1_entry_hash, (file_b1_meta_xorname, file_b1_metadata)), - ( - subfolder_a_entry_hash, - (subfolder_a_meta_xorname, subfolder_a_metadata) - ), - ( - subfolder_b_entry_hash, - (subfolder_b_meta_xorname, subfolder_b_metadata) - ), - ] - .into_iter() - .collect() - ); - - assert_eq!( - subfolder_a.find_by_name("fileA2.txt"), - Some((&file_a2_meta_xorname, &file_a2_metadata)) - ); - assert_eq!( - subfolder_a.find_by_name("fileB2.txt"), - Some((&file_b2_meta_xorname, &file_b2_metadata)) - ); - - assert_eq!(subfolder_a_entries, subfolder_b_entries); - assert_eq!( - subfolder_a_entries, - vec![ - (file_a2_entry_hash, (file_a2_meta_xorname, file_a2_metadata)), - (file_b2_entry_hash, (file_b2_meta_xorname, file_b2_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} diff --git a/sn_faucet/CHANGELOG.md b/sn_faucet/CHANGELOG.md deleted file mode 100644 index e8b9817648..0000000000 --- a/sn_faucet/CHANGELOG.md +++ /dev/null @@ -1,1355 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. 
- -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [0.4.27](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.26...sn_faucet-v0.4.27) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(release)* sn_auditor-v0.1.22/sn_faucet-v0.4.24/node-launchpad-v0.3.4 - -## [0.4.26](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.25...sn_faucet-v0.4.26) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.25](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.24...sn_faucet-v0.4.25) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_cli - -## [0.4.24](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.23...sn_faucet-v0.4.24) - 2024-06-04 - -### Other -- remove gifting and start initial data uploads - -## [0.4.23](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.22...sn_faucet-v0.4.23) - 2024-06-04 - -### Added -- *(faucet_server)* download and upload gutenberger book part by part - -## [0.4.22](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.21...sn_faucet-v0.4.22) - 2024-06-03 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.20](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.19...sn_faucet-v0.4.20) - 2024-06-03 - -### Added -- *(faucet_server)* upload sample files and print head_addresses -- *(faucet_server)* download some iso files during startup - -### Other -- no openssl dep for faucet - -## [0.4.19](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.18...sn_faucet-v0.4.19) - 2024-05-24 - -### Added -- *(faucet)* allow gifting by default -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- faucet donate endpoint to feed the faucet -- *(faucet)* fully limit any concurrency -- *(faucet)* log from sn_client -- report protocol mismatch error - -### Fixed -- *(faucet)* cleanup unused vars -- *(faucet)* rate limit before getting wallet -- *(faucet)* ensure faucet is funded in main fn -- update calls to HotWallet::load -- *(faucet)* fix distribution 'from' wallet loading -- *(client)* move acct_packet mnemonic into client layer - -### Other -- enable default features during faucet release -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* 
sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6
-- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5
-- *(versions)* sync versions with latest crates.io vs
-- address review comments
-- *(faucet)* log initialization failure and upload faucet log
-- *(CI)* upload faucet log during CI
-- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47
-- *(deps)* bump dependencies
-
-## [0.4.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.17...sn_faucet-v0.4.18) - 2024-05-20
-
-### Other
-- update Cargo.lock dependencies
-
-## [0.4.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.16...sn_faucet-v0.4.17) - 2024-05-15
-
-### Other
-- update Cargo.lock dependencies
-
-## [0.4.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.15...sn_faucet-v0.4.16) - 2024-05-09
-
-### Other
-- updated the following local packages: sn_client
-
-## [0.4.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14...sn_faucet-v0.4.15) - 2024-05-08
-
-### Other
-- update Cargo.lock dependencies
-
-## [0.4.14-alpha.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14-alpha.0...sn_faucet-v0.4.14-alpha.1) - 2024-05-07
-
-### Added
-- faucet donate endpoint to feed the faucet
-- *(faucet)* fully limit any concurrency
-- *(faucet)* log from sn_client
-- report protocol mismatch error
-
-### Fixed
-- *(faucet)* cleanup unused vars
-- *(faucet)* rate limit before getting wallet
-- *(faucet)* ensure faucet is funded in main fn
-- update calls to HotWallet::load
-- *(faucet)* fix distribution 'from' wallet loading
-- *(client)* move acct_packet mnemonic into client layer
-
-### Other
-- *(versions)* sync versions with latest crates.io vs
-- address review comments
-- *(faucet)* log initialization failure and upload faucet log
-- *(CI)* upload faucet log during CI
-- *(deps)* bump dependencies
-
-## [0.4.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.2...sn_faucet-v0.4.3) - 2024-03-28
-
-### Other
-- updated the following local packages: sn_client
-
-## [0.4.2](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.1...sn_faucet-v0.4.2) - 2024-03-28
-
-### Fixed
-- *(faucet)* bind to wan
-
-## [0.4.1](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.0...sn_faucet-v0.4.1) - 2024-03-28
-
-### Fixed
-- *(faucet)* add build info
-
-## [0.4.0](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.85...sn_faucet-v0.4.0) - 2024-03-27
-
-### Added
-- *(faucet)* rate limit based upon wallet locks
-- *(faucet)* start using warp for simpler server tweaks
-- only give out 1snt per req
-- make logging simpler to use
-- [**breaking**] remove gossip code
-
-## [0.3.85](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.84...sn_faucet-v0.3.85)
- 2024-03-21 - -### Added -- *(log)* set log levels on the fly - -### Other -- *(release)* sn_cli-v0.89.84/sn_node-v0.104.40/sn_networking-v0.13.34/sn_service_management-v0.1.1/sn_client-v0.104.30 - -## [0.3.84](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.83...sn_faucet-v0.3.84) - 2024-03-14 - -### Other -- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82 - -## [0.3.83](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.82-alpha.0...sn_faucet-v0.3.83) - 2024-03-08 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.81](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.80...sn_faucet-v0.3.81) - 2024-03-06 - -### Added -- provide `faucet add` command -- *(faucet)* claim using signature of safe wallet - -### Other -- *(release)* sn_transfers-v0.16.1 - -## [0.3.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.79...sn_faucet-v0.3.80) - 2024-02-23 - -### Other -- update Cargo.lock dependencies - -## [0.3.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.78...sn_faucet-v0.3.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## [0.3.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.77...sn_faucet-v0.3.78) - 2024-02-20 - -### Other -- *(release)* sn_protocol-v0.14.6/sn_node-v0.104.33/sn-node-manager-v0.3.9/sn_cli-v0.89.78/sn_client-v0.104.25/sn_networking-v0.13.27/sn_node_rpc_client-v0.4.64 - -## [0.3.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.76...sn_faucet-v0.3.77) - 2024-02-20 - -### Other -- fix distribution test check - -## [0.3.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.75...sn_faucet-v0.3.76) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.74...sn_faucet-v0.3.75) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.3.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.73...sn_faucet-v0.3.74) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.72...sn_faucet-v0.3.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.71...sn_faucet-v0.3.72) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.70...sn_faucet-v0.3.71) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.3.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.69...sn_faucet-v0.3.70) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## [0.3.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.68...sn_faucet-v0.3.69) - 2024-02-19 - -### Other -- token_distribution against network - -## [0.3.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.67...sn_faucet-v0.3.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.66...sn_faucet-v0.3.67) - 2024-02-15 - -### Other -- *(release)* 
sn_protocol-v0.14.1/sn-node-manager-v0.3.1/sn_cli-v0.89.68/sn_client-v0.104.13/sn_networking-v0.13.18/sn_node-v0.104.21/sn_node_rpc_client-v0.4.54 - -## [0.3.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.65...sn_faucet-v0.3.66) - 2024-02-15 - -### Other -- token_distribution -- *(release)* sn_protocol-v0.14.0/sn-node-manager-v0.3.0/sn_cli-v0.89.67/sn_client-v0.104.12/sn_networking-v0.13.17/sn_node-v0.104.20/sn_node_rpc_client-v0.4.53 - -## [0.3.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.64...sn_faucet-v0.3.65) - 2024-02-14 - -### Other -- *(release)* sn_protocol-v0.13.0/sn-node-manager-v0.2.0/sn_cli-v0.89.65/sn_client-v0.104.10/sn_networking-v0.13.15/sn_node-v0.104.18/sn_node_rpc_client-v0.4.51 - -## [0.3.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.63...sn_faucet-v0.3.64) - 2024-02-13 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.62...sn_faucet-v0.3.63) - 2024-02-12 - -### Other -- *(faucet)* improve faucet server response for clippy -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.3.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.61...sn_faucet-v0.3.62) - 2024-02-12 - -### Other -- updated the following local packages: sn_client - -## [0.3.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.60...sn_faucet-v0.3.61) - 2024-02-12 - -### Added -- *(faucet)* api endpoint to return distribution - -## [0.3.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.59...sn_faucet-v0.3.60) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.3.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.58...sn_faucet-v0.3.59) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.3.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.57...sn_faucet-v0.3.58) - 2024-02-09 - -### Other -- update dependencies - -## [0.3.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.56...sn_faucet-v0.3.57) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.3.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.55...sn_faucet-v0.3.56) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.54...sn_faucet-v0.3.55) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.53...sn_faucet-v0.3.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.52...sn_faucet-v0.3.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.51...sn_faucet-v0.3.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.50...sn_faucet-v0.3.51) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.49...sn_faucet-v0.3.50) - 2024-02-07 - -### Other -- update dependencies - -## [0.3.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.48...sn_faucet-v0.3.49) - 2024-02-07 - -### Other -- update dependencies - -## 
[0.3.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.47...sn_faucet-v0.3.48) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.46...sn_faucet-v0.3.47) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.45...sn_faucet-v0.3.46) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.44...sn_faucet-v0.3.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.43...sn_faucet-v0.3.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.42...sn_faucet-v0.3.43) - 2024-02-05 - -### Added -- *(faucet)* initial distributions in background -- *(faucet)* create distributions for maid addrs - -### Other -- *(ci)* make deps optional if used only inside a feature -- *(faucet)* fix typo/clippy/fmt after rebase - -## [0.3.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.41...sn_faucet-v0.3.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.40...sn_faucet-v0.3.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.39...sn_faucet-v0.3.40) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.38...sn_faucet-v0.3.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.37...sn_faucet-v0.3.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.36...sn_faucet-v0.3.37) - 2024-02-02 - -### Added -- make token distribution an option - -### Fixed -- minreq as optional dep - -## [0.3.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.35...sn_faucet-v0.3.36) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.34...sn_faucet-v0.3.35) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.33...sn_faucet-v0.3.34) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.32...sn_faucet-v0.3.33) - 2024-01-31 - -### Other -- remove the `sn_testnet` crate - -## [0.3.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.31...sn_faucet-v0.3.32) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.30...sn_faucet-v0.3.31) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.29...sn_faucet-v0.3.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.28...sn_faucet-v0.3.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.27...sn_faucet-v0.3.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.26...sn_faucet-v0.3.27) - 
2024-01-30 - -### Other -- update dependencies - -## [0.3.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.25...sn_faucet-v0.3.26) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.24...sn_faucet-v0.3.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.23...sn_faucet-v0.3.24) - 2024-01-29 - -### Added -- *(faucet)* add Snapshot type -- *(faucet)* get pubkeys from repo not pastebin -- *(faucet)* custom types for maid values -- *(faucet)* load public keys for distribution -- *(faucet)* snapshot is a hashmap - -## [0.3.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.22...sn_faucet-v0.3.23) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.21...sn_faucet-v0.3.22) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.20...sn_faucet-v0.3.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.3.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.19...sn_faucet-v0.3.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.18...sn_faucet-v0.3.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.17...sn_faucet-v0.3.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.16...sn_faucet-v0.3.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.15...sn_faucet-v0.3.16) - 2024-01-25 - -### Added -- client webtransport-websys feat - -## [0.3.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.14...sn_faucet-v0.3.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.13...sn_faucet-v0.3.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.12...sn_faucet-v0.3.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.11...sn_faucet-v0.3.12) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.10...sn_faucet-v0.3.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.9...sn_faucet-v0.3.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.8...sn_faucet-v0.3.9) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.7...sn_faucet-v0.3.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.6...sn_faucet-v0.3.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.5...sn_faucet-v0.3.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.3.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.4...sn_faucet-v0.3.5) - 2024-01-18 - -### Other -- 
update dependencies
-
-## [0.3.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.3...sn_faucet-v0.3.4) - 2024-01-18
-
-### Other
-- update dependencies
-
-## [0.3.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.2...sn_faucet-v0.3.3) - 2024-01-18
-
-### Added
-- set quic as default transport
-
-## [0.3.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.1...sn_faucet-v0.3.2) - 2024-01-18
-
-### Added
-- *(faucet)* download snapshot of maid balances
-
-## [0.3.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.0...sn_faucet-v0.3.1) - 2024-01-17
-
-### Other
-- update dependencies
-
-## [0.3.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.22...sn_faucet-v0.3.0) - 2024-01-17
-
-### Other
-- *(client)* [**breaking**] move out client connection progress bar
-
-## [0.2.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.21...sn_faucet-v0.2.22) - 2024-01-17
-
-### Other
-- update dependencies
-
-## [0.2.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.20...sn_faucet-v0.2.21) - 2024-01-16
-
-### Other
-- update dependencies
-
-## [0.2.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.19...sn_faucet-v0.2.20) - 2024-01-16
-
-### Other
-- update dependencies
-
-## [0.2.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.18...sn_faucet-v0.2.19) - 2024-01-16
-
-### Other
-- update dependencies
-
-## [0.2.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.17...sn_faucet-v0.2.18) - 2024-01-16
-
-### Other
-- update dependencies
-
-## [0.2.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.16...sn_faucet-v0.2.17) - 2024-01-15
-
-### Other
-- update dependencies
-
-## [0.2.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.15...sn_faucet-v0.2.16) - 2024-01-15
-
-### Other
-- update dependencies
-
-## [0.2.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.14...sn_faucet-v0.2.15) - 2024-01-15
-
-### Other
-- update dependencies
-
-## [0.2.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.13...sn_faucet-v0.2.14) - 2024-01-15
-
-### Other
-- update dependencies
-
-## [0.2.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.12...sn_faucet-v0.2.13) - 2024-01-12
-
-### Other
-- update dependencies
-
-## [0.2.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.11...sn_faucet-v0.2.12) - 2024-01-12
-
-### Other
-- update dependencies
-
-## [0.2.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.10...sn_faucet-v0.2.11) - 2024-01-11
-
-### Other
-- update dependencies
-
-## [0.2.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.9...sn_faucet-v0.2.10) - 2024-01-11
-
-### Other
-- update dependencies
-
-## [0.2.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.8...sn_faucet-v0.2.9) - 2024-01-11
-
-### Other
-- update dependencies
-
-## [0.2.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.7...sn_faucet-v0.2.8) - 2024-01-11
-
-### Other
-- update dependencies
-
-## [0.2.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.6...sn_faucet-v0.2.7) - 2024-01-10
-
-### Other
-- update dependencies
-
-## [0.2.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.5...sn_faucet-v0.2.6) - 2024-01-10
-
-### Other
-- update dependencies
-
-## [0.2.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.4...sn_faucet-v0.2.5) - 2024-01-10
-
-### Other
-- update dependencies
-
-## [0.2.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.3...sn_faucet-v0.2.4) - 2024-01-09
-
-### Other
-- update dependencies
-
-## [0.2.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.2...sn_faucet-v0.2.3) - 2024-01-09
-
-### Other
-- update dependencies
-
-## [0.2.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.1...sn_faucet-v0.2.2) - 2024-01-09
-
-### Other
-- update dependencies
-
-## [0.2.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.0...sn_faucet-v0.2.1) - 2024-01-09
-
-### Other
-- update dependencies
-
-## [0.2.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.126...sn_faucet-v0.2.0) - 2024-01-08
-
-### Added
-- provide `--first` argument for `safenode`
-
-## [0.1.126](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.125...sn_faucet-v0.1.126) - 2024-01-08
-
-### Other
-- update dependencies
-
-## [0.1.125](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.124...sn_faucet-v0.1.125) - 2024-01-08
-
-### Other
-- update dependencies
-
-## [0.1.124](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.123...sn_faucet-v0.1.124) - 2024-01-08
-
-### Other
-- update dependencies
-
-## [0.1.123](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.122...sn_faucet-v0.1.123) - 2024-01-08
-
-### Other
-- update dependencies
-
-## [0.1.122](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.121...sn_faucet-v0.1.122) - 2024-01-08
-
-### Other
-- update dependencies
-
-## [0.1.121](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.120...sn_faucet-v0.1.121) - 2024-01-06
-
-### Other
-- update dependencies
-
-## [0.1.120](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.119...sn_faucet-v0.1.120) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.119](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.118...sn_faucet-v0.1.119) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.118](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.117...sn_faucet-v0.1.118) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.117](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.116...sn_faucet-v0.1.117) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.116](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.115...sn_faucet-v0.1.116) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.115](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.114...sn_faucet-v0.1.115) - 2024-01-05
-
-### Other
-- update dependencies
-
-## [0.1.114](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.113...sn_faucet-v0.1.114) - 2024-01-04
-
-### Other
-- update dependencies
-
-## [0.1.113](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.112...sn_faucet-v0.1.113) - 2024-01-04
-
-### Other
-- update dependencies
-
-## [0.1.112](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.111...sn_faucet-v0.1.112) - 2024-01-03
-
-### Other
-- update dependencies
-
-## [0.1.111](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.110...sn_faucet-v0.1.111) - 2024-01-03
-
-### Other
-- update dependencies
-
-## [0.1.110](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.109...sn_faucet-v0.1.110) - 2024-01-03
-
-### Other
-- update dependencies
-
-## [0.1.109](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.108...sn_faucet-v0.1.109) - 2024-01-02
-
-### Other
-- update dependencies
-
-## [0.1.108](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.107...sn_faucet-v0.1.108) - 2024-01-02
-
-### Other
-- update dependencies
-
-## [0.1.107](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.106...sn_faucet-v0.1.107) - 2023-12-29
-
-### Added
-- restart faucet_server from breaking point
-
-## [0.1.106](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.105...sn_faucet-v0.1.106) - 2023-12-29
-
-### Other
-- update dependencies
-
-## [0.1.105](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.104...sn_faucet-v0.1.105) - 2023-12-29
-
-### Other
-- update dependencies
-
-## [0.1.104](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.103...sn_faucet-v0.1.104) - 2023-12-26
-
-### Other
-- update dependencies
-
-## [0.1.103](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.102...sn_faucet-v0.1.103) - 2023-12-22
-
-### Other
-- update dependencies
-
-## [0.1.102](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.101...sn_faucet-v0.1.102) - 2023-12-22
-
-### Other
-- update dependencies
-
-## [0.1.101](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.100...sn_faucet-v0.1.101) - 2023-12-21
-
-### Other
-- update dependencies
-
-## [0.1.100](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.99...sn_faucet-v0.1.100) - 2023-12-21
-
-### Other
-- update dependencies
-
-## [0.1.99](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.98...sn_faucet-v0.1.99) - 2023-12-20
-
-### Other
-- update dependencies
-
-## [0.1.98](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.97...sn_faucet-v0.1.98) - 2023-12-19
-
-### Other
-- update dependencies
-
-## [0.1.97](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.96...sn_faucet-v0.1.97) - 2023-12-19
-
-### Other
-- update dependencies
-
-## [0.1.96](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.95...sn_faucet-v0.1.96) - 2023-12-19
-
-### Other
-- update dependencies
-
-## [0.1.95](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.94...sn_faucet-v0.1.95) - 2023-12-19
-
-### Other
-- update dependencies
-
-## [0.1.94](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.93...sn_faucet-v0.1.94) - 2023-12-19
-
-### Other
-- update dependencies
-
-## [0.1.93](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.92...sn_faucet-v0.1.93) - 2023-12-18
-
-### Other
-- update dependencies
-
-## [0.1.92](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.91...sn_faucet-v0.1.92) - 2023-12-18
-
-### Other
-- update dependencies
-
-## [0.1.91](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.90...sn_faucet-v0.1.91) - 2023-12-18
-
-### Other
-- update dependencies
-
-## [0.1.90](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.89...sn_faucet-v0.1.90) - 2023-12-18
-
-### Other
-- update dependencies
-
-## [0.1.89](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.88...sn_faucet-v0.1.89) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.88](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.87...sn_faucet-v0.1.88) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.87](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.86...sn_faucet-v0.1.87) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.86](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.85...sn_faucet-v0.1.86) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.85](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.84...sn_faucet-v0.1.85) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.84](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.83...sn_faucet-v0.1.84) - 2023-12-14
-
-### Other
-- update dependencies
-
-## [0.1.83](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.82...sn_faucet-v0.1.83) - 2023-12-13
-
-### Other
-- update dependencies
-
-## [0.1.82](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.81...sn_faucet-v0.1.82) - 2023-12-13
-
-### Other
-- update dependencies
-
-## [0.1.81](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.80...sn_faucet-v0.1.81) - 2023-12-13
-
-### Other
-- update dependencies
-
-## [0.1.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.79...sn_faucet-v0.1.80) - 2023-12-13
-
-### Other
-- update dependencies
-
-## [0.1.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.78...sn_faucet-v0.1.79) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.1.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.77...sn_faucet-v0.1.78) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.1.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.76...sn_faucet-v0.1.77) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.1.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.75...sn_faucet-v0.1.76) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.1.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.74...sn_faucet-v0.1.75) - 2023-12-12
-
-### Other
-- update dependencies
-
-## [0.1.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.73...sn_faucet-v0.1.74) - 2023-12-11
-
-### Other
-- update dependencies
-
-## [0.1.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.72...sn_faucet-v0.1.73) - 2023-12-11
-
-### Other
-- update dependencies
-
-## [0.1.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.71...sn_faucet-v0.1.72) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.1.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.70...sn_faucet-v0.1.71) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.1.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.69...sn_faucet-v0.1.70) - 2023-12-08
-
-### Other
-- update dependencies
-
-## [0.1.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.68...sn_faucet-v0.1.69) - 2023-12-07
-
-### Other
-- update dependencies
-
-## [0.1.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.67...sn_faucet-v0.1.68) - 2023-12-06
-
-### Other
-- update dependencies
-
-## [0.1.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.66...sn_faucet-v0.1.67) - 2023-12-06
-
-### Other
-- update dependencies
-
-## [0.1.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.65...sn_faucet-v0.1.66) - 2023-12-06
-
-### Other
-- update dependencies
-
-## [0.1.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.64...sn_faucet-v0.1.65) - 2023-12-06
-
-### Other
-- remove needless pass by value
-- use inline format args
-- add boilerplate for workspace lints
-
-## [0.1.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.63...sn_faucet-v0.1.64) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.62...sn_faucet-v0.1.63) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.61...sn_faucet-v0.1.62) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.60...sn_faucet-v0.1.61) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.59...sn_faucet-v0.1.60) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.58...sn_faucet-v0.1.59) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.57...sn_faucet-v0.1.58) - 2023-12-05
-
-### Other
-- update dependencies
-
-## [0.1.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.56...sn_faucet-v0.1.57) - 2023-12-04
-
-### Added
-- *(testnet)* wait till faucet server starts
-
-### Other
-- *(faucet)* print on claim genesis error
-
-## [0.1.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.55...sn_faucet-v0.1.56) - 2023-12-01
-
-### Other
-- update dependencies
-
-## [0.1.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.54...sn_faucet-v0.1.55) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.1.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.53...sn_faucet-v0.1.54) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.1.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.52...sn_faucet-v0.1.53) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.1.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.51...sn_faucet-v0.1.52) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.1.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.50...sn_faucet-v0.1.51) - 2023-11-29
-
-### Other
-- update dependencies
-
-## [0.1.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.49...sn_faucet-v0.1.50) - 2023-11-29
-
-### Added
-- add missing quic features
-
-## [0.1.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.48...sn_faucet-v0.1.49) - 2023-11-29
-
-### Added
-- verify spends through the cli
-
-## [0.1.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.47...sn_faucet-v0.1.48) - 2023-11-28
-
-### Other
-- update dependencies
-
-## [0.1.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.46...sn_faucet-v0.1.47) - 2023-11-28
-
-### Other
-- update dependencies
-
-## [0.1.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.45...sn_faucet-v0.1.46) - 2023-11-28
-
-### Other
-- update dependencies
-
-## [0.1.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.44...sn_faucet-v0.1.45) - 2023-11-27
-
-### Other
-- update dependencies
-
-## [0.1.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.43...sn_faucet-v0.1.44) - 2023-11-24
-
-### Other
-- update dependencies
-
-## [0.1.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.42...sn_faucet-v0.1.43) - 2023-11-24
-
-### Other
-- update dependencies
-
-## [0.1.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.41...sn_faucet-v0.1.42) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.1.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.40...sn_faucet-v0.1.41) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.1.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.39...sn_faucet-v0.1.40) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.1.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.38...sn_faucet-v0.1.39) - 2023-11-23
-
-### Other
-- update dependencies
-
-## [0.1.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.37...sn_faucet-v0.1.38) - 2023-11-22
-
-### Other
-- update dependencies
-
-## [0.1.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.36...sn_faucet-v0.1.37) - 2023-11-22
-
-### Other
-- update dependencies
-
-## [0.1.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.35...sn_faucet-v0.1.36) - 2023-11-22
-
-### Other
-- update dependencies
-
-## [0.1.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.34...sn_faucet-v0.1.35) - 2023-11-21
-
-### Added
-- make joining gossip for clients and rpc nodes optional
-
-## [0.1.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.33...sn_faucet-v0.1.34) - 2023-11-21
-
-### Other
-- update dependencies
-
-## [0.1.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.32...sn_faucet-v0.1.33) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.31...sn_faucet-v0.1.32) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.30...sn_faucet-v0.1.31) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.29...sn_faucet-v0.1.30) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.28...sn_faucet-v0.1.29) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.27...sn_faucet-v0.1.28) - 2023-11-20
-
-### Other
-- update dependencies
-
-## [0.1.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.26...sn_faucet-v0.1.27) - 2023-11-17
-
-### Other
-- update dependencies
-
-## [0.1.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.25...sn_faucet-v0.1.26) - 2023-11-17
-
-### Other
-- update dependencies
-
-## [0.1.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.24...sn_faucet-v0.1.25) - 2023-11-16
-
-### Other
-- update dependencies
-
-## [0.1.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.23...sn_faucet-v0.1.24) - 2023-11-16
-
-### Added
-- massive cleaning to prepare for quotes
-
-## [0.1.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.22...sn_faucet-v0.1.23) - 2023-11-15
-
-### Other
-- update dependencies
-
-## [0.1.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.21...sn_faucet-v0.1.22) - 2023-11-15
-
-### Other
-- update dependencies
-
-## [0.1.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.20...sn_faucet-v0.1.21) - 2023-11-15
-
-### Other
-- update dependencies
-
-## [0.1.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.19...sn_faucet-v0.1.20) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.1.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.18...sn_faucet-v0.1.19) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.1.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.17...sn_faucet-v0.1.18) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.1.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.16...sn_faucet-v0.1.17) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.1.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.15...sn_faucet-v0.1.16) - 2023-11-14
-
-### Other
-- update dependencies
-
-## [0.1.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.14...sn_faucet-v0.1.15) - 2023-11-13
-
-### Other
-- update dependencies
-
-## [0.1.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.13...sn_faucet-v0.1.14) - 2023-11-13
-
-### Other
-- update dependencies
-
-## [0.1.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.12...sn_faucet-v0.1.13) - 2023-11-13
-
-### Other
-- update dependencies
-
-## [0.1.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.11...sn_faucet-v0.1.12) - 2023-11-13
-
-### Other
-- update dependencies
-
-## [0.1.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.10...sn_faucet-v0.1.11) - 2023-11-10
-
-### Other
-- update dependencies
-
-## [0.1.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.9...sn_faucet-v0.1.10) - 2023-11-10
-
-### Other
-- update dependencies
-
-## [0.1.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.8...sn_faucet-v0.1.9) - 2023-11-09
-
-### Other
-- update dependencies
-
-## [0.1.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.7...sn_faucet-v0.1.8) - 2023-11-09
-
-### Other
-- update dependencies
-
-## [0.1.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.6...sn_faucet-v0.1.7) - 2023-11-09
-
-### Other
-- update dependencies
-
-## [0.1.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.5...sn_faucet-v0.1.6) - 2023-11-08
-
-### Other
-- update dependencies
-
-## [0.1.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.4...sn_faucet-v0.1.5) - 2023-11-08
-
-### Other
-- update dependencies
-
-## [0.1.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.3...sn_faucet-v0.1.4) - 2023-11-08
-
-### Other
-- update dependencies
-
-## [0.1.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.2...sn_faucet-v0.1.3) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.1.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.1...sn_faucet-v0.1.2) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.1.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.0...sn_faucet-v0.1.1) - 2023-11-07
-
-### Other
-- update dependencies
-
-## [0.1.0](https://github.com/maidsafe/safe_network/releases/tag/sn_faucet-v0.1.0) - 2023-11-07
-
-### Fixed
-- CI errors
-
-### Other
-- move sn_faucet to its own crate
diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml
deleted file mode 100644
index 9fed6af601..0000000000
--- a/sn_faucet/Cargo.toml
+++ /dev/null
@@ -1,58 +0,0 @@
-[package]
-authors = ["MaidSafe Developers <dev@maidsafe.net>"]
-description = "The Safe Network Faucet"
-documentation = "https://docs.rs/sn_node"
-edition = "2021"
-homepage = "https://maidsafe.net"
-license = "GPL-3.0"
-name = "sn_faucet"
-readme = "README.md"
-repository = "https://github.com/maidsafe/safe_network"
-version = "0.5.3"
-
-[features]
-default = ["gifting"]
-distribution = ["base64", "bitcoin", "minreq"]
-gifting = []
-initial-data = ["reqwest", "futures"]
-nightly = []
-
-[[bin]]
-path = "src/main.rs"
-name = "faucet"
-
-[dependencies]
-warp = "0.3"
-assert_fs = "1.0.0"
-base64 = { version = "0.22.0", optional = true }
-bitcoin = { version = "0.31.0", features = [
-    "rand-std",
-    "base64",
-], optional = true }
-bls = { package = "blsttc", version = "8.0.1" }
-clap = { version = "4.2.1", features = ["derive"] }
-color-eyre = "0.6.2"
-dirs-next = "~2.0.0"
-hex = "0.4.3"
-indicatif = { version = "0.17.5", features = ["tokio"] }
-minreq = { version = "2.11.0", features = ["https-rustls"], optional = true }
-serde = { version = "1.0.193", features = ["derive"] }
-serde_json = "1.0.108"
-sn_build_info = { path = "../sn_build_info", version = "0.1.15" }
-sn_cli = { path = "../sn_cli", version = "0.95.3" }
-sn_client = { path = "../sn_client", version = "0.110.4" }
-sn_logging = { path = "../sn_logging", version = "0.2.36" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.11" }
-sn_transfers = { path = "../sn_transfers", version = "0.19.3" }
-tokio = { version = "1.32.0", features = ["parking_lot", "rt"] }
-tracing = { version = "~0.1.26" }
-url = "2.5.0"
-fs2 = "0.4.3"
-reqwest = { version = "0.12.4", default-features = false, features = [
-    "rustls-tls",
-], optional = true }
-futures = { version = "0.3.30", optional = true }
-
-[lints]
-workspace = true
diff --git a/sn_faucet/README.md b/sn_faucet/README.md
deleted file mode 100644
index 041edc921d..0000000000
--- a/sn_faucet/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Safe Network Faucet
-This is a command line application that allows you to run a Safe Network Faucet.
-
-## Usage
-Run `cargo run -- <command>` to start the application. Some of the commands available are:
-
-- `ClaimGenesis`: Claim the amount in the genesis CashNote and deposit it to the faucet local wallet.
-- `Send`: Send a specified amount of tokens to a specified wallet.
-- `Server`: Starts an http server that will send tokens to anyone who requests them.
-
-For more information about each command, run `cargo run -- --help`.
diff --git a/sn_faucet/maid_address_claims.csv b/sn_faucet/maid_address_claims.csv
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/sn_faucet/src/faucet_server.rs b/sn_faucet/src/faucet_server.rs
deleted file mode 100644
index 0147b434e3..0000000000
--- a/sn_faucet/src/faucet_server.rs
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use crate::claim_genesis;
-#[cfg(feature = "gifting")]
-use crate::send_tokens;
-#[cfg(feature = "distribution")]
-use crate::token_distribution;
-use color_eyre::eyre::Result;
-use fs2::FileExt;
-use sn_client::{
-    acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet,
-    Client,
-};
-use sn_transfers::{
-    get_faucet_data_dir, wallet_lockfile_name, NanoTokens, Transfer, WALLET_DIR_NAME,
-};
-use std::path::Path;
-use std::{collections::HashMap, sync::Arc};
-use tokio::sync::Semaphore;
-use tracing::{debug, error, info, warn};
-use warp::{
-    http::{Response, StatusCode},
-    Filter, Reply,
-};
-
-#[cfg(feature = "initial-data")]
-use crate::gutenberger::{download_book, State};
-#[cfg(feature = "initial-data")]
-use reqwest::Client as ReqwestClient;
-#[cfg(feature = "initial-data")]
-use sn_cli::FilesUploader;
-#[cfg(feature = "initial-data")]
-use sn_client::{UploadCfg, BATCH_SIZE};
-#[cfg(feature = "initial-data")]
-use sn_protocol::storage::{ChunkAddress, RetryStrategy};
-#[cfg(feature = "initial-data")]
-use std::{fs::File, path::PathBuf};
-#[cfg(feature = "initial-data")]
-use tokio::{fs, io::AsyncWriteExt};
-
-/// Run the faucet server.
-///
-/// This will listen on port 8000 and send a transfer of tokens as response to any GET request.
-///
-/// # Example
-///
-/// ```bash
-/// # run faucet server
-/// cargo run --features="local" --bin faucet --release -- server
-///
-/// # query faucet server for money for our address `get local wallet address`
-/// curl "localhost:8000/`cargo run --features="local" --bin safe --release wallet address | tail -n 1`" > transfer_hex
-///
-/// # receive transfer with our wallet
-/// cargo run --features="local" --bin safe --release wallet receive --file transfer_hex
-///
-/// # balance should be updated
-/// ```
-pub async fn run_faucet_server(client: &Client) -> Result<()> {
-    let root_dir = get_faucet_data_dir();
-    let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)?;
-    claim_genesis(client, wallet).await.inspect_err(|_err| {
-        println!("Faucet Server couldn't start as we failed to claim Genesis");
-        eprintln!("Faucet Server couldn't start as we failed to claim Genesis");
-        error!("Faucet Server couldn't start as we failed to claim Genesis");
-    })?;
-
-    #[cfg(feature = "initial-data")]
-    {
-        let _ = upload_initial_data(client, &root_dir).await;
-    }
-
-    startup_server(client.clone()).await
-}
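The doc comment above drives the request flow with curl; the same request can be made from Rust. A minimal client-side sketch (not part of the original crate), assuming a faucet running locally on port 8000 and the `reqwest` and `tokio` crates; `request_tokens` and the output file name are hypothetical:

```rust
use std::error::Error;

/// Hypothetical helper: ask a locally running faucet (GET /<address-hex>)
/// for tokens and save the returned hex-encoded transfer to a file.
async fn request_tokens(address_hex: &str) -> Result<String, Box<dyn Error>> {
    // The server above replies to any GET with a transfer for the given address.
    let url = format!("http://localhost:8000/{address_hex}");
    let transfer_hex = reqwest::get(&url).await?.text().await?;
    // The safe CLI can then receive it: `safe wallet receive --file transfer_hex`
    std::fs::write("transfer_hex", &transfer_hex)?;
    Ok(transfer_hex)
}
```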
-
-#[cfg(feature = "initial-data")]
-/// Trigger uploading of the initial data packets to the network, one by one.
-async fn upload_initial_data(client: &Client, root_dir: &Path) -> Result<()> {
-    let temp_dir = std::env::temp_dir();
-    let state_file = temp_dir.join("state.json");
-    let uploaded_books_file = temp_dir.join("uploaded_books.json");
-    let mut state = State::load_from_file(&state_file)?;
-
-    let reqwest_client = ReqwestClient::new();
-
-    let mut uploaded_books: Vec<(String, String)> = if uploaded_books_file.exists() {
-        let file = File::open(&uploaded_books_file)?;
-        serde_json::from_reader(file)?
-    } else {
-        vec![]
-    };
-
-    println!("Previous upload state restored");
-    info!("Previous upload state restored");
-
-    for book_id in state.max_seen()..u16::MAX as u32 {
-        if state.has_seen(book_id) {
-            println!("Already seen book ID: {book_id}");
-            info!("Already seen book ID: {book_id}");
-            continue;
-        }
-
-        match download_book(&reqwest_client, book_id).await {
-            Ok(data) => {
-                println!("Downloaded book ID: {book_id}");
-                info!("Downloaded book ID: {book_id}");
-
-                let fname = format!("{book_id}.book");
-                let fpath = temp_dir.join(fname.clone());
-
-                match mark_download_progress(book_id, &fpath, data, &mut state, &state_file).await {
-                    Ok(_) => {
-                        println!("Marked download progress book ID: {book_id} completed");
-                        info!("Marked download progress book ID: {book_id} completed");
-                    }
-                    Err(err) => {
-                        println!("When marking download progress book ID: {book_id}, encountered error {err:?}");
-                        error!("When marking download progress book ID: {book_id}, encountered error {err:?}");
-                        continue;
-                    }
-                }
-
-                match upload_downloaded_book(client, root_dir, fpath).await {
-                    Ok(head_addresses) => {
-                        println!("Uploaded book ID: {book_id}");
-                        info!("Uploaded book ID: {book_id}");
-
-                        // There shall be just one
-                        for head_address in head_addresses {
-                            uploaded_books.push((fname.clone(), head_address.to_hex()));
-
-                            match mark_upload_progress(&uploaded_books_file, &uploaded_books) {
-                                Ok(_) => {
-                                    println!("Marked upload progress book ID: {book_id} completed");
-                                    info!("Marked upload progress book ID: {book_id} completed");
-                                }
-                                Err(err) => {
-                                    println!("When marking upload progress book ID: {book_id}, encountered error {err:?}");
-                                    error!("When marking upload progress book ID: {book_id}, encountered error {err:?}");
-                                    continue;
-                                }
-                            }
-                        }
-                    }
-                    Err(err) => {
-                        println!("Failed to upload book ID: {book_id} with error {err:?}");
-                        info!("Failed to upload book ID: {book_id} with error {err:?}");
-                    }
-                }
-
-                println!("Sleeping for 1 minute...");
-                tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
-            }
-            Err(e) => {
-                eprintln!("Failed to download book ID {book_id}: {e:?}");
-            }
-        }
-    }
-
-    Ok(())
-}
-
-#[cfg(feature = "initial-data")]
-async fn mark_download_progress(
-    book_id: u32,
-    fpath: &Path,
-    data: Vec<u8>,
-    state: &mut State,
-    state_file: &Path,
-) -> Result<()> {
-    let mut dest = fs::File::create(fpath).await?;
-    dest.write_all(&data).await?;
-
-    state.mark_seen(book_id);
-    state.save_to_file(state_file)?;
-    Ok(())
-}
-
-#[cfg(feature = "initial-data")]
-fn mark_upload_progress(fpath: &Path, uploaded_books: &Vec<(String, String)>) -> Result<()> {
-    let file = File::create(fpath)?;
-    serde_json::to_writer(file, &uploaded_books)?;
-    Ok(())
-}
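For reference, the checkpoint written by `mark_upload_progress` is nothing more than a JSON array of `(file_name, head_address_hex)` pairs. A standalone sketch with illustrative values:

```rust
fn main() -> serde_json::Result<()> {
    // Same shape as the `uploaded_books` checkpoint above; values are made up.
    let uploaded_books: Vec<(String, String)> =
        vec![("1342.book".to_string(), "ab12cd34".to_string())];
    // Prints: [["1342.book","ab12cd34"]]
    println!("{}", serde_json::to_string(&uploaded_books)?);
    Ok(())
}
```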
-
-#[cfg(feature = "initial-data")]
-async fn upload_downloaded_book(
-    client: &Client,
-    root_dir: &Path,
-    file_path: PathBuf,
-) -> Result<Vec<ChunkAddress>> {
-    let upload_cfg = UploadCfg {
-        batch_size: BATCH_SIZE,
-        verify_store: true,
-        retry_strategy: RetryStrategy::Quick,
-        ..Default::default()
-    };
-
-    let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf())
-        .set_make_data_public(true)
-        .set_upload_cfg(upload_cfg)
-        .insert_path(&file_path);
-
-    let summary = match files_uploader.start_upload().await {
-        Ok(summary) => summary,
-        Err(err) => {
-            println!("Failed to upload {file_path:?} with error {err:?}");
-            return Ok(vec![]);
-        }
-    };
-
-    info!(
-        "Upload of file {file_path:?} completed with summary {:?}",
-        summary.upload_summary
-    );
-    println!(
-        "Upload of file {file_path:?} completed with summary {:?}",
-        summary.upload_summary
-    );
-
-    let mut head_addresses = vec![];
-    for (_, file_name, head_address) in summary.completed_files.iter() {
-        info!(
-            "Head address of {file_name:?} is {:?}",
-            head_address.to_hex()
-        );
-        println!(
-            "Head address of {file_name:?} is {:?}",
-            head_address.to_hex()
-        );
-        head_addresses.push(*head_address);
-    }
-
-    Ok(head_addresses)
-}
-
-pub async fn restart_faucet_server(client: &Client) -> Result<()> {
-    let root_dir = get_faucet_data_dir();
-    println!("Loading the previous wallet at {root_dir:?}");
-    debug!("Loading the previous wallet at {root_dir:?}");
-
-    deposit(&root_dir)?;
-
-    println!("Previous wallet loaded");
-    debug!("Previous wallet loaded");
-
-    startup_server(client.clone()).await
-}
-
-#[cfg(feature = "distribution")]
-async fn respond_to_distribution_request(
-    client: Client,
-    query: HashMap<String, String>,
-    balances: HashMap<String, NanoTokens>,
-    semaphore: Arc<Semaphore>,
-) -> std::result::Result<impl Reply, std::convert::Infallible> {
-    let permit = semaphore.try_acquire();
-
-    // some rate limiting
-    if is_wallet_locked() || permit.is_err() {
-        warn!("Rate limited request due to locked wallet");
-
-        let mut response = Response::new("Rate limited".to_string());
-        *response.status_mut() = StatusCode::TOO_MANY_REQUESTS;
-
-        // Either opening the file or locking it failed, indicating rate limiting should occur
-        return Ok(response);
-    }
-
-    let r =
-        match token_distribution::handle_distribution_req(&client, query, balances.clone()).await {
-            Ok(distribution) => Response::new(distribution.to_string()),
-            Err(err) => {
-                eprintln!("Failed to get distribution: {err}");
-                error!("Failed to get distribution: {err}");
-                Response::new(format!("Failed to get distribution: {err}"))
-            }
-        };
-
-    Ok(r)
-}
-
-fn is_wallet_locked() -> bool {
-    info!("Checking if wallet is locked");
-    let root_dir = get_faucet_data_dir();
-
-    let wallet_dir = root_dir.join(WALLET_DIR_NAME);
-    let wallet_lockfile_name = wallet_lockfile_name(&wallet_dir);
-    let file_result = std::fs::OpenOptions::new()
-        .create(true)
-        .write(true)
-        .truncate(true)
-        .open(wallet_lockfile_name)
-        .and_then(|file| file.try_lock_exclusive());
-    info!("Done checking if wallet is locked");
-
-    if file_result.is_err() {
-        // Either opening the file or locking it failed, indicating rate limiting should occur
-        return true;
-    }
-
-    false
-}
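The handlers below all rate limit the same way: a single-permit semaphore bounds in-process concurrency, and the exclusive wallet lockfile rejects requests while another process holds the wallet. A minimal standalone sketch of that pattern, assuming the `fs2` and `tokio` crates; the lockfile path and function names are illustrative:

```rust
use std::{path::Path, sync::Arc};

use fs2::FileExt;
use tokio::sync::Semaphore;

// Mirrors is_wallet_locked above: failing to open or lock the file means "busy".
fn lockfile_is_busy(path: &Path) -> bool {
    std::fs::OpenOptions::new()
        .create(true)
        .write(true)
        .open(path)
        .and_then(|file| file.try_lock_exclusive())
        .is_err()
}

async fn handle(semaphore: Arc<Semaphore>) -> &'static str {
    // try_acquire fails fast instead of queueing, like the handlers below.
    let Ok(_permit) = semaphore.try_acquire() else {
        return "Rate limited";
    };
    if lockfile_is_busy(Path::new("/tmp/wallet.lock")) {
        return "Rate limited";
    }
    "OK"
}
```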
-
-async fn respond_to_donate_request(
-    client: Client,
-    transfer_str: String,
-    semaphore: Arc<Semaphore>,
-) -> std::result::Result<impl Reply, std::convert::Infallible> {
-    let permit = semaphore.try_acquire();
-    info!("Got donate request with: {transfer_str}");
-
-    // some rate limiting
-    if is_wallet_locked() || permit.is_err() {
-        warn!("Rate limited request due to locked wallet");
-        let mut response = Response::new("Rate limited".to_string());
-        *response.status_mut() = StatusCode::TOO_MANY_REQUESTS;
-
-        // Either opening the file or locking it failed, indicating rate limiting should occur
-        return Ok(response);
-    }
-
-    let faucet_root = get_faucet_data_dir();
-    let mut wallet = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) {
-        Ok(wallet) => wallet,
-        Err(_error) => {
-            let mut response = Response::new("Could not load wallet".to_string());
-            *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
-            return Ok(response);
-        }
-    };
-
-    if let Err(err) = fund_faucet_from_genesis_wallet(&client, &mut wallet).await {
-        eprintln!("Failed to load + fund faucet wallet: {err}");
-        error!("Failed to load + fund faucet wallet: {err}");
-        let mut response = Response::new(format!("Failed to load faucet wallet: {err}"));
-        *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
-        return Ok(response);
-    };
-
-    // return the faucet address if the transfer string is empty
-    if transfer_str.is_empty() {
-        let address = wallet.address().to_hex();
-        return Ok(Response::new(format!("Faucet wallet address: {address}")));
-    }
-
-    // parse transfer
-    let transfer = match Transfer::from_hex(&transfer_str) {
-        Ok(t) => t,
-        Err(err) => {
-            eprintln!("Failed to parse transfer: {err}");
-            error!("Failed to parse transfer {transfer_str}: {err}");
-            let mut response = Response::new(format!("Failed to parse transfer: {err}"));
-            *response.status_mut() = StatusCode::BAD_REQUEST;
-            return Ok(response);
-        }
-    };
-
-    // receive transfer
-    let res = client.receive(&transfer, &wallet).await;
-    match res {
-        Ok(cashnotes) => {
-            let old_balance = wallet.balance();
-            if let Err(e) = wallet.deposit_and_store_to_disk(&cashnotes) {
-                eprintln!("Failed to store deposited amount: {e}");
-                error!("Failed to store deposited amount: {e}");
-                let mut response = Response::new(format!("Failed to store deposited amount: {e}"));
-                *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
-                return Ok(response);
-            }
-            let new_balance = wallet.balance();
-
-            info!("Successfully stored cash_note to wallet dir");
-            info!("Old balance: {old_balance}, new balance: {new_balance}");
-
-            Ok(Response::new("Thank you!".to_string()))
-        }
-        Err(err) => {
-            eprintln!("Failed to verify and redeem transfer: {err}");
-            error!("Failed to verify and redeem transfer: {err}");
-            let mut response =
-                Response::new(format!("Failed to verify and redeem transfer: {err}"));
-            *response.status_mut() = StatusCode::BAD_REQUEST;
-            Ok(response)
-        }
-    }
-}
-
-#[cfg(not(feature = "gifting"))]
-#[expect(clippy::unused_async)]
-async fn respond_to_gift_request(
-    _client: Client,
-    _key: String,
-    _semaphore: Arc<Semaphore>,
-) -> std::result::Result<impl Reply, std::convert::Infallible> {
-    let mut response = Response::new("Gifting not enabled".to_string());
-    *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
-
-    Ok(response)
-}
-
-#[cfg(feature = "gifting")]
-async fn respond_to_gift_request(
-    client: Client,
-    key: String,
-    semaphore: Arc<Semaphore>,
-) -> std::result::Result<impl Reply, std::convert::Infallible> {
-    let faucet_root = get_faucet_data_dir();
-
-    let from = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) {
-        Ok(wallet) => wallet,
-        Err(_error) => {
-            let mut response = Response::new("Could not load wallet".to_string());
-            *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
-            return Ok(response);
-        }
-    };
-
-    let permit = semaphore.try_acquire();
-
-    // some rate limiting
-    if is_wallet_locked() || permit.is_err() {
-        warn!("Rate limited request due to locked wallet");
-        let mut response = Response::new("Rate limited".to_string());
-        *response.status_mut() = StatusCode::TOO_MANY_REQUESTS;
-
-        // Either opening the file or locking it failed, indicating rate limiting should occur
-        return Ok(response);
-    }
-
-    const GIFT_AMOUNT_SNT: &str = "1";
-    match send_tokens(&client, from, GIFT_AMOUNT_SNT, &key).await {
-        Ok(transfer) => {
-            println!("Sent tokens to {key}");
-            debug!("Sent tokens to {key}");
-            Ok(Response::new(transfer.to_string()))
-        }
-        Err(err) => {
-            eprintln!("Failed to send tokens to {key}: {err}");
-            error!("Failed to send tokens to {key}: {err}");
-            Ok(Response::new(format!("Failed to send tokens: {err}")))
-        }
-    }
-}
-
-async fn startup_server(client: Client) -> Result<()> {
-    // Create a semaphore with a single permit
-    let semaphore = Arc::new(Semaphore::new(1));
-
-    #[expect(unused)]
-    let mut balances = HashMap::<String, NanoTokens>::new();
-    #[cfg(feature = "distribution")]
-    {
-        balances = token_distribution::load_maid_snapshot()?;
-        let keys = token_distribution::load_maid_claims()?;
-        // Each distribution takes about 500ms to create, so for thousands of
-        // initial distributions this takes many minutes. This is run in the
-        // background instead of blocking the server from starting.
-        tokio::spawn(token_distribution::distribute_from_maid_to_tokens(
-            client.clone(),
-            balances.clone(),
-            keys,
-        ));
-    }
-
-    let gift_client = client.clone();
-    let donation_client = client.clone();
-    let donation_addr_client = client.clone();
-    let donation_semaphore = Arc::clone(&semaphore);
-    let donation_addr_semaphore = Arc::clone(&semaphore);
-    #[cfg(feature = "distribution")]
-    let semaphore_dist = Arc::clone(&semaphore);
-
-    // GET /distribution/address=address&wallet=wallet&signature=signature
-    #[cfg(feature = "distribution")]
-    let distribution_route = warp::get()
-        .and(warp::path("distribution"))
-        .and(warp::query::<HashMap<String, String>>())
-        .map(|query| {
-            debug!("Received distribution request: {query:?}");
-            query
-        })
-        .and_then(move |query| {
-            let semaphore = Arc::clone(&semaphore_dist);
-            let client = client.clone();
-            respond_to_distribution_request(client, query, balances.clone(), semaphore)
-        });
-
-    // GET /key
-    let gift_route = warp::get()
-        .and(warp::path!(String))
-        .map(|query| {
-            debug!("Gift distribution request: {query}");
-            query
-        })
-        .and_then(move |key| {
-            let client = gift_client.clone();
-            let semaphore = Arc::clone(&semaphore);
-
-            respond_to_gift_request(client, key, semaphore)
-        });
-
-    // GET /donate
-    let donation_addr = warp::get().and(warp::path("donate")).and_then(move || {
-        debug!("Donation address request");
-        let client = donation_addr_client.clone();
-        let semaphore = Arc::clone(&donation_addr_semaphore);
-
-        respond_to_donate_request(client, String::new(), semaphore)
-    });
-
-    // GET /donate/transfer
-    let donation_route = warp::get()
-        .and(warp::path!("donate" / String))
-        .map(|query| {
-            debug!("Donation request: {query}");
-            query
-        })
-        .and_then(move |transfer| {
-            let client = donation_client.clone();
-            let semaphore = Arc::clone(&donation_semaphore);
-
-            respond_to_donate_request(client, transfer, semaphore)
-        });
-
-    println!("Starting http server listening on port 8000...");
-    debug!("Starting http server listening on port 8000...");
-
-    #[cfg(feature = "distribution")]
-    warp::serve(
-        distribution_route
-            .or(donation_route)
-            .or(donation_addr)
-            .or(gift_route),
-    )
-    // warp::serve(gift_route)
-    .run(([0, 0, 0, 0], 8000))
-    .await;
-
-    #[cfg(not(feature = "distribution"))]
-    warp::serve(donation_route.or(donation_addr).or(gift_route))
-        .run(([0, 0, 0, 0], 8000))
-        .await;
-
-    debug!("Server closed");
-    Ok(())
-}
-
-fn deposit(root_dir: &Path) -> Result<()> {
-    let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?;
-
-    let previous_balance = wallet.balance();
-
-    wallet.try_load_cash_notes()?;
-
-    let deposited = NanoTokens::from(wallet.balance().as_nano() - previous_balance.as_nano());
-    if deposited.is_zero() {
-        println!("Nothing deposited.");
-    } else if let Err(err) = wallet.deposit_and_store_to_disk(&vec![]) {
-        println!("Failed to store deposited ({deposited}) amount: {err:?}");
-    } else {
-        println!("Deposited {deposited}.");
-    }
-
-    Ok(())
-}
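Routes of the shape used in `startup_server` can be exercised without binding a socket via `warp::test`. A sketch against a stand-in filter matching the donate route (the real handler needs a connected `Client`, so the body here is a dummy):

```rust
#[tokio::main]
async fn main() {
    use warp::Filter;

    // Same route shape as `donation_route` above, with a dummy handler.
    let donate = warp::get()
        .and(warp::path!("donate" / String))
        .map(|transfer: String| format!("got transfer of {} hex chars", transfer.len()));

    let res = warp::test::request()
        .method("GET")
        .path("/donate/abcdef")
        .reply(&donate)
        .await;

    assert_eq!(res.status(), 200);
    assert_eq!(res.body().as_ref(), b"got transfer of 6 hex chars");
}
```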
diff --git a/sn_faucet/src/gutenberger.rs b/sn_faucet/src/gutenberger.rs
deleted file mode 100644
index 4968c93cc4..0000000000
--- a/sn_faucet/src/gutenberger.rs
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use color_eyre::eyre::Result;
-use reqwest::Client;
-use serde::{Deserialize, Serialize};
-use std::collections::HashSet;
-use std::fs::File;
-use std::path::Path;
-
-#[derive(Serialize, Deserialize)]
-pub(crate) struct State {
-    seen_books: HashSet<u32>,
-}
-
-impl State {
-    pub(crate) fn new() -> Self {
-        State {
-            seen_books: HashSet::new(),
-        }
-    }
-
-    pub(crate) fn load_from_file(path: &Path) -> Result<Self> {
-        if path.exists() {
-            let file = File::open(path)?;
-            let state: State = serde_json::from_reader(file)?;
-            Ok(state)
-        } else {
-            Ok(Self::new())
-        }
-    }
-
-    pub(crate) fn save_to_file(&self, path: &Path) -> Result<()> {
-        let file = File::create(path)?;
-        serde_json::to_writer(file, self)?;
-        Ok(())
-    }
-
-    pub(crate) fn mark_seen(&mut self, book_id: u32) {
-        self.seen_books.insert(book_id);
-    }
-
-    pub(crate) fn has_seen(&self, book_id: u32) -> bool {
-        if book_id == 0 && self.seen_books.is_empty() {
-            return true;
-        }
-        self.seen_books.contains(&book_id)
-    }
-
-    pub(crate) fn max_seen(&self) -> u32 {
-        if let Some(result) = self.seen_books.iter().max() {
-            *result
-        } else {
-            0
-        }
-    }
-}
-
-pub(crate) async fn download_book(client: &Client, book_id: u32) -> Result<Vec<u8>> {
-    let url = format!("http://www.gutenberg.org/ebooks/{book_id}.txt.utf-8");
-    let response = client.get(&url).send().await?.bytes().await?;
-    Ok(response.to_vec())
-}
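The resume behaviour in `upload_initial_data` hinges on this `State` type round-tripping through disk. An in-crate sketch of that cycle (the temp path is illustrative):

```rust
fn main() -> color_eyre::eyre::Result<()> {
    let path = std::env::temp_dir().join("state.json");

    // Mark a book as seen and persist the checkpoint...
    let mut state = State::load_from_file(&path)?;
    state.mark_seen(1342);
    state.save_to_file(&path)?;

    // ...then a fresh load skips it, and iteration resumes from max_seen().
    let reloaded = State::load_from_file(&path)?;
    assert!(reloaded.has_seen(1342));
    assert!(reloaded.max_seen() >= 1342);
    Ok(())
}
```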
diff --git a/sn_faucet/src/main.rs b/sn_faucet/src/main.rs
deleted file mode 100644
index ad1bf336f9..0000000000
--- a/sn_faucet/src/main.rs
+++ /dev/null
@@ -1,311 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-mod faucet_server;
-#[cfg(feature = "initial-data")]
-pub(crate) mod gutenberger;
-#[cfg(feature = "distribution")]
-mod token_distribution;
-
-use clap::{Parser, Subcommand};
-use color_eyre::eyre::{bail, eyre, Result};
-use faucet_server::{restart_faucet_server, run_faucet_server};
-use indicatif::ProgressBar;
-use sn_client::{
-    acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet, send,
-    Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver,
-};
-use sn_logging::{Level, LogBuilder, LogOutputDest};
-use sn_peers_acquisition::PeersArgs;
-use sn_protocol::version::IDENTIFY_PROTOCOL_STR;
-use sn_transfers::{get_faucet_data_dir, HotWallet, MainPubkey, NanoTokens, Transfer};
-use std::{path::PathBuf, time::Duration};
-use tokio::{sync::broadcast::error::RecvError, task::JoinHandle};
-use tracing::{error, info};
-
-#[tokio::main]
-async fn main() -> Result<()> {
-    let opt = Opt::parse();
-
-    if opt.version {
-        println!(
-            "{}",
-            sn_build_info::version_string(
-                "Autonomi Test Faucet",
-                env!("CARGO_PKG_VERSION"),
-                Some(&IDENTIFY_PROTOCOL_STR.to_string())
-            )
-        );
-        return Ok(());
-    }
-
-    if opt.crate_version {
-        println!("Crate version: {}", env!("CARGO_PKG_VERSION"));
-        return Ok(());
-    }
-
-    if opt.protocol_version {
-        println!("Network version: {}", *IDENTIFY_PROTOCOL_STR);
-        return Ok(());
-    }
-
-    #[cfg(not(feature = "nightly"))]
-    if opt.package_version {
-        println!("Package version: {}", sn_build_info::package_version());
-        return Ok(());
-    }
-
-    let bootstrap_peers = opt.peers.get_peers().await?;
-    let bootstrap_peers = if bootstrap_peers.is_empty() {
-        // empty vec is returned if `local` flag is provided
-        None
-    } else {
-        Some(bootstrap_peers)
-    };
-
-    let logging_targets = vec![
-        // TODO: Reset to nice and clean defaults once we have a better idea of what we want
-        ("faucet".to_string(), Level::TRACE),
-        ("sn_client".to_string(), Level::TRACE),
-        ("sn_faucet".to_string(), Level::TRACE),
-        ("sn_networking".to_string(), Level::DEBUG),
-        ("sn_build_info".to_string(), Level::TRACE),
-        ("sn_logging".to_string(), Level::TRACE),
-        ("sn_peers_acquisition".to_string(), Level::TRACE),
-        ("sn_protocol".to_string(), Level::TRACE),
-        ("sn_registers".to_string(), Level::TRACE),
-        ("sn_transfers".to_string(), Level::TRACE),
-    ];
-
-    let mut log_builder = LogBuilder::new(logging_targets);
-    log_builder.output_dest(opt.log_output_dest);
-    let _log_handles = log_builder.initialize()?;
-
-    sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR);
-
-    info!("Instantiating a SAFE Test Faucet...");
-
-    let secret_key = bls::SecretKey::random();
-    let broadcaster = ClientEventsBroadcaster::default();
-    let (progress_bar, handle) = spawn_connection_progress_bar(broadcaster.subscribe());
-    let result = Client::new(secret_key, bootstrap_peers, None, Some(broadcaster)).await;
-    let client = match result {
-        Ok(client) => client,
-        Err(err) => {
-            // clean up progress bar
-            progress_bar.finish_with_message("Could not connect to the network");
-            error!("Failed to get Client with err {err:?}");
-            return Err(err.into());
-        }
-    };
-    handle.await?;
-
-    let root_dir = get_faucet_data_dir();
-    let mut funded_faucet = match load_account_wallet_or_create_with_mnemonic(&root_dir, None) {
-        Ok(wallet) => wallet,
-        Err(err) => {
-            println!("Failed to load wallet for faucet with error {err:?}");
-            error!("Failed to load wallet for faucet with error {err:?}");
-            return Err(err.into());
-        }
-    };
-
-    fund_faucet_from_genesis_wallet(&client, &mut funded_faucet).await?;
-
-    if let Err(err) = faucet_cmds(opt.cmd.clone(), &client, funded_faucet).await {
-        error!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd);
-        eprintln!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd);
-    }
-
-    Ok(())
-}
-
-/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the
-/// client successfully connects to the network or if it errors out.
-fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) {
-    // Network connection progress bar
-    let progress_bar = ProgressBar::new_spinner();
-    let progress_bar_clone = progress_bar.clone();
-    progress_bar.enable_steady_tick(Duration::from_millis(120));
-    let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗");
-    progress_bar.set_style(new_style);
-
-    progress_bar.set_message("Connecting to The SAFE Network...");
-
-    let handle = tokio::spawn(async move {
-        let mut peers_connected = 0;
-        loop {
-            match rx.recv().await {
-                Ok(ClientEvent::ConnectedToNetwork) => {
-                    progress_bar.finish_with_message("Connected to the Network");
-                    break;
-                }
-                Ok(ClientEvent::PeerAdded {
-                    max_peers_to_connect,
-                }) => {
-                    peers_connected += 1;
-                    progress_bar.set_message(format!(
-                        "{peers_connected}/{max_peers_to_connect} initial peers found.",
-                    ));
-                }
-                Err(RecvError::Lagged(_)) => {
-                    // Even if the receiver is lagged, we would still get the ConnectedToNetwork during each new
-                    // connection. Thus it would be okay to skip this error.
-                }
-                Err(RecvError::Closed) => {
-                    progress_bar.finish_with_message("Could not connect to the network");
-                    break;
-                }
-                _ => {}
-            }
-        }
-    });
-    (progress_bar_clone, handle)
-}
-
-#[derive(Parser)]
-#[command(disable_version_flag = true)]
-struct Opt {
-    /// Specify the logging output destination.
-    ///
-    /// Valid values are "stdout", "data-dir", or a custom path.
-    ///
-    /// `data-dir` is the default value.
-    ///
-    /// The data directory location is platform specific:
-    ///  - Linux: $HOME/.local/share/safe/client/logs
-    ///  - macOS: $HOME/Library/Application Support/safe/client/logs
-    ///  - Windows: C:\Users\<username>\AppData\Roaming\safe\client\logs
-    #[clap(long, value_parser = parse_log_output, verbatim_doc_comment, default_value = "data-dir")]
-    pub log_output_dest: LogOutputDest,
-
-    #[command(flatten)]
-    peers: PeersArgs,
-
-    /// Available sub commands.
-    #[clap(subcommand)]
-    pub cmd: Option<SubCmd>,
-
-    /// Print the crate version
-    #[clap(long)]
-    crate_version: bool,
-
-    /// Print the protocol version
-    #[clap(long)]
-    protocol_version: bool,
-
-    /// Print the package version
-    #[cfg(not(feature = "nightly"))]
-    #[clap(long)]
-    package_version: bool,
-
-    /// Print version information.
-    #[clap(long)]
-    version: bool,
-}
-
-#[derive(Subcommand, Debug, Clone)]
-enum SubCmd {
-    /// Claim the amount in the genesis CashNote and deposit it to the faucet local wallet.
-    /// This needs to be run before a testnet is opened to the public, so as to not have
-    /// the genesis claimed by someone else (the key and cash_note are public for audit).
-    ClaimGenesis,
-    Send {
-        /// This shall be the number of nanos to send.
-        #[clap(name = "amount")]
-        amount: String,
-        /// This must be a hex-encoded `MainPubkey`.
-        #[clap(name = "to")]
-        to: String,
-    },
-    /// Starts an http server that will send tokens to anyone who requests them.
-    /// curl http://localhost:8000/your-hex-encoded-wallet-public-address
-    Server,
-    /// Restart the faucet_server from the last breaking point.
-    ///
-    /// Before firing this cmd, ensure:
-    /// 1. The previous faucet_server has been stopped.
-    /// 2. Invalid cash_notes have been removed from the cash_notes folder.
-    /// 3. The old `wallet` and `wallet.lock` files have also been removed.
-    /// The command will create a new wallet with the same key,
-    /// then deposit all valid cash_notes into the wallet and start up the faucet_server.
-    RestartServer,
-}
-
-async fn faucet_cmds(
-    cmds: Option<SubCmd>,
-    client: &Client,
-    funded_wallet: HotWallet,
-) -> Result<()> {
-    if let Some(cmds) = cmds {
-        match cmds {
-            SubCmd::ClaimGenesis => {
-                claim_genesis(client, funded_wallet).await?;
-            }
-            SubCmd::Send { amount, to } => {
-                send_tokens(client, funded_wallet, &amount, &to).await?;
-            }
-            SubCmd::Server => {
-                run_faucet_server(client).await?;
-            }
-            SubCmd::RestartServer => {
-                restart_faucet_server(client).await?;
-            }
-        }
-    } else {
-        // Handle the case when no subcommand is provided
-        println!("No subcommand provided. Use --help for more information.");
-    }
-    Ok(())
-}
-
-async fn claim_genesis(client: &Client, mut wallet: HotWallet) -> Result<()> {
-    for i in 1..6 {
-        if let Err(e) = fund_faucet_from_genesis_wallet(client, &mut wallet).await {
-            println!("Failed to claim genesis: {e}");
-        } else {
-            println!("Genesis claimed!");
-            return Ok(());
-        }
-        println!("Trying to claim genesis... attempt {i}");
-    }
-    bail!("Failed to claim genesis")
-}
-
-/// returns the hex-encoded transfer
-async fn send_tokens(client: &Client, from: HotWallet, amount: &str, to: &str) -> Result<String> {
-    let to = MainPubkey::from_hex(to)?;
-    use std::str::FromStr;
-    let amount = NanoTokens::from_str(amount)?;
-    if amount.as_nano() == 0 {
-        println!("Invalid format or zero amount passed in. Nothing sent.");
-        return Err(eyre!(
-            "Invalid format or zero amount passed in. Nothing sent."
-        ));
-    }
-
-    let cash_note = send(from, amount, to, client, true).await?;
-    let transfer_hex = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?;
-    println!("{transfer_hex}");
-
-    Ok(transfer_hex)
-}
-
-fn parse_log_output(val: &str) -> Result<LogOutputDest> {
-    match val {
-        "stdout" => Ok(LogOutputDest::Stdout),
-        "data-dir" => {
-            let dir = get_faucet_data_dir().join("logs");
-            Ok(LogOutputDest::Path(dir))
-        }
-        // The path should be a directory, but we can't use something like `is_dir` to check
-        // because the path doesn't need to exist. We can create it for the user.
-        value => Ok(LogOutputDest::Path(PathBuf::from(value))),
-    }
-}
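A quick illustration of the three destinations `parse_log_output` accepts (an in-crate sketch; the sample path is arbitrary):

```rust
fn main() {
    // "stdout" logs to the terminal.
    assert!(matches!(parse_log_output("stdout"), Ok(LogOutputDest::Stdout)));
    // "data-dir" resolves to <faucet data dir>/logs.
    assert!(matches!(parse_log_output("data-dir"), Ok(LogOutputDest::Path(_))));
    // Anything else is treated as a directory path, created later if needed.
    assert!(matches!(parse_log_output("/tmp/faucet-logs"), Ok(LogOutputDest::Path(_))));
}
```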
diff --git a/sn_faucet/src/token_distribution.rs b/sn_faucet/src/token_distribution.rs
deleted file mode 100644
index 76e7b46a9f..0000000000
--- a/sn_faucet/src/token_distribution.rs
+++ /dev/null
@@ -1,734 +0,0 @@
-// Copyright 2024 MaidSafe.net limited.
-//
-// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
-// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. Please review the Licences for the specific language governing
-// permissions and limitations relating to use of the SAFE Network Software.
-
-use crate::send_tokens;
-#[cfg(feature = "distribution")]
-use base64::Engine;
-use color_eyre::eyre::{eyre, Result};
-use serde::{Deserialize, Serialize};
-use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic;
-use sn_client::Client;
-use sn_transfers::{get_faucet_data_dir, MainPubkey, NanoTokens};
-use std::str::FromStr;
-use std::{collections::HashMap, path::PathBuf};
-use tracing::info;
-
-const SNAPSHOT_FILENAME: &str = "snapshot.json";
-const SNAPSHOT_URL: &str = "https://api.omniexplorer.info/ask.aspx?api=getpropertybalances&prop=3";
-const CLAIMS_URL: &str =
-    "https://github.com/maidsafe/safe_network/raw/main/sn_faucet/maid_address_claims.csv";
-const HTTP_STATUS_OK: i32 = 200;
-
-type MaidAddress = String; // base58 encoded
-type Snapshot = HashMap<MaidAddress, NanoTokens>;
-
-// Parsed from json in SNAPSHOT_URL
-#[derive(Serialize, Deserialize)]
-struct MaidBalance {
-    address: MaidAddress,
-    balance: String,
-    reserved: String,
-}
-
-// Maid owners supply info that allows the faucet to distribute their funds.
-// They sign a safe wallet address using their maid key to prove ownership of
-// the maid.
-// The faucet will distribute SNT directly to that safe wallet address.
-pub struct MaidClaim {
-    address: String,   // base58 encoded bitcoin address owning omni maid
-    pubkey: String,    // hex encoded bitcoin public key
-    wallet: String,    // hex encoded safe wallet address
-    signature: String, // base64 encoded bitcoin signature of the wallet hex
-}
-
-impl MaidClaim {
-    pub fn new(address: MaidAddress, wallet: String, signature: String) -> Result<MaidClaim> {
-        let pubkey = match pubkey_from_signature(&wallet, &signature) {
-            Ok(pk) => pk,
-            Err(err) => {
-                return Err(eyre!("Invalid signature: {err}"));
-            }
-        };
-        let pubkey_hex = hex::encode(pubkey.to_bytes());
-        let mc = MaidClaim {
-            address,
-            pubkey: pubkey_hex,
-            wallet,
-            signature,
-        };
-        mc.is_valid()?;
-        Ok(mc)
-    }
-
-    pub fn from_csv_line(line: &str) -> Result<MaidClaim> {
-        let cells = line.trim().split(',').collect::<Vec<&str>>();
-        if cells.len() != 4 {
-            let msg = format!("Invalid claim csv: {line}");
-            return Err(eyre!(msg.to_string()));
-        }
-        let mc = MaidClaim {
-            address: cells[0].to_string(),
-            pubkey: cells[1].to_string(),
-            wallet: cells[2].to_string(),
-            signature: cells[3].to_string(),
-        };
-        mc.is_valid()?;
-        Ok(mc)
-    }
-
-    pub fn to_csv_line(&self) -> String {
-        format!(
-            "{},{},{},{}",
-            self.address, self.pubkey, self.wallet, self.signature
-        )
-    }
-
-    pub fn is_valid(&self) -> Result<()> {
-        // check signature is correct
-        check_signature(&self.address, &self.wallet, &self.signature)?;
-        // check pk matches address
-        if !maid_pk_matches_address(&self.address, &self.pubkey) {
-            return Err(eyre!("Claim public key does not match address"));
-        }
-        // check wallet is a valid bls pubkey
-        if MainPubkey::from_hex(&self.wallet).is_err() {
-            return Err(eyre!("Invalid bls public key"));
-        };
-        // if all the checks are ok, it's valid
-        Ok(())
-    }
-
-    pub fn save_to_file(&self) -> Result<()> {
-        // check it's valid before we write it, can't know for sure it was
-        // already validated
-        self.is_valid()?;
-        // if it already exists, overwrite it
-        let addr_path = get_claims_data_dir_path()?.join(self.address.clone());
-        let csv_line = self.to_csv_line();
-        std::fs::write(addr_path, csv_line)?;
-        Ok(())
-    }
-}
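So a claim on disk is a single CSV line of the form `address,pubkey_hex,wallet_hex,signature_base64`. The placeholder values below would fail MaidClaim's signature checks; this standalone sketch only shows how the four cells split out:

```rust
fn main() {
    // Illustrative cells only; a real line carries a valid signature.
    let line = "1MaidAddrBase58,02abcdef,8f1bwallethex,c2lnbmF0dXJl";
    let cells: Vec<&str> = line.trim().split(',').collect();
    assert_eq!(cells.len(), 4);
    let (address, pubkey, wallet, signature) = (cells[0], cells[1], cells[2], cells[3]);
    println!("{address} {pubkey} {wallet} {signature}");
}
```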
-fn get_snapshot_data_dir_path() -> Result<PathBuf> { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_claims_data_dir_path() -> Result<PathBuf> { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("claims"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_distributions_data_dir_path() -> Result<PathBuf> { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("distributions"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -pub fn load_maid_snapshot() -> Result<Snapshot> { - // If the faucet restarts there will be an existing snapshot which should - // be used to avoid conflicts in the balances between two different - // snapshots. - // Check if a previous snapshot already exists - let root_dir = get_snapshot_data_dir_path()?; - let filename = root_dir.join(SNAPSHOT_FILENAME); - if std::fs::metadata(filename.clone()).is_ok() { - info!("Using existing maid snapshot from {:?}", filename); - maid_snapshot_from_file(filename) - } else { - info!("Fetching snapshot from {}", SNAPSHOT_URL); - maid_snapshot_from_internet(filename) - } -} - -fn maid_snapshot_from_file(snapshot_path: PathBuf) -> Result<Snapshot> { - let content = std::fs::read_to_string(snapshot_path)?; - parse_snapshot(content) -} - -fn maid_snapshot_from_internet(snapshot_path: PathBuf) -> Result<Snapshot> { - // make the request - let response = minreq::get(SNAPSHOT_URL).send()?; - // check the request is ok - if response.status_code != HTTP_STATUS_OK { - let msg = format!("Snapshot failed with http status {}", response.status_code); - return Err(eyre!(msg)); - } - // write the response to file - let body = response.as_str()?; - info!("Writing snapshot to {:?}", snapshot_path); - std::fs::write(snapshot_path.clone(), body)?; - info!("Saved snapshot to {:?}", snapshot_path); - // parse the json response - parse_snapshot(body.to_string()) -} - -fn parse_snapshot(json_str: String) -> Result<Snapshot> { - let balances: Vec<MaidBalance> = serde_json::from_str(&json_str)?; - let mut balances_map: Snapshot = Snapshot::new(); - // verify the snapshot is ok - // balances must match the ico amount, which is slightly higher than - // 2^32/10 because of the ico process. - // see https://omniexplorer.info/asset/3 - let supply = NanoTokens::from(452_552_412_000_000_000); - let mut total = NanoTokens::zero(); - for b in &balances { - // The reserved amount is the amount currently for sale on omni dex. - // If it's not included the total is lower than expected. - // So the amount of maid an address owns is balance + reserved.
- let balance = NanoTokens::from_str(&b.balance)?; - let reserved = NanoTokens::from_str(&b.reserved)?; - let address_balance = match balance.checked_add(reserved) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {balance} + {reserved}"); - return Err(eyre!(msg)); - } - }; - total = match total.checked_add(address_balance) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {total} + {address_balance}"); - return Err(eyre!(msg)); - } - }; - balances_map.insert(b.address.clone(), address_balance); - } - if total != supply { - let msg = format!("Incorrect snapshot total, got {total} want {supply}"); - return Err(eyre!(msg)); - } - // log the total number of balances that were parsed - info!("Parsed {} maid balances from the snapshot", balances.len()); - Ok(balances_map) -} - -fn load_maid_claims_from_local() -> Result<HashMap<MaidAddress, MaidClaim>> { - let mut claims = HashMap::new(); - // load from existing files - let claims_dir = get_claims_data_dir_path()?; - let file_list = std::fs::read_dir(claims_dir)?; - for file in file_list { - // add to hashmap - let file = file?; - let claim_csv = std::fs::read_to_string(file.path())?; - let claim = MaidClaim::from_csv_line(&claim_csv)?; - claims.insert(claim.address.clone(), claim); - } - Ok(claims) -} - -pub fn load_maid_claims() -> Result<HashMap<MaidAddress, MaidClaim>> { - info!("Loading claims for distributions"); - let mut claims = match load_maid_claims_from_local() { - Ok(claims) => claims, - Err(err) => { - info!("Failed to load claims from local, {err:?}"); - HashMap::new() - } - }; - info!("{} claims after reading existing files", claims.len()); - - // load from list on internet - info!("Fetching claims from {CLAIMS_URL}"); - let response = minreq::get(CLAIMS_URL).send()?; - // check the request is ok - if response.status_code != 200 { - println!( - "Claims request failed with http status {}", - response.status_code - ); - // The existing data is ok, no need to fail to start the server here - return Ok(claims); - } - // parse the response as csv, each row has format: - // address,pkhex,wallet,signature - let body = response.as_str()?; - let lines: Vec<&str> = body.trim().split('\n').collect(); - info!("{} claims rows from {CLAIMS_URL}", lines.len()); - for line in lines { - let claim = match MaidClaim::from_csv_line(line) { - Ok(c) => c, - Err(_) => { - continue; - } - }; - // validate this claim info all matches correctly - if claim.is_valid().is_err() { - continue; - } - // save this claim to the file system - if claim.save_to_file().is_err() { - println!("Error saving claim to file"); - continue; - } - // add this claim to the hashmap - claims.insert(claim.address.clone(), claim); - } - info!("{} claims after reading from online list", claims.len()); - Ok(claims) -} - -fn maid_pk_matches_address(address: &str, pk_hex: &str) -> bool { - // parse the address - let addr = match bitcoin::Address::from_str(address) { - Ok(a) => a, - Err(_) => return false, - }; - let btc_addr = match addr.clone().require_network(bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - // parse the public key - let pk = match bitcoin::PublicKey::from_str(pk_hex) { - Ok(p) => p, - Err(_) => return false, - }; - // The public key may be for a p2pkh address (starting with 1) or a p2wpkh - // address (starting with 3) so we need to check both.
- let is_p2pkh = btc_addr.is_related_to_pubkey(&pk); - if is_p2pkh { - return true; - } - let p2wpkh_addr = match bitcoin::Address::p2shwpkh(&pk, bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - let is_p2wpkh = p2wpkh_addr == addr; - if is_p2wpkh { - return true; - } - false -} - -fn check_signature(address: &MaidAddress, msg: &str, signature: &str) -> Result<()> { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = bitcoin::sign_message::MessageSignature::from_str(signature)?; - // Signatures don't work with p2wpkh-p2sh so always use p2pkh addr. - // This was double checked with electrum signature validation. - let mut addr = - bitcoin::Address::from_str(address)?.require_network(bitcoin::Network::Bitcoin)?; - let pubkey = pubkey_from_signature(msg, signature)?; - if address.starts_with('3') { - addr = bitcoin::Address::p2pkh(&pubkey, bitcoin::Network::Bitcoin); - } - // check the signature is correct - if !sig.is_signed_by_address(&secp, &addr, msg_hash)? { - return Err(eyre!("Invalid signature")); - } - // Check the pubkey in the signature matches the address. - // This prevents someone submitting a valid signature from a pubkey that - // doesn't match the address for the snapshot. - let pubkey_hex = hex::encode(pubkey.to_bytes()); - if !maid_pk_matches_address(address, &pubkey_hex) { - return Err(eyre!("Public key does not match address")); - } - Ok(()) -} - -fn pubkey_from_signature(msg: &str, signature: &str) -> Result<bitcoin::PublicKey> { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = match bitcoin::sign_message::MessageSignature::from_base64(signature) { - Ok(s) => s, - Err(err) => { - let msg = format!("Error parsing signature: {err}"); - return Err(eyre!(msg)); - } - }; - let pubkey = sig.recover_pubkey(&secp, msg_hash)?; - Ok(pubkey) -} - -pub async fn distribute_from_maid_to_tokens( - client: Client, - snapshot: Snapshot, - claims: HashMap<MaidAddress, MaidClaim>, -) { - for (addr, amount) in snapshot { - // check if this snapshot address has a pubkey - if !claims.contains_key(&addr) { - continue; - } - let claim = &claims[&addr]; - match create_distribution(&client, claim, &amount).await { - Ok(_) => {} - Err(err) => { - info!( - "Error creating distribution: {0} {err}", - claim.to_csv_line() - ); - } - } - } -} - -pub async fn handle_distribution_req( - client: &Client, - query: HashMap<String, String>, - balances: Snapshot, -) -> Result<String> { - let address = query - .get("address") - .ok_or(eyre!("Missing address in querystring"))? - .to_string(); - let wallet = query - .get("wallet") - .ok_or(eyre!("Missing wallet in querystring"))? - .to_string(); - let signature = query - .get("signature") - .ok_or(eyre!("Missing signature in querystring"))?
- .to_string(); - let amount = balances - .get(&address) - .ok_or(eyre!("Address not in snapshot"))?; - // Bitcoin expects base64 standard encoding but the query string has - // base64 url encoding, so the sig is converted to standard encoding - let sig_bytes = base64::engine::general_purpose::URL_SAFE.decode(signature)?; - let sig = base64::engine::general_purpose::STANDARD.encode(sig_bytes); - let claim = MaidClaim::new(address, wallet, sig)?; - create_distribution(client, &claim, amount).await -} - -async fn create_distribution( - client: &Client, - claim: &MaidClaim, - amount: &NanoTokens, -) -> Result<String> { - // validate the claim - if claim.is_valid().is_err() { - let claim_csv = claim.to_csv_line(); - let msg = format!("Not creating distribution for invalid claim: {claim_csv}"); - info!(msg); - return Err(eyre!(msg)); - } - // save this claim to file - claim.save_to_file()?; - // check if this distribution has already been created - let root = get_distributions_data_dir_path()?; - let dist_path = root.join(&claim.address); - if dist_path.exists() { - let dist_hex = match std::fs::read_to_string(dist_path.clone()) { - Ok(content) => content, - Err(err) => { - let msg = format!( - "Error reading distribution file {}: {}", - dist_path.display(), - err - ); - info!(msg); - return Err(eyre!(msg)); - } - }; - return Ok(dist_hex); - } - info!( - "Distributing {} for {} to {}", - amount, claim.address, claim.wallet - ); - - let faucet_dir = get_faucet_data_dir(); - let faucet_wallet = load_account_wallet_or_create_with_mnemonic(&faucet_dir, None)?; - // create a transfer to the claim wallet - let transfer_hex = - match send_tokens(client, faucet_wallet, &amount.to_string(), &claim.wallet).await { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed send for {0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - let _ = match hex::decode(transfer_hex.clone()) { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed to decode transfer for {0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - // save the transfer - match std::fs::write(dist_path.clone(), transfer_hex.clone()) { - Ok(_) => {} - Err(err) => { - let msg = format!( - "Failed to write transfer to file {}: {}", - dist_path.display(), - err - ); - info!(msg); - info!("The transfer hex that failed to write to file:"); - info!(transfer_hex); - return Err(eyre!(msg)); - } - }; - Ok(transfer_hex) -} - -#[cfg(all(test, feature = "distribution"))] -mod tests { - use super::*; - - use assert_fs::TempDir; - use bitcoin::{ - hashes::Hash, - secp256k1::{rand, Secp256k1}, - Address, Network, PublicKey, - }; - use sn_logging::LogBuilder; - use sn_transfers::{HotWallet, MainSecretKey, Transfer}; - - // This test is to confirm fetching the `MAID snapshot` and `Maid claims` lists from the website - // is working properly and giving consistent and expected results. - // - // Note: the current list will grow as testnets collect more claims - #[test] - fn fetching_from_network() -> Result<()> { - let snapshot = load_maid_snapshot()?; - println!("Maid snapshot got {:?} entries", snapshot.len()); - assert!(!snapshot.is_empty()); - - let claims = load_maid_claims()?; - println!("Got {:?} distribution claims", claims.len()); - - Ok(()) - } - - // This test will simulate a token distribution.
- #[tokio::test] - async fn token_distribute_to_user() -> Result<()> { - let _log_guards = - LogBuilder::init_single_threaded_tokio_test("token_distribute_to_user test", true); - - let amount = NanoTokens::from(10); - - let secp = Secp256k1::new(); // DevSkim: ignore DS440100 - let (maid_secret_key, maid_public_key) = secp.generate_keypair(&mut rand::thread_rng()); - let maid_address = Address::p2pkh(&PublicKey::new(maid_public_key), Network::Bitcoin); - - let client_token_issuer = Client::quick_start(None).await?; - - // wallet comes from `safe wallet address` - let wallet_sk = bls::SecretKey::random(); - let wallet_pk_hex = wallet_sk.public_key().to_hex(); - // signature comes from bitcoin signing like electrum or trezor - let msg_hash = bitcoin::sign_message::signed_msg_hash(&wallet_pk_hex); - let msg = bitcoin::secp256k1::Message::from_digest(msg_hash.to_byte_array()); // DevSkim: ignore DS440100 - let secp_sig = secp.sign_ecdsa_recoverable(&msg, &maid_secret_key); - let signature = bitcoin::sign_message::MessageSignature { - signature: secp_sig, - compressed: true, - }; - let claim = MaidClaim::new( - maid_address.to_string(), - wallet_pk_hex, - signature.to_string(), - )?; - - let transfer_hex = create_distribution(&client_token_issuer, &claim, &amount).await?; - - let transfer = Transfer::from_hex(&transfer_hex)?; - - assert!(transfer - .cashnote_redemptions(&MainSecretKey::new(wallet_sk.clone())) - .is_ok()); - - let receiver_client = Client::new(bls::SecretKey::random(), None, None, None).await?; - let tmp_path = TempDir::new()?.path().to_owned(); - let receiver_wallet = - HotWallet::load_from_path(&tmp_path, Some(MainSecretKey::new(wallet_sk)))?; - - let mut cash_notes = receiver_client.receive(&transfer, &receiver_wallet).await?; - assert_eq!(cash_notes.len(), 1); - let cash_note = cash_notes.pop().unwrap(); - - assert_eq!(cash_note.value(), amount); - - Ok(()) - } - - #[test] - fn maidclaim_isvalid() -> Result<()> { - // Signatures generated using electrum to ensure interoperability. - - // prvkey for addr 17ig7... is L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - // sig is valid for wallet_a signed by addr_a - const MAID_ADDR_A: &str = "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG"; - const MAID_PUBKEY_A: &str = - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc"; // DevSkim: ignore DS173237 - const WALLET_A: &str = "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a"; // DevSkim: ignore DS173237 - const SIG_A: &str = "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM="; - - // prvkey for addr 1EbjF... 
is L2gzGZUqifkBG3jwwkyyfos8A67VvFhyrtqKU5cWkfEpySkFbaBR - // sig is valid for wallet_b signed by addr_b - const MAID_PUBKEY_B: &str = - "031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f"; // DevSkim: ignore DS173237 - const WALLET_B: &str = "915d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_B: &str = "Hy3zUK3YiEidzE+HpdgeoRoH3lkCrOoTh59TvoOiUdfJVKKLAVUuAydgIJkOTVU8JKdvbYPGiQhf7KCiNtLRIVU="; - - // not a valid bls wallet (starting with 0) - // sig is valid for wallet_c signed by addr_a - const WALLET_C: &str = "015d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_C: &str = "IE8y8KSRKw3hz/rd9dzrJLOu24sAspuJgYr6VVGCga3FQQhzOEFDKZoDdrJORRI4Rvv7vFqRARQVaBKCobYh9sc="; - - // MaidClaim::new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_A.to_string(), - ); - assert!(mc.is_ok()); - - // MaidClaim::new will fail if inputs are incorrect - // because new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_B.to_string(), - ); - assert!(mc.is_err()); - - // valid - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_ok()); - - // pk not matching address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature not matching message - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature matches message but not address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_B.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // wallet is not a valid bls key - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_C.to_string(), - signature: SIG_C.to_string(), - }; - assert!(mc.is_valid().is_err()); - - Ok(()) - } - - #[test] - fn pk_matches_addr() -> Result<()> { - // p2pkh compressed - assert!(maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc", // DevSkim: ignore DS173237 - )); - - // p2pkh uncompressed - assert!(maid_pk_matches_address( - "1QK8WWMcDEFUVV2zKU8GSCwwuvAFWEs2QW", - "0483f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc4327efb5ba23543c8a6e63ddc09618e11b5d0d184bb69f964712d0894c005655", // DevSkim: ignore DS173237 - )); - - // p2wpkh-p2sh - assert!(maid_pk_matches_address( - "3GErA71Kz6Tn4QCLqoaDvMxD5cLgqQLykv", - "03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a", // DevSkim: ignore DS173237 - )); - - // mismatched returns false - assert!(!maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - "031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f", // DevSkim: ignore DS173237 - )); - - Ok(()) - } - - #[test] - fn pubkey_from_sig() -> Result<()> { - // Valid message and signature produces the corresponding public key. 
- // Signatures generated using electrum to ensure interoperability - - // p2pkh compressed - // electrum import key - // L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc" // DevSkim: ignore DS173237 - ); - - // p2pkh uncompressed - // electrum import key - // 5Jz2acAoqLr57YXzQuoiNS8sQtZQ3TBcVcaKsX5ybp9HtJiUSXq - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Gw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "04952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a0418114ad86aeda109dd924629bbf929e82c6ce5be948e4d21a95575a53e1f73" // DevSkim: ignore DS173237 - ); - - // p2wpkh-p2sh uncompressed - // electrum import key - // p2wpkh-p2sh:L2NhyLEHiNbb9tBnQY5BbbwjWSZzhpZqfJ26Hynxpf5bXL9sUm73 - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Hw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a" // DevSkim: ignore DS173237 - ); - - Ok(()) - } -} From 93856ecee5ed3fd2b9381fd1c41cec2f2f25ace4 Mon Sep 17 00:00:00 2001 From: grumbach Date: Wed, 16 Oct 2024 15:56:33 +0900 Subject: [PATCH 229/255] feat: expose rewards key in autonomi --- autonomi/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/autonomi/src/lib.rs b/autonomi/src/lib.rs index 98921768ce..abfbd7563a 100644 --- a/autonomi/src/lib.rs +++ b/autonomi/src/lib.rs @@ -39,6 +39,7 @@ mod self_encryption; pub use sn_evm::get_evm_network_from_env; pub use sn_evm::EvmNetwork; pub use sn_evm::EvmWallet as Wallet; +pub use sn_evm::RewardsAddress; #[doc(no_inline)] // Place this under 'Re-exports' in the docs. pub use bytes::Bytes; From 099d8a36df9e71bc1ddc86f42948a5f536be03bc Mon Sep 17 00:00:00 2001 From: Benno Zeeman Date: Wed, 16 Oct 2024 16:46:30 +0200 Subject: [PATCH 230/255] refactor(autonomi): change error to debug log When we fetch the vault for the first time, it won't exist so an error is expected in this case. I've observed that the logs will print this as an error, but it's expected behavior. 
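For reference, the pattern this change moves to is roughly the following minimal sketch (illustrative only; `fetch_vault` is a stand-in and not the actual autonomi API):

    use tracing::debug;

    /// Stand-in for the real network fetch: on first use the vault simply
    /// does not exist yet, so an Err here can be perfectly normal.
    fn fetch_vault(exists: bool) -> Result<Vec<u8>, String> {
        if exists {
            Ok(vec![0u8; 4])
        } else {
            Err("record not found".to_string())
        }
    }

    fn main() {
        // Log the expected first-fetch miss at debug level rather than error;
        // the Err is still propagated to the caller unchanged.
        let result = fetch_vault(false)
            .inspect_err(|err| debug!("Failed to fetch vault from network: {err}"));
        assert!(result.is_err());
    }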
--- autonomi/src/client/vault.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autonomi/src/client/vault.rs b/autonomi/src/client/vault.rs index 2eb960dfc1..af40f61cf6 100644 --- a/autonomi/src/client/vault.rs +++ b/autonomi/src/client/vault.rs @@ -70,7 +70,7 @@ impl Client { .get_record_from_network(scratch_key, &get_cfg) .await .inspect_err(|err| { - error!("Failed to fetch vault {network_address:?} from network: {err}"); + debug!("Failed to fetch vault {network_address:?} from network: {err}"); })?; let pad = try_deserialize_record::<Scratchpad>(&record) From ca37ff7a6b87611a98ef7ea25bd50a34e9784414 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 16:26:15 +0100 Subject: [PATCH 231/255] feat: support the arbitrum sepolia network This network will serve as a testing alternative to Arbitrum One. --- evmlib/src/lib.rs | 18 ++++++++++++++++++ evmlib/src/utils.rs | 7 +++++++ sn_node/src/bin/safenode/subcommands.rs | 5 +++++ .../src/bin/cli/subcommands/evm_network.rs | 4 ++++ 4 files changed, 34 insertions(+) diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index 8bf3734265..0093aeac0e 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -33,13 +33,25 @@ static PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL: LazyLock<reqwest::Url> = LazyLock::new( .expect("Invalid RPC URL") }); +static PUBLIC_ARBITRUM_SEPOLIA_HTTP_RPC_URL: LazyLock<reqwest::Url> = LazyLock::new(|| { + "https://sepolia-rollup.arbitrum.io/rpc" + .parse() + .expect("Invalid RPC URL") +}); + const ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS: Address = address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C"); +const ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS: Address = + address!("4bc1aCE0E66170375462cB4E6Af42Ad4D5EC689C"); + // Should be updated when the smart contract changes! const ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS: Address = address!("887930F30EDEb1B255Cd2273C3F4400919df2EFe"); +const ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS: Address = + address!("e6D6bB5Fa796baA8c1ADc439Ac0fd66fd2A1858b"); + #[serde_as] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomNetwork { @@ -64,6 +76,7 @@ impl CustomNetwork { #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Network { ArbitrumOne, + ArbitrumSepolia, Custom(CustomNetwork), } @@ -71,6 +84,7 @@ impl std::fmt::Display for Network { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Network::ArbitrumOne => write!(f, "evm-arbitrum-one"), + Network::ArbitrumSepolia => write!(f, "evm-arbitrum-sepolia"), Network::Custom(_) => write!(f, "evm-custom"), } } @@ -88,6 +102,7 @@ impl Network { pub fn identifier(&self) -> &str { match self { Network::ArbitrumOne => "arbitrum-one", + Network::ArbitrumSepolia => "arbitrum-sepolia", Network::Custom(_) => "custom", } } @@ -95,6 +110,7 @@ impl Network { pub fn rpc_url(&self) -> &reqwest::Url { match self { Network::ArbitrumOne => &PUBLIC_ARBITRUM_ONE_HTTP_RPC_URL, + Network::ArbitrumSepolia => &PUBLIC_ARBITRUM_SEPOLIA_HTTP_RPC_URL, Network::Custom(custom) => &custom.rpc_url_http, } } @@ -102,6 +118,7 @@ impl Network { pub fn payment_token_address(&self) -> &Address { match self { Network::ArbitrumOne => &ARBITRUM_ONE_PAYMENT_TOKEN_ADDRESS, + Network::ArbitrumSepolia => &ARBITRUM_SEPOLIA_PAYMENT_TOKEN_ADDRESS, Network::Custom(custom) => &custom.payment_token_address, } } @@ -109,6 +126,7 @@ impl Network { pub fn data_payments_address(&self) -> &Address { match self { Network::ArbitrumOne => &ARBITRUM_ONE_DATA_PAYMENTS_ADDRESS, + Network::ArbitrumSepolia => &ARBITRUM_SEPOLIA_DATA_PAYMENTS_ADDRESS,
Network::Custom(custom) => &custom.data_payments_address, } } diff --git a/evmlib/src/utils.rs b/evmlib/src/utils.rs index 00b018fa09..e6f657938b 100644 --- a/evmlib/src/utils.rs +++ b/evmlib/src/utils.rs @@ -88,11 +88,18 @@ pub fn get_evm_network_from_env() -> Result<Network, Error> { .map(|v| v == "arbitrum-one") .unwrap_or(false); + let use_arbitrum_sepolia = std::env::var("EVM_NETWORK") + .map(|v| v == "arbitrum-sepolia") + .unwrap_or(false); + if use_local_evm { local_evm_network_from_csv() } else if use_arbitrum_one { info!("Using Arbitrum One EVM network as EVM_NETWORK is set to 'arbitrum-one'"); Ok(Network::ArbitrumOne) + } else if use_arbitrum_sepolia { + info!("Using Arbitrum Sepolia EVM network as EVM_NETWORK is set to 'arbitrum-sepolia'"); + Ok(Network::ArbitrumSepolia) } else if let Ok(evm_vars) = evm_vars { info!("Using custom EVM network from environment variables"); Ok(Network::Custom(CustomNetwork::new( diff --git a/sn_node/src/bin/safenode/subcommands.rs b/sn_node/src/bin/safenode/subcommands.rs index 7c5ec3aa51..c2b0389465 100644 --- a/sn_node/src/bin/safenode/subcommands.rs +++ b/sn_node/src/bin/safenode/subcommands.rs @@ -2,10 +2,14 @@ use clap::Subcommand; use sn_evm::EvmNetwork; #[derive(Subcommand, Clone, Debug)] +#[allow(clippy::enum_variant_names)] pub(crate) enum EvmNetworkCommand { /// Use the Arbitrum One network EvmArbitrumOne, + /// Use the Arbitrum Sepolia network + EvmArbitrumSepolia, + /// Use a custom network EvmCustom { /// The RPC URL for the custom network @@ -27,6 +31,7 @@ impl Into<EvmNetwork> for EvmNetworkCommand { fn into(self) -> EvmNetwork { match self { Self::EvmArbitrumOne => EvmNetwork::ArbitrumOne, + Self::EvmArbitrumSepolia => EvmNetwork::ArbitrumSepolia, Self::EvmCustom { rpc_url, payment_token_address, diff --git a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs index a77893f609..1683e00e99 100644 --- a/sn_node_manager/src/bin/cli/subcommands/evm_network.rs +++ b/sn_node_manager/src/bin/cli/subcommands/evm_network.rs @@ -16,6 +16,9 @@ pub enum EvmNetworkCommand { /// Use the Arbitrum One network EvmArbitrumOne, + /// Use the Arbitrum Sepolia network + EvmArbitrumSepolia, + /// Use a custom network EvmCustom { /// The RPC URL for the custom network @@ -41,6 +44,7 @@ impl TryInto<EvmNetwork> for EvmNetworkCommand { fn try_into(self) -> Result<EvmNetwork, Self::Error> { match self { Self::EvmArbitrumOne => Ok(EvmNetwork::ArbitrumOne), + Self::EvmArbitrumSepolia => Ok(EvmNetwork::ArbitrumSepolia), Self::EvmLocal => { if !cfg!(feature = "local") { return Err(color_eyre::eyre::eyre!( From 758dfa6f20154950c941346ada4ee931e42b7a10 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 21:50:04 +0100 Subject: [PATCH 232/255] chore: remove the auditor and faucet from bumping We will not release these binaries any more.
--- resources/scripts/bump_version_for_rc.sh | 4 +--- resources/scripts/print-versions.sh | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/resources/scripts/bump_version_for_rc.sh b/resources/scripts/bump_version_for_rc.sh index 655345e199..dd5e50303f 100755 --- a/resources/scripts/bump_version_for_rc.sh +++ b/resources/scripts/bump_version_for_rc.sh @@ -80,12 +80,10 @@ done echo "=======================" echo " New Binary Versions " echo "=======================" -echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "autonomi: $(grep "^version" < autonomi-cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" diff --git a/resources/scripts/print-versions.sh b/resources/scripts/print-versions.sh index b2a75fdb49..c3cb26ab6a 100755 --- a/resources/scripts/print-versions.sh +++ b/resources/scripts/print-versions.sh @@ -16,12 +16,10 @@ done echo "===================" echo " Binary Versions " echo "===================" -echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "autonomi: $(grep "^version" < autonomi-cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" -echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" From 44ca4438b803aa254c1a79916f4f60b22e20f646 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 22:08:17 +0100 Subject: [PATCH 233/255] chore(release): release candidate 2024.10.3.1 --- Cargo.lock | 34 ++++++++++++++++---------------- autonomi-cli/Cargo.toml | 12 +++++------ autonomi/Cargo.toml | 14 ++++++------- evm_testnet/Cargo.toml | 4 ++-- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 ++++---- node-launchpad/Cargo.toml | 12 
+++++------ release-cycle-info | 4 ++-- sn_build_info/Cargo.toml | 2 +- sn_evm/Cargo.toml | 2 +- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 10 +++++----- sn_node/Cargo.toml | 26 ++++++++++++------------ sn_node_manager/Cargo.toml | 14 ++++++------- sn_node_rpc_client/Cargo.toml | 16 +++++++-------- sn_peers_acquisition/Cargo.toml | 4 ++-- sn_protocol/Cargo.toml | 8 ++++---- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 ++++---- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 4 ++-- token_supplies/Cargo.toml | 2 +- 23 files changed, 97 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d397e2a98..62426ff43f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1069,7 +1069,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.1.2" +version = "0.2.0-rc.1" dependencies = [ "bip39", "blsttc", @@ -1113,7 +1113,7 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.0" +version = "0.1.1-rc.1" dependencies = [ "autonomi", "clap", @@ -5565,7 +5565,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.7" +version = "0.2.8-rc.1" dependencies = [ "clap", "clap-verbosity-flag", @@ -5682,7 +5682,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.19" +version = "0.4.0-rc.1" dependencies = [ "atty", "better-panic", @@ -8026,7 +8026,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.6" +version = "0.11.0-rc.1" dependencies = [ "assert_cmd", "assert_fs", @@ -8102,7 +8102,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.15" +version = "0.1.16-rc.1" dependencies = [ "chrono", "tracing", @@ -8167,7 +8167,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.36" +version = "0.2.37-rc.1" dependencies = [ "chrono", "color-eyre", @@ -8192,7 +8192,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.16" +version = "0.1.17-rc.1" dependencies = [ "clap", "color-eyre", @@ -8206,7 +8206,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.18.4" +version = "0.19.0-rc.1" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8251,7 +8251,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.111.4" +version = "0.112.0-rc.1" dependencies = [ "assert_fs", "async-trait", @@ -8307,7 +8307,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.31" +version = "0.6.32-rc.1" dependencies = [ "assert_fs", "async-trait", @@ -8334,7 +8334,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.3" +version = "0.5.4-rc.1" dependencies = [ "clap", "lazy_static", @@ -8350,7 +8350,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.11" +version = "0.17.12-rc.1" dependencies = [ "blsttc", "bytes", @@ -8380,7 +8380,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.21" +version = "0.4.0-rc.1" dependencies = [ "blsttc", "crdts", @@ -8397,7 +8397,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.14" +version = "0.4.0-rc.1" dependencies = [ "async-trait", "dirs-next", @@ -8423,7 +8423,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.19.3" +version = "0.20.0-rc.1" dependencies = [ "assert_fs", "blsttc", @@ -8900,7 +8900,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = 
"token_supplies" -version = "0.1.54" +version = "0.1.55-rc.1" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index 0eea336fdf..fabf258488 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "autonomi-cli" -version = "0.1.0" +version = "0.1.1-rc.1" edition = "2021" [[bin]] @@ -18,7 +18,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.1.0", features = [ +autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = [ "data", "fs", "registers", @@ -38,12 +38,12 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.0" } -sn_build_info = { path = "../sn_build_info", version = "0.1.11" } -sn_logging = { path = "../sn_logging", version = "0.2.33" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.1.0", features = [ +autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index c7ecf07338..56953227b1 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.1.2" +version = "0.2.0-rc.1" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -37,10 +37,10 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { version = "0.17.11", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_protocol = { version = "0.17.12-rc.1", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } sn_evm = { path = "../sn_evm" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } @@ -55,8 +55,8 @@ serde-wasm-bindgen = "0.6.5" [dev-dependencies] eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.33" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
test_utils = { path = "../test_utils" } diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index 666fc9fdbb..a270c126fc 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.0" +version = "0.1.1-rc.1" [dependencies] clap = { version = "4.5", features = ["derive"] } @@ -16,4 +16,4 @@ sn_evm = { path = "../sn_evm" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 9a26778c36..01a370a6a6 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.0" +version = "0.1.1-rc.1" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index d43c9fe407..e49e8d5eea 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.7" +version = "0.2.8-rc.1" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index cb9c03ab3f..586ce7afb8 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.19" +version = "0.4.0-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } sn_evm = { path = "../sn_evm", version = "0.1" } -sn-node-manager = { version = "0.10.6", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.3", path = "../sn_peers_acquisition" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } +sn-node-manager = { version = "0.11.0-rc.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.4-rc.1", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.14", path = "../sn_service_management" } +sn_service_management = { version = "0.4.0-rc.1", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info 
index 2b83422132..31b9961213 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -14,5 +14,5 @@ # number for all the released binaries. release-year: 2024 release-month: 10 -release-cycle: 2 -release-cycle-counter: 3 +release-cycle: 3 +release-cycle-counter: 1 diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 50a09650b8..fdd1b6daf8 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.15" +version = "0.1.16-rc.1" build = "build.rs" [build-dependencies] diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 87da8453e4..57cc6e63c9 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.0" +version = "0.1.1-rc.1" [features] test-utils = [] diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index bd73bb2773..2e03dd6882 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.36" +version = "0.2.37-rc.1" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index cd2ad4b26d..8adb7d35fe 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.16" +version = "0.1.17-rc.1" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 07c23c542d..a291073300 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.4" +version = "0.19.0-rc.1" [features] default = [] @@ -54,10 +54,10 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } sn_evm = { path = "../sn_evm", version = "0.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 9e13f59d3d..5bb28f9e6e 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.111.4" +version = "0.112.0-rc.1" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,14 +52,14 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } 
-sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,18 +83,18 @@ color-eyre = "0.6.2" [dev-dependencies] evmlib = { path = "../evmlib", version = "0.1" } -autonomi = { path = "../autonomi", version = "0.1.0", features = ["registers"] } +autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.19.3", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1", features = [ "test-utils", ] } -sn_evm = { path = "../sn_evm", version = "0.1.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 217f9c531d..9199ed355a 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.10.6" +version = "0.11.0-rc.1" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } sn-releases = "0.2.6" sn_evm = { path = "../sn_evm", version = "0.1" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index d53bb4627a..d8bc548854 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.31" +version = "0.6.32-rc.1" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_node = { path = "../sn_node", version = "0.111.4" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.14" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_node = { path = "../sn_node", version = "0.112.0-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index f1bfe2d4d7..897473f3c3 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = 
"sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.3" +version = "0.5.4-rc.1" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 284a8c6216..3e7af67634 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.11" +version = "0.17.12-rc.1" [features] default = [] @@ -28,9 +28,9 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } sn_evm = { path = "../sn_evm", version = "0.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index cfdaaccc5f..d83e5e38e4 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.21" +version = "0.4.0-rc.1" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 11863f7a4d..0f86b0f817 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.14" +version = "0.4.0-rc.1" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index a530dd1c8a..6ee7fcb0a1 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.3" +version = "0.20.0-rc.1" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 697c43a69c..2574bc52ea 100644 --- 
a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.7" +version = "0.4.8-rc.1" [features] local = ["sn_peers_acquisition/local"] @@ -21,4 +21,4 @@ libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index b040667397..b8604571c3 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.54" +version = "0.1.55-rc.1" [dependencies] From ebf708c0e79d32733fdccc02b7c0860b857c228f Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 22:14:32 +0100 Subject: [PATCH 234/255] chore: remove binaries from release process The `faucet` and `sn_auditor` binaries do not apply to the EVM-based network, so they will be removed. --- Justfile | 37 +++---------------------------------- 1 file changed, 3 insertions(+), 34 deletions(-) diff --git a/Justfile b/Justfile index fc0aadb9f1..3d92627bb1 100644 --- a/Justfile +++ b/Justfile @@ -131,7 +131,6 @@ build-release-artifacts arch nightly="false": if [[ $arch == arm* || $arch == armv7* || $arch == aarch64* ]]; then echo "Passing to cross CROSS_CONTAINER_OPTS=$CROSS_CONTAINER_OPTS" cargo binstall --no-confirm cross - cross build --release --target $arch --bin faucet --features=distribution $nightly_feature cross build --release --target $arch --bin nat-detection $nightly_feature cross build --release --target $arch --bin node-launchpad $nightly_feature cross build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature @@ -139,9 +138,7 @@ build-release-artifacts arch nightly="false": cross build --release --target $arch --bin safenode-manager $nightly_feature cross build --release --target $arch --bin safenodemand $nightly_feature cross build --release --target $arch --bin safenode_rpc_client $nightly_feature - cross build --release --target $arch --bin sn_auditor $nightly_feature else - cargo build --release --target $arch --bin faucet --features=distribution $nightly_feature cargo build --release --target $arch --bin nat-detection $nightly_feature cargo build --release --target $arch --bin node-launchpad $nightly_feature cargo build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature @@ -149,7 +146,6 @@ build-release-artifacts arch nightly="false": cargo build --release --target $arch --bin safenode-manager $nightly_feature cargo build --release --target $arch --bin safenodemand $nightly_feature cargo build --release --target $arch --bin safenode_rpc_client $nightly_feature - cargo build --release --target $arch --bin sn_auditor $nightly_feature fi find target/$arch/release -maxdepth 1 -type f -exec cp '{}' artifacts \; @@ -182,7 +178,6 @@ make-artifacts-directory: package-all-bins: #!/usr/bin/env bash set -e - just package-bin "faucet" just package-bin "nat-detection" just package-bin "node-launchpad" just package-bin "safe" @@ -190,7 +185,6 @@ package-all-bins: just package-bin "safenode_rpc_client" just package-bin 
"safenode-manager" just package-bin "safenodemand" - just package-bin "sn_auditor" package-bin bin version="": #!/usr/bin/env bash @@ -209,24 +203,19 @@ package-bin bin version="": bin="{{bin}}" supported_bins=(\ - "faucet" \ "nat-detection" \ "node-launchpad" \ "safe" \ "safenode" \ "safenode-manager" \ "safenodemand" \ - "safenode_rpc_client" \ - "sn_auditor") + "safenode_rpc_client") crate_dir_name="" # In the case of the node manager, the actual name of the crate is `sn-node-manager`, but the # directory it's in is `sn_node_manager`. bin="{{bin}}" case "$bin" in - faucet) - crate_dir_name="sn_faucet" - ;; nat-detection) crate_dir_name="nat-detection" ;; @@ -248,9 +237,6 @@ package-bin bin version="": safenode_rpc_client) crate_dir_name="sn_node_rpc_client" ;; - sn_auditor) - crate_dir_name="sn_auditor" - ;; *) echo "The $bin binary is not supported" exit 1 @@ -287,7 +273,6 @@ upload-all-packaged-bins-to-s3: set -e binaries=( - faucet nat-detection node-launchpad safe @@ -295,7 +280,6 @@ upload-all-packaged-bins-to-s3: safenode-manager safenode_rpc_client safenodemand - sn_auditor ) for binary in "${binaries[@]}"; do just upload-packaged-bin-to-s3 "$binary" @@ -306,9 +290,6 @@ upload-packaged-bin-to-s3 bin_name: set -e case "{{bin_name}}" in - faucet) - bucket="sn-faucet" - ;; nat-detection) bucket="nat-detection" ;; @@ -330,9 +311,6 @@ upload-packaged-bin-to-s3 bin_name: safenode_rpc_client) bucket="sn-node-rpc-client" ;; - sn_auditor) - bucket="sn-auditor" - ;; *) echo "The {{bin_name}} binary is not supported" exit 1 @@ -362,9 +340,6 @@ delete-s3-bin bin_name version: set -e case "{{bin_name}}" in - faucet) - bucket="sn-faucet" - ;; nat-detection) bucket="nat-detection" ;; @@ -386,9 +361,6 @@ delete-s3-bin bin_name version: safenode_rpc_client) bucket="sn-node-rpc-client" ;; - sn_auditor) - bucket="sn-auditor" - ;; *) echo "The {{bin_name}} binary is not supported" exit 1 @@ -456,7 +428,6 @@ package-arch arch: cd artifacts/$architecture/release binaries=( - faucet nat-detection node-launchpad safe @@ -464,7 +435,6 @@ package-arch arch: safenode-manager safenode_rpc_client safenodemand - sn_auditor ) if [[ "$architecture" == *"windows"* ]]; then @@ -482,10 +452,9 @@ node-man-integration-tests: #!/usr/bin/env bash set -e - cargo build --release --bin safenode --bin faucet --bin safenode-manager + cargo build --release --bin safenode --bin safenode-manager cargo run --release --bin safenode-manager -- local run \ - --node-path target/release/safenode \ - --faucet-path target/release/faucet + --node-path target/release/safenode peer=$(cargo run --release --bin safenode-manager -- local status \ --json | jq -r .nodes[-1].listen_addr[0]) export SAFE_PEERS=$peer From 845cac8b7d4c3dc9dd8a611065a14ed3f89b07fb Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 22:24:09 +0100 Subject: [PATCH 235/255] chore: package `autonomi` binary rather than `safe` --- Justfile | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Justfile b/Justfile index 3d92627bb1..1b9199c265 100644 --- a/Justfile +++ b/Justfile @@ -133,7 +133,7 @@ build-release-artifacts arch nightly="false": cargo binstall --no-confirm cross cross build --release --target $arch --bin nat-detection $nightly_feature cross build --release --target $arch --bin node-launchpad $nightly_feature - cross build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature + cross build --release --features=network-contacts --target $arch --bin autonomi 
$nightly_feature cross build --release --features=network-contacts --target $arch --bin safenode $nightly_feature cross build --release --target $arch --bin safenode-manager $nightly_feature cross build --release --target $arch --bin safenodemand $nightly_feature @@ -141,7 +141,7 @@ build-release-artifacts arch nightly="false": else cargo build --release --target $arch --bin nat-detection $nightly_feature cargo build --release --target $arch --bin node-launchpad $nightly_feature - cargo build --release --features="network-contacts,distribution" --target $arch --bin safe $nightly_feature + cargo build --release --features=network-contacts --target $arch --bin autonomi $nightly_feature cargo build --release --features=network-contacts --target $arch --bin safenode $nightly_feature cargo build --release --target $arch --bin safenode-manager $nightly_feature cargo build --release --target $arch --bin safenodemand $nightly_feature @@ -180,11 +180,11 @@ package-all-bins: set -e just package-bin "nat-detection" just package-bin "node-launchpad" - just package-bin "safe" + just package-bin "autonomi" just package-bin "safenode" - just package-bin "safenode_rpc_client" just package-bin "safenode-manager" just package-bin "safenodemand" + just package-bin "safenode_rpc_client" package-bin bin version="": #!/usr/bin/env bash @@ -205,7 +205,7 @@ package-bin bin version="": supported_bins=(\ "nat-detection" \ "node-launchpad" \ - "safe" \ + "autonomi" \ "safenode" \ "safenode-manager" \ "safenodemand" \ @@ -222,8 +222,8 @@ package-bin bin version="": node-launchpad) crate_dir_name="node-launchpad" ;; - safe) - crate_dir_name="sn_cli" + autonomi) + crate_dir_name="autonomi-cli" ;; safenode) crate_dir_name="sn_node" @@ -275,7 +275,7 @@ upload-all-packaged-bins-to-s3: binaries=( nat-detection node-launchpad - safe + autonomi safenode safenode-manager safenode_rpc_client @@ -296,8 +296,8 @@ upload-packaged-bin-to-s3 bin_name: node-launchpad) bucket="node-launchpad" ;; - safe) - bucket="sn-cli" + autonomi) + bucket="autonomi-cli" ;; safenode) bucket="sn-node" @@ -346,8 +346,8 @@ delete-s3-bin bin_name version: node-launchpad) bucket="node-launchpad" ;; - safe) - bucket="sn-cli" + autonomi) + bucket="autonomi-cli" ;; safenode) bucket="sn-node" @@ -430,7 +430,7 @@ package-arch arch: binaries=( nat-detection node-launchpad - safe + autonomi safenode safenode-manager safenode_rpc_client From 1d4d319ab04f920fe8a2dd2a45f93066e9f21b22 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 22:24:58 +0100 Subject: [PATCH 236/255] chore: remove redundant targets These targets were not used for a long time and were just cluttering the file with noise. 
--- Justfile | 79 +------------------------------------------------------- 1 file changed, 1 insertion(+), 78 deletions(-) diff --git a/Justfile b/Justfile index 1b9199c265..5c8013329e 100644 --- a/Justfile +++ b/Justfile @@ -1,70 +1,5 @@ #!/usr/bin/env just --justfile -release_repo := "maidsafe/safe_network" - -droplet-testbed: - #!/usr/bin/env bash - - DROPLET_NAME="node-manager-testbed" - REGION="lon1" - SIZE="s-1vcpu-1gb" - IMAGE="ubuntu-20-04-x64" - SSH_KEY_ID="30878672" - - droplet_ip=$(doctl compute droplet list \ - --format Name,PublicIPv4 --no-header | grep "^$DROPLET_NAME " | awk '{ print $2 }') - - if [ -z "$droplet_ip" ]; then - droplet_id=$(doctl compute droplet create $DROPLET_NAME \ - --region $REGION \ - --size $SIZE \ - --image $IMAGE \ - --ssh-keys $SSH_KEY_ID \ - --format ID \ - --no-header \ - --wait) - if [ -z "$droplet_id" ]; then - echo "Failed to obtain droplet ID" - exit 1 - fi - - echo "Droplet ID: $droplet_id" - echo "Waiting for droplet IP address..." - droplet_ip=$(doctl compute droplet get $droplet_id --format PublicIPv4 --no-header) - while [ -z "$droplet_ip" ]; do - echo "Still waiting to obtain droplet IP address..." - sleep 5 - droplet_ip=$(doctl compute droplet get $droplet_id --format PublicIPv4 --no-header) - done - fi - echo "Droplet IP address: $droplet_ip" - - nc -zw1 $droplet_ip 22 - exit_code=$? - while [ $exit_code -ne 0 ]; do - echo "Waiting on SSH to become available..." - sleep 5 - nc -zw1 $droplet_ip 22 - exit_code=$? - done - - cargo build --release --target x86_64-unknown-linux-musl - scp -r ./target/x86_64-unknown-linux-musl/release/safenode-manager \ - root@$droplet_ip:/root/safenode-manager - -kill-testbed: - #!/usr/bin/env bash - - DROPLET_NAME="node-manager-testbed" - - droplet_id=$(doctl compute droplet list \ - --format Name,ID --no-header | grep "^$DROPLET_NAME " | awk '{ print $2 }') - - if [ -z "$droplet_ip" ]; then - echo "Deleting droplet with ID $droplet_id" - doctl compute droplet delete $droplet_id - fi - build-release-artifacts arch nightly="false": #!/usr/bin/env bash set -e @@ -446,16 +381,4 @@ package-arch arch: zip "../../../packaged_architectures/$zip_filename" "${binaries[@]}" fi - cd ../../.. - -node-man-integration-tests: - #!/usr/bin/env bash - set -e - - cargo build --release --bin safenode --bin safenode-manager - cargo run --release --bin safenode-manager -- local run \ - --node-path target/release/safenode - peer=$(cargo run --release --bin safenode-manager -- local status \ - --json | jq -r .nodes[-1].listen_addr[0]) - export SAFE_PEERS=$peer - cargo test --release --package sn-node-manager --test e2e -- --nocapture + cd ../../.. \ No newline at end of file From c96ceee1ad3305846489a4bfecfad702d25451f6 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Wed, 16 Oct 2024 23:10:00 +0100 Subject: [PATCH 237/255] chore: fix up evmlib and sn_evm references These crates were not being correctly referenced in some other crates, which meant some release configurations would not build properly. 
--- Cargo.lock | 8 ++++---- autonomi/Cargo.toml | 4 ++-- evm_testnet/Cargo.toml | 4 ++-- node-launchpad/Cargo.toml | 2 +- sn_evm/Cargo.toml | 2 +- sn_networking/Cargo.toml | 2 +- sn_node/Cargo.toml | 5 ++--- sn_node_manager/Cargo.toml | 2 +- sn_protocol/Cargo.toml | 2 +- test_utils/Cargo.toml | 2 +- 10 files changed, 16 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62426ff43f..f74312c858 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2747,7 +2747,7 @@ dependencies = [ [[package]] name = "evm_testnet" -version = "0.1.0" +version = "0.1.1-rc.1" dependencies = [ "clap", "dirs-next", @@ -2758,7 +2758,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.0" +version = "0.1.1-rc.1" dependencies = [ "alloy", "dirs-next", @@ -8144,7 +8144,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.0" +version = "0.1.1-rc.1" dependencies = [ "custom_debug", "evmlib", @@ -8756,7 +8756,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.7" +version = "0.4.8-rc.1" dependencies = [ "bytes", "color-eyre", diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 56953227b1..4abb4697dc 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -41,7 +41,7 @@ sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } sn_protocol = { version = "0.17.12-rc.1", path = "../sn_protocol" } sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -66,7 +66,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.1-rc.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index a270c126fc..4dc12e3a74 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -11,8 +11,8 @@ version = "0.1.1-rc.1" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib" } -sn_evm = { path = "../sn_evm" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 586ce7afb8..909546c73e 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -52,7 +52,7 @@ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } sn-node-manager = { version = "0.11.0-rc.1", path = "../sn_node_manager" } sn_peers_acquisition = { version = "0.5.4-rc.1", path = "../sn_peers_acquisition" } sn_protocol = { path = "../sn_protocol", version = 
"0.17.12-rc.1" } diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 57cc6e63c9..ee66ed91b0 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -16,7 +16,7 @@ local = ["evmlib/local"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index a291073300..fb922cef70 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -58,7 +58,7 @@ sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 5bb28f9e6e..d24112861f 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -60,7 +60,7 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -82,7 +82,7 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", @@ -94,7 +94,6 @@ sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features = [ sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1", features = [ "test-utils", ] } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } tempfile = "3.6.0" # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 9199ed355a..2e247010f8 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -52,7 +52,7 @@ sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1 sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } sysinfo = "0.30.12" thiserror = "1.0.23" diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 3e7af67634..2109c53d60 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -31,7 +31,7 @@ sha2 = "0.10.7" sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index 2574bc52ea..e3cc909848 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -16,7 +16,7 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } From bc9593a0696518556e6f25620897d6216af5c1f6 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Thu, 17 Oct 2024 16:34:52 +0100 Subject: [PATCH 238/255] chore: build `safenode` with `websockets` feature This is a requirement of the web app. 
--- Justfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Justfile b/Justfile index 5c8013329e..a6f6f90118 100644 --- a/Justfile +++ b/Justfile @@ -69,7 +69,7 @@ build-release-artifacts arch nightly="false": cross build --release --target $arch --bin nat-detection $nightly_feature cross build --release --target $arch --bin node-launchpad $nightly_feature cross build --release --features=network-contacts --target $arch --bin autonomi $nightly_feature - cross build --release --features=network-contacts --target $arch --bin safenode $nightly_feature + cross build --release --features=network-contacts,websockets --target $arch --bin safenode $nightly_feature cross build --release --target $arch --bin safenode-manager $nightly_feature cross build --release --target $arch --bin safenodemand $nightly_feature cross build --release --target $arch --bin safenode_rpc_client $nightly_feature @@ -77,7 +77,7 @@ build-release-artifacts arch nightly="false": cargo build --release --target $arch --bin nat-detection $nightly_feature cargo build --release --target $arch --bin node-launchpad $nightly_feature cargo build --release --features=network-contacts --target $arch --bin autonomi $nightly_feature - cargo build --release --features=network-contacts --target $arch --bin safenode $nightly_feature + cargo build --release --features=network-contacts,websockets --target $arch --bin safenode $nightly_feature cargo build --release --target $arch --bin safenode-manager $nightly_feature cargo build --release --target $arch --bin safenodemand $nightly_feature cargo build --release --target $arch --bin safenode_rpc_client $nightly_feature @@ -381,4 +381,4 @@ package-arch arch: zip "../../../packaged_architectures/$zip_filename" "${binaries[@]}" fi - cd ../../.. \ No newline at end of file + cd ../../.. From 90a8f261098def9dd61cabd4beaa6d3cdee153bb Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Fri, 18 Oct 2024 18:50:40 +0100 Subject: [PATCH 239/255] feat: shutting down on excess cpu does not return error The node RPC is changed such that the stop event allows shutting the node down with either a success or failure result. Then, in the case of excessive CPU usage, the node will be shut down with a success result. This allows us to retain an on-failure restart policy for node-based services: with excess CPU usage, they would shut down with a success result and so would not be started again.
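In miniature, the mechanism this patch introduces looks like the sketch below. It condenses the real control loop in sn_node/src/bin/safenode/main.rs: `StopResult` and the `Success`/`Error` handling mirror the diff that follows, while the channel plumbing is omitted and `String` stands in for the `eyre` error type used by the node binary.

use std::time::Duration;

#[derive(Debug)]
pub enum StopResult {
    Success(String),
    Error(String), // the real patch wraps color_eyre's Error here
}

pub enum NodeCtrl {
    Stop { delay: Duration, result: StopResult },
}

// A Success stop exits cleanly, so a service manager with an on-failure
// restart policy leaves the node down (the excess-CPU case); an Error stop
// propagates the failure, which triggers a restart.
fn handle_stop(ctrl: NodeCtrl) -> Result<(), String> {
    match ctrl {
        NodeCtrl::Stop { delay: _, result } => match result {
            StopResult::Success(message) => {
                println!("Node stopped successfully: {message}");
                Ok(())
            }
            StopResult::Error(cause) => {
                eprintln!("Node stopped with error: {cause}");
                Err(cause)
            }
        },
    }
}

fn main() {
    // The excess-CPU shutdown is sent as a Success, so the process exits
    // cleanly and an on-failure restart policy will not bring it back up.
    let stop = NodeCtrl::Stop {
        delay: Duration::from_secs(1),
        result: StopResult::Success("excess host CPU detected".to_string()),
    };
    assert!(handle_stop(stop).is_ok());
}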
--- sn_node/src/bin/safenode/main.rs | 27 ++++++++++++++++--------- sn_node/src/bin/safenode/rpc_service.rs | 4 ++-- sn_protocol/src/node_rpc.rs | 8 +++++++- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index 487eec9a69..dd328b14b2 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -24,7 +24,7 @@ use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; use sn_node::{Marker, NodeBuilder, NodeEvent, NodeEventsReceiver}; use sn_peers_acquisition::PeersArgs; use sn_protocol::{ - node::get_safenode_root_dir, node_rpc::NodeCtrl, version::IDENTIFY_PROTOCOL_STR, + node::get_safenode_root_dir, node_rpc::{NodeCtrl, StopResult}, version::IDENTIFY_PROTOCOL_STR, }; use std::{ env, @@ -381,7 +381,7 @@ You can check your reward balance by running: if let Err(err) = ctrl_tx_clone .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Ctrl-C received!"), + result: StopResult::Error(eyre!("Ctrl-C received!")), }) .await { @@ -426,7 +426,7 @@ You can check your reward balance by running: if let Err(err) = ctrl_tx_clone_cpu .send(NodeCtrl::Stop { delay: NODE_STOP_DELAY, - cause: eyre!("Excess host CPU detected for {HIGH_CPU_CONSECUTIVE_LIMIT} consecutive minutes!"), + result: StopResult::Success(format!("Excess host CPU %{CPU_USAGE_THRESHOLD} detected for {HIGH_CPU_CONSECUTIVE_LIMIT} consecutive minutes!")), }) .await { @@ -475,12 +475,21 @@ You can check your reward balance by running: break Ok(res); } - Some(NodeCtrl::Stop { delay, cause }) => { + Some(NodeCtrl::Stop { delay, result }) => { let msg = format!("Node is stopping in {delay:?}..."); info!("{msg}"); println!("{msg} Node log path: {log_output_dest}"); sleep(delay).await; - return Err(cause); + match result { + StopResult::Success(message) => { + info!("Node stopped successfully: {}", message); + return Ok(None); + } + StopResult::Error(cause) => { + error!("Node stopped with error: {}", cause); + return Err(cause); + } + } } Some(NodeCtrl::Update(_delay)) => { // TODO: implement self-update once safenode app releases are published again @@ -503,7 +512,7 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se if let Err(err) = ctrl_tx .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Node events channel closed!"), + result: StopResult::Error(eyre!("Node events channel closed!")), }) .await { @@ -517,13 +526,11 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se if let Err(err) = ctrl_tx .send(NodeCtrl::Stop { delay: Duration::from_secs(1), - cause: eyre!("Node terminated due to: {reason:?}"), + result: StopResult::Error(eyre!("Node terminated due to: {reason:?}")), }) .await { - error!( - "Failed to send node control msg to safenode bin main thread: {err}" - ); + error!("Failed to send node control msg to safenode bin main thread: {err}"); break; } } diff --git a/sn_node/src/bin/safenode/rpc_service.rs b/sn_node/src/bin/safenode/rpc_service.rs index c42503f112..eef388b2d5 100644 --- a/sn_node/src/bin/safenode/rpc_service.rs +++ b/sn_node/src/bin/safenode/rpc_service.rs @@ -9,7 +9,7 @@ use eyre::{ErrReport, Result}; use sn_logging::ReloadHandle; use sn_node::RunningNode; -use sn_protocol::node_rpc::NodeCtrl; +use sn_protocol::node_rpc::{NodeCtrl, StopResult}; use sn_protocol::safenode_proto::{ k_buckets_response, safe_node_server::{SafeNode, SafeNodeServer}, @@ -202,7 +202,7 @@ impl SafeNode for SafeNodeRpcService { }; let 
delay = Duration::from_millis(request.get_ref().delay_millis); - match self.ctrl_tx.send(NodeCtrl::Stop { delay, cause }).await { + match self.ctrl_tx.send(NodeCtrl::Stop { delay, result: StopResult::Success(cause.to_string()) }).await { Ok(()) => Ok(Response::new(StopResponse {})), Err(err) => Err(Status::new( Code::Internal, diff --git a/sn_protocol/src/node_rpc.rs b/sn_protocol/src/node_rpc.rs index 599e874221..d35ddac5b4 100644 --- a/sn_protocol/src/node_rpc.rs +++ b/sn_protocol/src/node_rpc.rs @@ -15,7 +15,7 @@ pub enum NodeCtrl { /// Request to stop the execution of the safenode app, providing an error as a reason for it. Stop { delay: Duration, - cause: Error, + result: StopResult, }, /// Request to restart the execution of the safenode app, retrying to join the network, after the requested delay. /// Set `retain_peer_id` to `true` if you want to re-use the same root dir/secret keys/PeerId. @@ -26,3 +26,9 @@ pub enum NodeCtrl { // Request to update the safenode app, and restart it, after the requested delay. Update(Duration), } + +#[derive(Debug)] +pub enum StopResult { + Success(String), + Error(Error), +} From 6bfa9247f36f45390044d6fc4f54cfac610697d8 Mon Sep 17 00:00:00 2001 From: Warm Beer Date: Mon, 21 Oct 2024 11:06:47 +0200 Subject: [PATCH 240/255] feat(evmlib): add token allowance and approval functions & set contract approval to infinite --- evmlib/src/contract/network_token.rs | 14 ++++++++ evmlib/src/wallet.rs | 52 ++++++++++++++++++++++++---- 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/evmlib/src/contract/network_token.rs b/evmlib/src/contract/network_token.rs index 4c8112e869..ce582f2543 100644 --- a/evmlib/src/contract/network_token.rs +++ b/evmlib/src/contract/network_token.rs @@ -74,6 +74,20 @@ where Ok(balance) } + /// See how many tokens are approved to be spent. + pub async fn allowance(&self, owner: Address, spender: Address) -> Result { + debug!("Getting allowance of owner: {owner} for spender: {spender}",); + let balance = self + .contract + .allowance(owner, spender) + .call() + .await + .inspect_err(|err| error!("Error getting allowance: {err:?}"))? + ._0; + debug!("Allowance of owner: {owner} for spender: {spender} is: {balance}"); + Ok(balance) + } + /// Approve spender to spend a raw amount of tokens. pub async fn approve(&self, spender: Address, value: U256) -> Result { debug!("Approving spender to spend raw amt of tokens: {value}"); diff --git a/evmlib/src/wallet.rs b/evmlib/src/wallet.rs index e758e58eee..9fa3c92ce1 100644 --- a/evmlib/src/wallet.rs +++ b/evmlib/src/wallet.rs @@ -64,12 +64,12 @@ impl Wallet { /// Returns the raw balance of payment tokens for this wallet. pub async fn balance_of_tokens(&self) -> Result { - balance_of_tokens(wallet_address(&self.wallet), &self.network).await + balance_of_tokens(self.address(), &self.network).await } /// Returns the raw balance of gas tokens for this wallet. pub async fn balance_of_gas_tokens(&self) -> Result { - balance_of_gas_tokens(wallet_address(&self.wallet), &self.network).await + balance_of_gas_tokens(self.address(), &self.network).await } /// Transfer a raw amount of payment tokens to another address. @@ -90,6 +90,20 @@ impl Wallet { transfer_gas_tokens(self.wallet.clone(), &self.network, to, amount).await } + /// See how many tokens of the owner may be spent by the spender. 
+ pub async fn token_allowance(&self, spender: Address) -> Result { + token_allowance(&self.network, self.address(), spender).await + } + + /// Approve an address / smart contract to spend this wallet's payment tokens. + pub async fn approve_to_spend_tokens( + &self, + spender: Address, + amount: U256, + ) -> Result { + approve_to_spend_tokens(self.wallet.clone(), &self.network, spender, amount).await + } + /// Pays for a single quote. Returns transaction hash of the payment. pub async fn pay_for_quote( &self, @@ -196,8 +210,20 @@ pub async fn balance_of_gas_tokens( Ok(balance) } +/// See how many tokens of the owner may be spent by the spender. +pub async fn token_allowance( + network: &Network, + owner: Address, + spender: Address, +) -> Result { + debug!("Getting allowance for owner: {owner} and spender: {spender}",); + let provider = http_provider(network.rpc_url().clone()); + let network_token = NetworkToken::new(*network.payment_token_address(), provider); + network_token.allowance(owner, spender).await +} + /// Approve an address / smart contract to spend this wallet's payment tokens. -async fn approve_to_spend_tokens( +pub async fn approve_to_spend_tokens( wallet: EthereumWallet, network: &Network, spender: Address, @@ -258,16 +284,28 @@ pub async fn pay_for_quotes>( let mut tx_hashes_by_quote = BTreeMap::new(); - // Approve the contract to spend enough of the client's tokens. - approve_to_spend_tokens( - wallet.clone(), + // Check allowance + let allowance = token_allowance( network, + wallet_address(&wallet), *network.data_payments_address(), - total_amount, ) .await .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + // TODO: Get rid of approvals altogether, by using permits or whatever.. + if allowance < total_amount { + // Approve the contract to spend all the client's tokens. + approve_to_spend_tokens( + wallet.clone(), + network, + *network.data_payments_address(), + U256::MAX, + ) + .await + .map_err(|err| PayForQuotesError(Error::from(err), tx_hashes_by_quote.clone()))?; + } + let provider = http_provider_with_wallet(network.rpc_url().clone(), wallet); let data_payments = DataPaymentsHandler::new(*network.data_payments_address(), provider); From d4ab7b91da367e7c5c381a56d10b2d2a35215e6b Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 16 Oct 2024 16:47:14 +0200 Subject: [PATCH 241/255] fix(node-manager): hardcoding network params for demo --- sn_node_manager/src/cmd/node.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index d28dbf7266..375788bb11 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -22,7 +22,7 @@ use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; use libp2p_identity::PeerId; use semver::Version; -use sn_evm::{EvmNetwork, RewardsAddress}; +use sn_evm::{CustomNetwork, EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -714,7 +714,16 @@ pub async fn maintain_n_running_nodes( data_dir_path.clone(), enable_metrics_server, env_variables.clone(), - None, + // FIXME: Hardcoding for demo. Should be fixed!! 
+ Some(EvmNetwork::Custom(CustomNetwork { + rpc_url_http: "http://165.227.234.109:4343/".parse()?, + payment_token_address: RewardsAddress::from_str( + "0x5FbDB2315678afecb367f032d93F642f64180aa3", + )?, + data_payments_address: RewardsAddress::from_str( + "0x8464135c8F25Da09e49BC8782676a84730C318bC", + )?, + })), home_network, local, log_dir_path.clone(), From 3f537af7a712c54f5eaed6921f15d64ff1d70b38 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 16 Oct 2024 12:04:33 +0200 Subject: [PATCH 242/255] feat(launchpad): evm network integration --- Cargo.lock | 31 +-- node-launchpad/Cargo.toml | 1 + node-launchpad/src/components/options.rs | 10 +- .../src/components/popup/beta_programme.rs | 224 +++++++++++------- node-launchpad/src/components/status.rs | 89 ++++--- node-launchpad/src/node_mgmt.rs | 7 +- 6 files changed, 210 insertions(+), 152 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f74312c858..44e8694dcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1476,7 +1476,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.8", "serde", ] @@ -3724,8 +3724,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4306,7 +4306,7 @@ dependencies = [ "globset", "log", "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.8", "same-file", "walkdir", "winapi-util", @@ -5705,6 +5705,7 @@ dependencies = [ "pretty_assertions", "prometheus-parse", "ratatui", + "regex", "reqwest 0.12.7", "serde", "serde_json", @@ -6588,7 +6589,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift 0.3.0", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", @@ -7085,14 +7086,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -7106,13 +7107,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -7123,9 +7124,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -9897,7 +9898,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] diff --git 
a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 909546c73e..332878595f 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -70,6 +70,7 @@ tui-input = "0.8.0" which = "6.0.1" faccess = "0.2.4" throbber-widgets-tui = "0.7.0" +regex = "1.11.0" [build-dependencies] vergen = { version = "8.2.6", features = ["build", "git", "gitoxide", "cargo"] } diff --git a/node-launchpad/src/components/options.rs b/node-launchpad/src/components/options.rs index 90a626ec33..934578f93e 100644 --- a/node-launchpad/src/components/options.rs +++ b/node-launchpad/src/components/options.rs @@ -192,10 +192,14 @@ impl Component for Options { .style(Style::default().fg(GHOST_WHITE)); // Beta Rewards Program - let beta_legend = " Edit Discord Username "; + let beta_legend = if self.discord_username.is_empty() { + " Add Wallet " + } else { + " Change Wallet " + }; let beta_key = " [Ctrl+B] "; let block2 = Block::default() - .title(" Beta Rewards Program ") + .title(" Wallet ") .title_style(Style::default().bold().fg(GHOST_WHITE)) .style(Style::default().fg(GHOST_WHITE)) .borders(Borders::ALL) @@ -204,7 +208,7 @@ impl Component for Options { vec![Row::new(vec![ Cell::from( Line::from(vec![Span::styled( - " Discord Username: ", + " Wallet Address: ", Style::default().fg(LIGHT_PERIWINKLE), )]) .alignment(Alignment::Left), diff --git a/node-launchpad/src/components/popup/beta_programme.rs b/node-launchpad/src/components/popup/beta_programme.rs index 615c20bcf4..f512f9d0a4 100644 --- a/node-launchpad/src/components/popup/beta_programme.rs +++ b/node-launchpad/src/components/popup/beta_programme.rs @@ -11,27 +11,30 @@ use super::super::Component; use crate::{ action::{Action, OptionsActions}, mode::{InputMode, Scene}, - style::{clear_area, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, VIVID_SKY_BLUE}, + style::{clear_area, EUCALYPTUS, GHOST_WHITE, INDIGO, LIGHT_PERIWINKLE, RED, VIVID_SKY_BLUE}, widgets::hyperlink::Hyperlink, }; use color_eyre::Result; use crossterm::event::{Event, KeyCode, KeyEvent}; use ratatui::{prelude::*, widgets::*}; +use regex::Regex; use tui_input::{backend::crossterm::EventHandler, Input}; -const INPUT_SIZE_USERNAME: u16 = 32; // as per discord docs +const INPUT_SIZE_USERNAME: u16 = 42; // Ethereum address plus 0x const INPUT_AREA_USERNAME: u16 = INPUT_SIZE_USERNAME + 2; // +2 for the padding pub struct BetaProgramme { /// Whether the component is active right now, capturing keystrokes + draw things. active: bool, state: BetaProgrammeState, - discord_input_filed: Input, + discord_input_field: Input, // cache the old value incase user presses Esc. 
old_value: String, back_to: Scene, + can_save: bool, } +#[allow(dead_code)] enum BetaProgrammeState { DiscordIdAlreadySet, ShowTCs, @@ -49,27 +52,43 @@ impl BetaProgramme { Self { active: false, state, - discord_input_filed: Input::default().with_value(username), + discord_input_field: Input::default().with_value(username), old_value: Default::default(), back_to: Scene::Status, + can_save: false, + } + } + + pub fn validate(&mut self) { + if self.discord_input_field.value().is_empty() { + self.can_save = false; + } else { + let re = Regex::new(r"^0x[a-fA-F0-9]{40}$").expect("Failed to compile regex"); + self.can_save = re.is_match(self.discord_input_field.value()); } } fn capture_inputs(&mut self, key: KeyEvent) -> Vec { let send_back = match key.code { KeyCode::Enter => { - let username = self.discord_input_filed.value().to_string().to_lowercase(); - self.discord_input_filed = username.clone().into(); - - debug!( - "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", - ); - self.state = BetaProgrammeState::DiscordIdAlreadySet; - vec![ - Action::StoreDiscordUserName(username.clone()), - Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername(username)), - Action::SwitchScene(Scene::Status), - ] + self.validate(); + if self.can_save { + let username = self.discord_input_field.value().to_string().to_lowercase(); + self.discord_input_field = username.clone().into(); + + debug!( + "Got Enter, saving the discord username {username:?} and switching to DiscordIdAlreadySet, and Home Scene", + ); + self.state = BetaProgrammeState::DiscordIdAlreadySet; + return vec![ + Action::StoreDiscordUserName(username.clone()), + Action::OptionsActions(OptionsActions::UpdateBetaProgrammeUsername( + username, + )), // FIXME: Change OptionsActions::UpdateBetaProgrammeUsername name + Action::SwitchScene(Scene::Status), + ]; + } + vec![] } KeyCode::Esc => { debug!( @@ -77,8 +96,8 @@ impl BetaProgramme { self.old_value ); // reset to old value - self.discord_input_filed = self - .discord_input_filed + self.discord_input_field = self + .discord_input_field .clone() .with_value(self.old_value.clone()); vec![Action::SwitchScene(self.back_to)] @@ -86,13 +105,14 @@ impl BetaProgramme { KeyCode::Char(' ') => vec![], KeyCode::Backspace => { // if max limit reached, we should allow Backspace to work. 
- self.discord_input_filed.handle_event(&Event::Key(key)); + self.discord_input_field.handle_event(&Event::Key(key)); + self.validate(); vec![] } _ => { - // max 32 limit as per discord docs - if self.discord_input_filed.value().chars().count() < 32 { - self.discord_input_filed.handle_event(&Event::Key(key)); + if self.discord_input_field.value().chars().count() < INPUT_SIZE_USERNAME as usize { + self.discord_input_field.handle_event(&Event::Key(key)); + self.validate(); } vec![] } @@ -109,26 +129,27 @@ impl Component for BetaProgramme { // while in entry mode, keybinds are not captured, so gotta exit entry mode from here let send_back = match &self.state { BetaProgrammeState::DiscordIdAlreadySet => self.capture_inputs(key), - BetaProgrammeState::ShowTCs => { - match key.code { - KeyCode::Char('y') | KeyCode::Char('Y') => { - let is_discord_id_set = !self.discord_input_filed.value().is_empty(); - if is_discord_id_set { - debug!("User accepted the TCs, but discord id already set, moving to DiscordIdAlreadySet"); - self.state = BetaProgrammeState::DiscordIdAlreadySet; - } else { - debug!("User accepted the TCs, but no discord id set, moving to AcceptTCsAndEnterDiscordId"); - self.state = BetaProgrammeState::AcceptTCsAndEnterDiscordId; - } + BetaProgrammeState::ShowTCs => match key.code { + KeyCode::Char('y') | KeyCode::Char('Y') => { + let is_discord_id_set = !self.discord_input_field.value().is_empty(); + if is_discord_id_set { + debug!("User accepted the TCs, but discord id already set, moving to DiscordIdAlreadySet"); + self.state = BetaProgrammeState::DiscordIdAlreadySet; + } else { + debug!("User accepted the TCs, but no discord id set, moving to AcceptTCsAndEnterDiscordId"); + self.state = BetaProgrammeState::AcceptTCsAndEnterDiscordId; } - KeyCode::Esc => { - debug!("User rejected the TCs, moving to RejectTCs"); - self.state = BetaProgrammeState::RejectTCs; - } - _ => {} + vec![] } - vec![] - } + KeyCode::Esc => { + debug!("User rejected the TCs, moving to original screen"); + self.state = BetaProgrammeState::ShowTCs; + vec![Action::SwitchScene(self.back_to)] + } + _ => { + vec![] + } + }, BetaProgrammeState::RejectTCs => { if let KeyCode::Esc = key.code { debug!("RejectTCs msg closed. 
Switching to Status scene."); @@ -146,7 +167,7 @@ impl Component for BetaProgramme { Action::SwitchScene(scene) => match scene { Scene::StatusBetaProgrammePopUp | Scene::OptionsBetaProgrammePopUp => { self.active = true; - self.old_value = self.discord_input_filed.value().to_string(); + self.old_value = self.discord_input_field.value().to_string(); if scene == Scene::StatusBetaProgrammePopUp { self.back_to = Scene::Status; } else if scene == Scene::OptionsBetaProgrammePopUp { @@ -190,7 +211,7 @@ impl Component for BetaProgramme { let pop_up_border = Paragraph::new("").block( Block::default() .borders(Borders::ALL) - .title(" Beta Rewards Program ") + .title(" Add Your Wallet ") .bold() .title_style(Style::new().fg(VIVID_SKY_BLUE)) .padding(Padding::uniform(2)) @@ -200,7 +221,8 @@ impl Component for BetaProgramme { match self.state { BetaProgrammeState::DiscordIdAlreadySet => { - // split into 4 parts, for the prompt, input, text, dash , and buttons + self.validate(); // FIXME: maybe this should be somewhere else + // split into 4 parts, for the prompt, input, text, dash , and buttons let layer_two = Layout::new( Direction::Vertical, [ @@ -218,27 +240,40 @@ impl Component for BetaProgramme { ) .split(layer_one[1]); - let prompt_text = Paragraph::new("Discord Username associated with this device:") - .block(Block::default()) - .alignment(Alignment::Center) - .fg(GHOST_WHITE); + let prompt_text = Paragraph::new(Line::from(vec![ + Span::styled("Enter new ".to_string(), Style::default()), + Span::styled("Wallet Address".to_string(), Style::default().bold()), + ])) + .block(Block::default()) + .alignment(Alignment::Center) + .fg(GHOST_WHITE); f.render_widget(prompt_text, layer_two[0]); let spaces = " ".repeat( - (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_filed.value().len(), + (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_field.value().len(), ); let input = Paragraph::new(Span::styled( - format!("{}{} ", spaces, self.discord_input_filed.value()), - Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), + format!("{}{} ", spaces, self.discord_input_field.value()), + Style::default() + .fg(if self.can_save { VIVID_SKY_BLUE } else { RED }) + .bg(INDIGO) + .underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(Text::from(vec![ - Line::raw("Changing your Username will reset all nodes,"), - Line::raw("and any Nanos left on this device will be lost."), - ])) + let text = Paragraph::new(Text::from(if self.can_save { + vec![ + Line::raw("Changing your Wallet will reset and restart"), + Line::raw("all your nodes."), + ] + } else { + vec![Line::from(Span::styled( + "Invalid wallet address".to_string(), + Style::default().fg(RED), + ))] + })) .alignment(Alignment::Center) .block( Block::default() @@ -260,15 +295,19 @@ impl Component for BetaProgramme { .split(layer_two[4]); let button_no = Line::from(vec![Span::styled( - " No, Cancel [Esc]", + " Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); let button_yes = Line::from(vec![Span::styled( - "Save Username [Enter]", - Style::default().fg(EUCALYPTUS), + "Change Wallet [Enter]", + if self.can_save { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }, )]); f.render_widget(button_yes, buttons_layer[1]); } @@ -290,9 +329,9 @@ impl Component for BetaProgramme { .split(layer_one[1]); let text = Paragraph::new(vec![ - Line::from(Span::styled("Earn a slice of millions of tokens created 
at the genesis of the Autonomi Network by running nodes to build and test the Beta.",Style::default())), + Line::from(Span::styled("Add your wallet and you can earn a slice of millions of tokens created at the genesis of the Autonomi Network when through running nodes.",Style::default())), Line::from(Span::styled("\n\n",Style::default())), - Line::from(Span::styled("To continue in the beta Rewards Program you agree to the Terms and Conditions found here:",Style::default())), + Line::from(Span::styled("By continuing you agree to the Terms and Conditions found here:",Style::default())), Line::from(Span::styled("\n\n",Style::default())), ] ) @@ -327,10 +366,12 @@ impl Component for BetaProgramme { Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); - let button_yes = Line::from(vec![Span::styled( - "Yes, I agree! Continue [Y]", + + let button_yes = Paragraph::new(Line::from(vec![Span::styled( + "Yes, I agree! Continue [Y] ", Style::default().fg(EUCALYPTUS), - )]); + )])) + .alignment(Alignment::Right); f.render_widget(button_yes, buttons_layer[1]); } BetaProgrammeState::RejectTCs => { @@ -381,7 +422,9 @@ impl Component for BetaProgramme { // for the input Constraint::Length(2), // for the text - Constraint::Length(5), + Constraint::Length(3), + // for the hyperlink + Constraint::Length(2), // gap Constraint::Length(1), // for the buttons @@ -390,57 +433,68 @@ impl Component for BetaProgramme { ) .split(layer_one[1]); - let prompt = - Paragraph::new("Enter your Discord Username").alignment(Alignment::Center); + let prompt = Paragraph::new(Line::from(vec![ + Span::styled("Enter your ", Style::default()), + Span::styled("Wallet Address", Style::default().fg(GHOST_WHITE)), + ])) + .alignment(Alignment::Center); f.render_widget(prompt.fg(GHOST_WHITE), layer_two[0]); let spaces = " ".repeat( - (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_filed.value().len(), + (INPUT_AREA_USERNAME - 1) as usize - self.discord_input_field.value().len(), ); let input = Paragraph::new(Span::styled( - format!("{}{} ", spaces, self.discord_input_filed.value()), + format!("{}{} ", spaces, self.discord_input_field.value()), Style::default().fg(VIVID_SKY_BLUE).bg(INDIGO).underlined(), )) .alignment(Alignment::Center); f.render_widget(input, layer_two[1]); - let text = Paragraph::new(vec![ - Line::from(Span::styled( - "Submit your username and track your progress on our Discord server.", - Style::default(), - )), - Line::from(Span::styled("\n\n", Style::default())), - Line::from(Span::styled( - "Note: your username may be different from your display name.", - Style::default(), - )), - ]) + let text = Paragraph::new(vec![Line::from(Span::styled( + "Find out more about compatible wallets, and how to track your earnings:", + Style::default(), + ))]) .block(Block::default().padding(Padding::horizontal(2))) .wrap(Wrap { trim: false }); f.render_widget(text.fg(GHOST_WHITE), layer_two[2]); + let link = Hyperlink::new( + Span::styled( + " https://autonomi.com/wallet", + Style::default().fg(VIVID_SKY_BLUE), + ), + "https://autonomi.com/wallet", + ); + + f.render_widget_ref(link, layer_two[3]); + let dash = Block::new() .borders(Borders::BOTTOM) .border_style(Style::new().fg(GHOST_WHITE)); - f.render_widget(dash, layer_two[3]); + f.render_widget(dash, layer_two[4]); let buttons_layer = Layout::horizontal(vec![ Constraint::Percentage(50), Constraint::Percentage(50), ]) - .split(layer_two[4]); + .split(layer_two[5]); let button_no = Line::from(vec![Span::styled( - " No, Cancel [Esc]", + 
" Cancel [Esc]", Style::default().fg(LIGHT_PERIWINKLE), )]); f.render_widget(button_no, buttons_layer[0]); - let button_yes = Line::from(vec![Span::styled( - "Submit Username [Enter]", - Style::default().fg(EUCALYPTUS), - )]); + let button_yes = Paragraph::new(Line::from(vec![Span::styled( + "Save Wallet [Enter] ", + if self.can_save { + Style::default().fg(EUCALYPTUS) + } else { + Style::default().fg(LIGHT_PERIWINKLE) + }, + )])) + .alignment(Alignment::Right); f.render_widget(button_yes, buttons_layer[1]); } } diff --git a/node-launchpad/src/components/status.rs b/node-launchpad/src/components/status.rs index 69dd9a4d90..90706c488d 100644 --- a/node-launchpad/src/components/status.rs +++ b/node-launchpad/src/components/status.rs @@ -59,7 +59,7 @@ const MAX_ERRORS_WHILE_RUNNING_NAT_DETECTION: usize = 3; // Table Widths const NODE_WIDTH: usize = 10; const VERSION_WIDTH: usize = 7; -const NANOS_WIDTH: usize = 5; +const ATTOS_WIDTH: usize = 5; const MEMORY_WIDTH: usize = 7; const MBPS_WIDTH: usize = 15; const RECORDS_WIDTH: usize = 4; @@ -207,7 +207,7 @@ impl Status<'_> { .iter() .find(|s| s.service_name == node_item.service_name) { - item.nanos = stats.forwarded_rewards; + item.attos = stats.forwarded_rewards; item.memory = stats.memory_usage_mb; item.mbps = format!( "↓{:06.2} ↑{:06.2}", @@ -222,7 +222,7 @@ impl Status<'_> { let new_item = NodeItem { name: node_item.service_name.clone(), version: node_item.version.to_string(), - nanos: 0, + attos: 0, memory: 0, mbps: "-".to_string(), records: 0, @@ -256,7 +256,7 @@ impl Status<'_> { Some(NodeItem { name: node_item.service_name.clone().to_string(), version: node_item.version.to_string(), - nanos: 0, + attos: 0, memory: 0, mbps: "-".to_string(), records: 0, @@ -539,6 +539,7 @@ impl Component for Status<'_> { action_sender: action_sender.clone(), connection_mode: self.connection_mode, port_range: Some(port_range), + rewards_address: self.discord_username.clone(), }; debug!("Calling maintain_n_running_nodes"); @@ -561,7 +562,11 @@ impl Component for Status<'_> { stop_nodes(running_nodes, action_sender); } StatusActions::TriggerBetaProgramme => { - return Ok(Some(Action::SwitchScene(Scene::StatusBetaProgrammePopUp))); + if self.discord_username.is_empty() { + return Ok(Some(Action::SwitchScene(Scene::StatusBetaProgrammePopUp))); + } else { + return Ok(None); + } } }, Action::OptionsActions(OptionsActions::ResetNodes) => { @@ -661,54 +666,44 @@ impl Component for Status<'_> { let column_constraints = [Constraint::Length(23), Constraint::Fill(1)]; let stats_table = Table::new(stats_rows, stats_width).widths(column_constraints); - // Combine "Nanos Earned" and "Username" into a single row - let discord_username_placeholder = "Username: "; // Used to calculate the width of the username column - let discord_username_no_username = "[Ctrl+B] to set"; - let discord_username_title = Span::styled( - discord_username_placeholder, - Style::default().fg(VIVID_SKY_BLUE), - ); - - let discord_username = if !self.discord_username.is_empty() { - Span::styled( - self.discord_username.clone(), - Style::default().fg(VIVID_SKY_BLUE), - ) - .bold() + let wallet_not_set = if self.discord_username.is_empty() { + vec![ + Span::styled("Press ".to_string(), Style::default().fg(VIVID_SKY_BLUE)), + Span::styled("[Ctrl+B] ".to_string(), Style::default().fg(GHOST_WHITE)), + Span::styled( + "to add your ".to_string(), + Style::default().fg(VIVID_SKY_BLUE), + ), + Span::styled( + "Wallet Address".to_string(), + Style::default().fg(VIVID_SKY_BLUE).bold(), + ), + ] } else { - 
Span::styled( - discord_username_no_username, - Style::default().fg(GHOST_WHITE), - ) + vec![] }; - let total_nanos_earned_and_discord_row = Row::new(vec![ - Cell::new("Nanos Earned".to_string()).fg(VIVID_SKY_BLUE), + let total_attos_earned_and_wallet_row = Row::new(vec![ + Cell::new("Attos Earned".to_string()).fg(VIVID_SKY_BLUE), Cell::new(self.node_stats.total_forwarded_rewards.to_string()) .fg(VIVID_SKY_BLUE) .bold(), - Cell::new( - Line::from(vec![discord_username_title, discord_username]) - .alignment(Alignment::Right), - ), + Cell::new(Line::from(wallet_not_set).alignment(Alignment::Right)), ]); - let nanos_discord_rows = vec![total_nanos_earned_and_discord_row]; - let nanos_discord_width = [Constraint::Length(5)]; + let attos_wallet_rows = vec![total_attos_earned_and_wallet_row]; + let attos_wallet_width = [Constraint::Length(5)]; let column_constraints = [ Constraint::Length(23), Constraint::Fill(1), - Constraint::Length( - discord_username_placeholder.len() as u16 - + if !self.discord_username.is_empty() { - self.discord_username.len() as u16 - } else { - discord_username_no_username.len() as u16 - }, - ), + Constraint::Length(if self.discord_username.is_empty() { + 41 //TODO: make it dynamic with wallet_not_set + } else { + 0 + }), ]; - let nanos_discord_table = - Table::new(nanos_discord_rows, nanos_discord_width).widths(column_constraints); + let attos_wallet_table = + Table::new(attos_wallet_rows, attos_wallet_width).widths(column_constraints); let inner_area = combined_block.inner(layout[1]); let device_layout = Layout::new( @@ -719,7 +714,7 @@ impl Component for Status<'_> { // Render both tables inside the combined block f.render_widget(stats_table, device_layout[0]); - f.render_widget(nanos_discord_table, device_layout[1]); + f.render_widget(attos_wallet_table, device_layout[1]); // ==== Node Status ===== @@ -783,7 +778,7 @@ impl Component for Status<'_> { let node_widths = [ Constraint::Min(NODE_WIDTH as u16), Constraint::Min(VERSION_WIDTH as u16), - Constraint::Min(NANOS_WIDTH as u16), + Constraint::Min(ATTOS_WIDTH as u16), Constraint::Min(MEMORY_WIDTH as u16), Constraint::Min(MBPS_WIDTH as u16), Constraint::Min(RECORDS_WIDTH as u16), @@ -797,7 +792,7 @@ impl Component for Status<'_> { let header_row = Row::new(vec![ Cell::new("Node").fg(COOL_GREY), Cell::new("Version").fg(COOL_GREY), - Cell::new("Nanos").fg(COOL_GREY), + Cell::new("Attos").fg(COOL_GREY), Cell::new("Memory").fg(COOL_GREY), Cell::new( format!("{}{}", " ".repeat(MBPS_WIDTH - "Mbps".len()), "Mbps") @@ -1022,7 +1017,7 @@ impl fmt::Display for NodeStatus { pub struct NodeItem<'a> { name: String, version: String, - nanos: u64, + attos: u64, memory: usize, mbps: String, records: usize, @@ -1075,8 +1070,8 @@ impl NodeItem<'_> { self.version.to_string(), format!( "{}{}", - " ".repeat(NANOS_WIDTH.saturating_sub(self.nanos.to_string().len())), - self.nanos.to_string() + " ".repeat(ATTOS_WIDTH.saturating_sub(self.attos.to_string().len())), + self.attos.to_string() ), format!( "{}{} MB", diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 852e8da8a7..3049a3930b 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -51,6 +51,7 @@ pub struct MaintainNodesArgs { pub action_sender: UnboundedSender, pub connection_mode: ConnectionMode, pub port_range: Option, + pub rewards_address: String, } /// Maintain the specified number of nodes @@ -172,6 +173,7 @@ struct NodeConfig { data_dir_path: Option, peers_args: PeersArgs, safenode_path: Option, + 
rewards_address: String, } /// Run the NAT detection process @@ -234,6 +236,7 @@ fn prepare_node_config(args: &MaintainNodesArgs) -> NodeConfig { data_dir_path: args.data_dir_path.clone(), peers_args: args.peers_args.clone(), safenode_path: args.safenode_path.clone(), + rewards_address: args.rewards_address.clone(), } } @@ -299,7 +302,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) { None, // We don't care about the port, as we are scaling down config.owner.clone(), config.peers_args.clone(), - RewardsAddress::from_str("0x1111111111111111111111111111111111111111").unwrap(), + RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(), None, None, config.safenode_path.clone(), @@ -373,7 +376,7 @@ async fn add_nodes( port_range, config.owner.clone(), config.peers_args.clone(), - RewardsAddress::from_str("0x1111111111111111111111111111111111111111").unwrap(), + RewardsAddress::from_str(config.rewards_address.as_str()).unwrap(), None, None, config.safenode_path.clone(), From 62e1fbccfbcf8a2cefb3e77fcbbf402f49b5758f Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Wed, 16 Oct 2024 12:03:20 +0200 Subject: [PATCH 243/255] fix(service-management): default values for evm network --- evmlib/src/lib.rs | 9 ++------- sn_service_management/src/node.rs | 2 ++ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/evmlib/src/lib.rs b/evmlib/src/lib.rs index 0093aeac0e..fe712e1b27 100644 --- a/evmlib/src/lib.rs +++ b/evmlib/src/lib.rs @@ -73,8 +73,9 @@ impl CustomNetwork { } } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] pub enum Network { + #[default] ArbitrumOne, ArbitrumSepolia, Custom(CustomNetwork), @@ -150,9 +151,3 @@ impl Network { .await } } - -impl Default for Network { - fn default() -> Self { - Self::ArbitrumOne - } -} diff --git a/sn_service_management/src/node.rs b/sn_service_management/src/node.rs index b2e4af4eaa..c9d853a009 100644 --- a/sn_service_management/src/node.rs +++ b/sn_service_management/src/node.rs @@ -283,6 +283,7 @@ pub struct NodeServiceData { )] pub connected_peers: Option>, pub data_dir_path: PathBuf, + #[serde(default)] pub evm_network: EvmNetwork, pub genesis: bool, pub home_network: bool, @@ -307,6 +308,7 @@ pub struct NodeServiceData { )] pub peer_id: Option, pub pid: Option, + #[serde(default)] pub rewards_address: RewardsAddress, pub reward_balance: Option, pub rpc_socket_addr: SocketAddr, From 66cbf21db2da4a74c1e31bcb14b6c2f513c798cf Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 21 Oct 2024 16:51:29 +0200 Subject: [PATCH 244/255] chore(launchpad): remove env variables for build artifacts --- .github/workflows/build-release-artifacts.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/build-release-artifacts.yml b/.github/workflows/build-release-artifacts.yml index b30d4e1803..4bbc2f8f7b 100644 --- a/.github/workflows/build-release-artifacts.yml +++ b/.github/workflows/build-release-artifacts.yml @@ -17,17 +17,6 @@ on: description: Set to build a particular tag type: string -# The key variables also need to be passed to `cross`, which runs in a container and does not -# inherit variables from the parent environment. The `cross` tool is used in the `build` -# job. If any keys are added, the `build-release-artifacts` target in the Justfile must -# also be updated. 
-env: - GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }} - GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }} - FOUNDATION_PK: ${{ secrets.STABLE_FOUNDATION_PK }} - NETWORK_ROYALTIES_PK: ${{ secrets.STABLE_NETWORK_ROYALTIES_PK }} - PAYMENT_FORWARD_PK: ${{ secrets.STABLE_REWARD_FORWARDING_PK }} - jobs: build: name: build From 09c36c85bd0bea1931824469e8b934608441a28b Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 21 Oct 2024 17:22:43 +0100 Subject: [PATCH 245/255] Revert "Merge pull request #2224 from joshuef/RangeBasedGets" This reverts commit 1d141961323d70cfb0ac64c2a61f55703d278915, reversing changes made to 567200ea31cd8894deb6878fb7c8e9f5be25c1fc. --- .github/workflows/merge.yml | 392 ++++++++------------ sn_networking/src/bootstrap.rs | 120 +++++- sn_networking/src/cmd.rs | 163 ++++---- sn_networking/src/driver.rs | 157 +++----- sn_networking/src/error.rs | 14 +- sn_networking/src/event/kad.rs | 331 ++++++----------- sn_networking/src/event/request_response.rs | 147 ++++---- sn_networking/src/event/swarm.rs | 54 +-- sn_networking/src/lib.rs | 244 +----------- sn_networking/src/network_discovery.rs | 37 +- sn_networking/src/record_store.rs | 23 +- sn_networking/src/record_store_api.rs | 14 +- sn_networking/src/replication_fetcher.rs | 64 +--- sn_networking/src/transfers.rs | 34 +- sn_node/src/put_validation.rs | 13 +- sn_node/src/replication.rs | 120 ++++-- sn_node/tests/double_spend.rs | 196 ++++------ sn_node/tests/storage_payments.rs | 257 +++++++------ sn_node/tests/verify_data_location.rs | 22 +- sn_node/tests/verify_routing_table.rs | 2 +- sn_protocol/src/error.rs | 3 - sn_protocol/src/storage.rs | 5 +- sn_protocol/src/storage/header.rs | 27 -- sn_transfers/src/wallet/error.rs | 10 - 24 files changed, 1011 insertions(+), 1438 deletions(-) diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 1b5395b028..98ee999b06 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -531,19 +531,15 @@ jobs: # platform: ${{ matrix.os }} # build: true - # # incase the faucet is not ready yet - # - name: 30s sleep for faucet completion - # run: sleep 30 - - # - name: Check SAFE_PEERS was set - # shell: bash - # run: | - # if [[ -z "$SAFE_PEERS" ]]; then - # echo "The SAFE_PEERS variable has not been set" - # exit 1 - # else - # echo "SAFE_PEERS has been set to $SAFE_PEERS" - # fi + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi # - name: execute token_distribution tests # run: cargo test --release --features=local,distribution token_distribution -- --nocapture --test-threads=1 @@ -635,37 +631,7 @@ jobs: log_file_prefix: safe_test_logs_churn platform: ${{ matrix.os }} - - name: Get total node count - shell: bash - timeout-minutes: 1 - run: | - node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) - echo "Node dir count is $node_count" - - - name: Get restart of nodes using rg - shell: bash - timeout-minutes: 1 - # get the counts, then the specific line, and then the digit count only - # then check we have an expected level of restarts - # TODO: make this use an env var, or relate to testnet size - run: | - restart_count=$(rg "Node is restarting in" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) - echo "Restarted $restart_count nodes" - - - name: Get peers removed from nodes using rg - shell: bash - timeout-minutes: 1 - run: | - 
peer_removed=$(rg "PeerRemovedFromRoutingTable" "${{ matrix.node_data_path }}" -c --stats | \ - rg "(\d+) matches" | rg "\d+" -o) || { echo "Failed to extract peer removal count"; exit 1; } - if [ -z "$peer_removed" ]; then - echo "No peer removal count found" - exit 1 - fi - echo "PeerRemovedFromRoutingTable $peer_removed times" - - - name: Verify peers removed exceed restarted node counts + - name: Verify restart of nodes using rg shell: bash timeout-minutes: 1 # get the counts, then the specific line, and then the digit count only @@ -682,6 +648,8 @@ jobs: echo "PeerRemovedFromRoutingTable times of: $peer_removed is less than the restart count of: $restart_count" exit 1 fi + node_count=$(ls "${{ matrix.node_data_path }}" | wc -l) + echo "Node dir count is $node_count" # TODO: reenable this once the testnet dir creation is tidied up to avoid a large count here # if [ $restart_count -lt $node_count ]; then @@ -800,7 +768,7 @@ jobs: uses: maidsafe/sn-local-testnet-action@main with: action: stop - log_file_prefix: safe_test_logs_data_location_routing_table + log_file_prefix: safe_test_logs_data_location platform: ${{ matrix.os }} - name: Verify restart of nodes using rg @@ -892,15 +860,15 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Create and fund a wallet first time - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt - # echo "----------" - # cat first.txt - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet first time + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt + # echo "----------" + # cat first.txt + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Move faucet log to the working folder # run: | @@ -926,64 +894,44 @@ jobs: # continue-on-error: true # if: always() - # - name: Cleanup prior faucet and cashnotes - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Create a new wallet - # run: ~/safe --log-output-dest=data-dir wallet create --no-password - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Attempt second faucet genesis disbursement - # run: ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) > second.txt 2>&1 || true - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: cat second.txt - # run: cat second.txt - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Verify a second disbursement is rejected - # run: | - # if grep "Faucet disbursement has already occured" second.txt; then - # echo "Duplicated faucet rejected" - # else - # echo "Duplicated faucet not rejected!" 
- # exit 1 - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 - - # - name: Create and fund a wallet with different keypair - # run: | - # ls -l /home/runner/.local/share - # ls -l /home/runner/.local/share/safe - # rm -rf /home/runner/.local/share/safe/test_faucet - # rm -rf /home/runner/.local/share/safe/test_genesis - # rm -rf /home/runner/.local/share/safe/client - # ~/safe --log-output-dest=data-dir wallet create --no-password - # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then - # echo "Faucet with different genesis key not rejected!" - # exit 1 - # else - # echo "Faucet with different genesis key rejected" - # fi - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet second time + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt + # echo "----------" + # cat second.txt + # if grep "genesis is already spent" second.txt; then + # echo "Duplicated faucet rejected" + # else + # echo "Duplicated faucet not rejected!" + # exit 1 + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 + + # - name: Create and fund a wallet with different keypair + # run: | + # ls -l /home/runner/.local/share + # ls -l /home/runner/.local/share/safe + # rm -rf /home/runner/.local/share/safe/test_faucet + # rm -rf /home/runner/.local/share/safe/test_genesis + # rm -rf /home/runner/.local/share/safe/client + # ~/safe --log-output-dest=data-dir wallet create --no-password + # if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then + # echo "Faucet with different genesis key not rejected!" 
+ # exit 1 + # else + # echo "Faucet with different genesis key rejected" + # fi + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Build faucet binary again without the gifting feature # run: cargo build --release --bin faucet @@ -1111,14 +1059,14 @@ jobs: # echo "PWD subdirs:" # du -sh */ - # - name: Create and fund a wallet to pay for files storage - # run: | - # ~/safe --log-output-dest=data-dir wallet create --no-password - # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ~/safe --log-output-dest=data-dir wallet create --no-password + # ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ~/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload # run: ~/safe --log-output-dest=data-dir files upload "ubuntu-14.04.6-desktop-i386.iso" --retry-strategy quick @@ -1168,8 +1116,6 @@ jobs: # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" # name: Replication bench with heavy upload # runs-on: ubuntu-latest - # env: - # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client # steps: # - uses: actions/checkout@v4 @@ -1246,28 +1192,14 @@ jobs: # echo "SAFE_PEERS has been set to $SAFE_PEERS" # fi - # - name: Sleep 15s - # shell: bash - # run: sleep 15 - - # - name: Check faucet has been funded - # shell: bash - # run: | - # cash_note_count=$(ls -l /home/runner/.local/share/safe/test_faucet/wallet/cash_notes/ | wc -l) - # echo $cash_note_count - # if [ "$cash_note_count" -eq 0 ]; then - # echo "Error: Expected at least 1 cash note, but found $cash_note_count" - # exit 1 - # fi - - # - name: Create and fund a wallet to pay for files storage - # run: | - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # timeout-minutes: 5 + # - name: Create and fund a wallet to pay for files storage + # run: | + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # timeout-minutes: 5 # - name: Start a client to upload first file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_1.tar.gz" --retry-strategy quick @@ -1275,32 +1207,29 @@ jobs: # SN_LOG: "all" # timeout-minutes: 5 - # - name: Check current directories - # run: | - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # timeout-minutes: 1 - - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # cash_note_files=$(ls 
$CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 @@ -1312,49 +1241,52 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) - # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then - # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) - # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(find $CLIENT_DATA_PATH/wallet/cash_notes -type f | wc -l) + # if (( $(echo "$cash_note_files > $expected_cash_notes_files" | bc -l) )); then + # echo "Got too many cash_note files leftover: $cash_note_files when we expected $expected_cash_notes_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(find $CLIENT_DATA_PATH/wallet/payments -type f | wc -l) + # if (( $(echo "$payment_files > $expected_payment_files" | bc -l) )); then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 10 # - name: Wait for certain period # run: sleep 300 # timeout-minutes: 6 - # # Start a different client to avoid local wallet slow down with more payments handled. 
- # - name: Start a different client - # run: | - # pwd - # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first - # ls -l $SAFE_DATA_PATH - # ls -l $SAFE_DATA_PATH/client_first - # mkdir $SAFE_DATA_PATH/client - # ls -l $SAFE_DATA_PATH - # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs - # ls -l $CLIENT_DATA_PATH - # ./target/release/safe --log-output-dest=data-dir wallet create --no-password - # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex - # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex - # env: - # SN_LOG: "all" - # SAFE_DATA_PATH: /home/runner/.local/share/safe - # timeout-minutes: 25 + # # Start a different client to avoid local wallet slow down with more payments handled. + # - name: Start a different client + # run: | + # pwd + # mv $CLIENT_DATA_PATH $SAFE_DATA_PATH/client_first + # ls -l $SAFE_DATA_PATH + # ls -l $SAFE_DATA_PATH/client_first + # mkdir $SAFE_DATA_PATH/client + # ls -l $SAFE_DATA_PATH + # mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs + # ls -l $CLIENT_DATA_PATH + # ./target/release/safe --log-output-dest=data-dir wallet create --no-password + # ./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex + # ./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex + # env: + # SN_LOG: "all" + # SAFE_DATA_PATH: /home/runner/.local/share/safe + # CLIENT_DATA_PATH: /home/runner/.local/share/safe/client + # timeout-minutes: 25 # - name: Use second client to upload third file # run: ./target/release/safe --log-output-dest=data-dir files upload "./test_data_3.tar.gz" --retry-strategy quick @@ -1362,27 +1294,29 @@ jobs: # SN_LOG: "all" # timeout-minutes: 10 - # - name: Ensure no leftover cash_notes and payment files - # run: | - # expected_cash_notes_files="1" - # expected_payment_files="0" - # pwd - # ls $CLIENT_DATA_PATH/ -l - # ls $CLIENT_DATA_PATH/wallet -l - # ls $CLIENT_DATA_PATH/wallet/cash_notes -l - # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) - # echo "Find $cash_note_files cash_note files" - # if [ $expected_cash_notes_files -lt $cash_note_files ]; then - # echo "Got too many cash_note files leftover: $cash_note_files" - # exit 1 - # fi - # ls $CLIENT_DATA_PATH/wallet/payments -l - # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) - # if [ $expected_payment_files -lt $payment_files ]; then - # echo "Got too many payment files leftover: $payment_files" - # exit 1 - # fi - # timeout-minutes: 10 + # - name: Ensure no leftover cash_notes and payment files + # run: | + # expected_cash_notes_files="1" + # expected_payment_files="0" + # pwd + # ls $CLIENT_DATA_PATH/ -l + # ls $CLIENT_DATA_PATH/wallet -l + # ls $CLIENT_DATA_PATH/wallet/cash_notes -l + # cash_note_files=$(ls $CLIENT_DATA_PATH/wallet/cash_notes | wc -l) + # echo "Find $cash_note_files cash_note files" + # if [ $expected_cash_notes_files -lt $cash_note_files ]; then + # echo "Got too many cash_note files leftover: $cash_note_files" + # exit 1 + # fi + # ls $CLIENT_DATA_PATH/wallet/payments -l + # payment_files=$(ls $CLIENT_DATA_PATH/wallet/payments | wc -l) + # if [ $expected_payment_files -lt $payment_files ]; then + # echo "Got too many payment files leftover: $payment_files" + # exit 1 + # fi + # env: + # CLIENT_DATA_PATH: 
/home/runner/.local/share/safe/client + # timeout-minutes: 10 # - name: Stop the local network and upload logs diff --git a/sn_networking/src/bootstrap.rs b/sn_networking/src/bootstrap.rs index ec6c019a88..f8b7cf1e59 100644 --- a/sn_networking/src/bootstrap.rs +++ b/sn_networking/src/bootstrap.rs @@ -7,19 +7,45 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{driver::PendingGetClosestType, SwarmDriver}; +use rand::{rngs::OsRng, Rng}; use tokio::time::Duration; -use crate::target_arch::Instant; +use crate::target_arch::{interval, Instant, Interval}; /// The default interval at which NetworkDiscovery is triggered. The interval is increased as more peers are added to the /// routing table. -pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(15); +pub(crate) const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10); + +/// For every BOOTSTRAP_CONNECTED_PEERS_STEP connected peers, we step up the BOOTSTRAP_INTERVAL to slow down the +/// bootstrapping process +const BOOTSTRAP_CONNECTED_PEERS_STEP: u32 = 5; + +/// If the last peer was added to the RT longer than LAST_PEER_ADDED_TIME_LIMIT ago, then we should slow down the +/// bootstrapping process. This is to make sure we don't flood the network with `FindNode` msgs. +const LAST_PEER_ADDED_TIME_LIMIT: Duration = Duration::from_secs(180); + +/// A minimum interval to prevent bootstrap from being triggered too often +const LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT: Duration = Duration::from_secs(30); + +/// The bootstrap interval to use if we haven't added any new peers in a while. +const NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S: u64 = 600; impl SwarmDriver { /// This functions triggers network discovery based on when the last peer was added to the RT and the number of - /// peers in RT. - pub(crate) fn run_bootstrap_continuously(&mut self) { - self.trigger_network_discovery(); + /// peers in RT. The function also returns a new bootstrap interval that is proportional to the number of + /// peers in RT: the more peers in the RT, the longer the interval. + pub(crate) async fn run_bootstrap_continuously( + &mut self, + current_bootstrap_interval: Duration, + ) -> Option { + let (should_bootstrap, new_interval) = self + .bootstrap + .should_we_bootstrap(self.peers_in_rt as u32, current_bootstrap_interval) + .await; + if should_bootstrap { + self.trigger_network_discovery(); + } + new_interval } pub(crate) fn trigger_network_discovery(&mut self) { @@ -35,27 +61,27 @@ impl SwarmDriver { .get_closest_peers(addr.as_bytes()); let _ = self.pending_get_closest_peers.insert( query_id, - ( - addr, - PendingGetClosestType::NetworkDiscovery, - Default::default(), - ), + (PendingGetClosestType::NetworkDiscovery, Default::default()), ); } self.bootstrap.initiated(); - info!("Trigger network discovery took {:?}", now.elapsed()); + debug!("Trigger network discovery took {:?}", now.elapsed()); } } /// Tracks and helps with the continuous kad::bootstrapping process pub(crate) struct ContinuousBootstrap { + initial_bootstrap_done: bool, + last_peer_added_instant: Instant, last_bootstrap_triggered: Option, } impl ContinuousBootstrap { pub(crate) fn new() -> Self { Self { + initial_bootstrap_done: false, + last_peer_added_instant: Instant::now(), last_bootstrap_triggered: None, } } @@ -64,4 +90,76 @@ impl ContinuousBootstrap { pub(crate) fn initiated(&mut self) { self.last_bootstrap_triggered = Some(Instant::now()); } + + /// Notify about a newly added peer to the RT. This will help with slowing down the bootstrap process.
+ /// Returns `true` if we have to perform the initial bootstrapping. + pub(crate) fn notify_new_peer(&mut self) -> bool { + self.last_peer_added_instant = Instant::now(); + // true to kick off the initial bootstrapping. `run_bootstrap_continuously` might kick off so soon that we might + // not have a single peer in the RT and we'd not perform any bootstrapping for a while. + if !self.initial_bootstrap_done { + self.initial_bootstrap_done = true; + true + } else { + false + } + } + + /// Returns `true` if we should carry out the Kademlia Bootstrap process immediately. + /// Also optionally returns the new interval to re-bootstrap. + pub(crate) async fn should_we_bootstrap( + &self, + peers_in_rt: u32, + current_interval: Duration, + ) -> (bool, Option) { + let is_ongoing = if let Some(last_bootstrap_triggered) = self.last_bootstrap_triggered { + last_bootstrap_triggered.elapsed() < LAST_BOOTSTRAP_TRIGGERED_TIME_LIMIT + } else { + false + }; + let should_bootstrap = !is_ongoing && peers_in_rt >= 1; + + // if it has been a while (LAST_PEER_ADDED_TIME_LIMIT) since we have added a new peer to our RT, then slow down + // the bootstrapping process. + // Don't slow down if we haven't even added one peer to our RT. + if self.last_peer_added_instant.elapsed() > LAST_PEER_ADDED_TIME_LIMIT && peers_in_rt != 0 { + // To avoid heartbeat-like CPU usage due to the 1K candidates generation, + // randomize the interval within a certain range + let no_peer_added_slowdown_interval: u64 = OsRng.gen_range( + NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S / 2..NO_PEER_ADDED_SLOWDOWN_INTERVAL_MAX_S, + ); + let no_peer_added_slowdown_interval_duration = + Duration::from_secs(no_peer_added_slowdown_interval); + info!( + "It has been {LAST_PEER_ADDED_TIME_LIMIT:?} since we last added a peer to RT. Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {no_peer_added_slowdown_interval_duration:?}" + ); + + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. + #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] + let mut new_interval = interval(no_peer_added_slowdown_interval_duration); + #[cfg(not(target_arch = "wasm32"))] + new_interval.tick().await; + + return (should_bootstrap, Some(new_interval)); + } + + // increment bootstrap_interval in steps of BOOTSTRAP_INTERVAL every BOOTSTRAP_CONNECTED_PEERS_STEP + let step = peers_in_rt / BOOTSTRAP_CONNECTED_PEERS_STEP; + let step = std::cmp::max(1, step); + let new_interval = BOOTSTRAP_INTERVAL * step; + let new_interval = if new_interval > current_interval { + info!("More peers have been added to our RT! Slowing down the continuous bootstrapping process. Old interval: {current_interval:?}, New interval: {new_interval:?}"); + + // `Interval` ticks immediately for Tokio, but not for `wasmtimer`, which is used for wasm32. + #[cfg_attr(target_arch = "wasm32", allow(unused_mut))] + let mut interval = interval(new_interval); + #[cfg(not(target_arch = "wasm32"))] + interval.tick().await; + + Some(interval) + } else { + None + }; + (should_bootstrap, new_interval) + } } diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 5ec9ebd827..b0eda19190 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -7,25 +7,24 @@ // permissions and limitations relating to use of the SAFE Network Software.
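
// A minimal sketch (not the crate's code) of the interval step-up rule restored
// in should_we_bootstrap above: the bootstrap interval grows in BOOTSTRAP_INTERVAL
// steps, one step per BOOTSTRAP_CONNECTED_PEERS_STEP peers currently in the RT.

use std::time::Duration;

const BOOTSTRAP_INTERVAL: Duration = Duration::from_secs(10);
const BOOTSTRAP_CONNECTED_PEERS_STEP: u32 = 5;

fn stepped_bootstrap_interval(peers_in_rt: u32) -> Duration {
    // at least one step, so an empty routing table still yields the base interval
    let step = std::cmp::max(1, peers_in_rt / BOOTSTRAP_CONNECTED_PEERS_STEP);
    BOOTSTRAP_INTERVAL * step
}

fn main() {
    assert_eq!(stepped_bootstrap_interval(0), Duration::from_secs(10));
    assert_eq!(stepped_bootstrap_interval(12), Duration::from_secs(20)); // 12 / 5 = 2 steps
    assert_eq!(stepped_bootstrap_interval(50), Duration::from_secs(100));
}
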
use crate::{ - close_group_majority, driver::{PendingGetClosestType, SwarmDriver}, error::{NetworkError, Result}, event::TerminateNodeReason, log_markers::Marker, - multiaddr_pop_p2p, sort_peers_by_address_and_limit, GetRecordCfg, GetRecordError, MsgResponder, - NetworkEvent, CLOSE_GROUP_SIZE, + multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, + REPLICATION_PEERS_COUNT, }; use libp2p::{ kad::{ store::{Error as StoreError, RecordStore}, - KBucketDistance, Quorum, Record, RecordKey, + Quorum, Record, RecordKey, }, Multiaddr, PeerId, }; use sn_evm::{AttoTokens, PaymentQuote, QuotingMetrics}; use sn_protocol::{ messages::{Cmd, Request, Response}, - storage::{get_type_from_record, RecordType}, + storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; use std::{ @@ -34,6 +33,7 @@ use std::{ time::Duration, }; use tokio::sync::oneshot; +use xor_name::XorName; use crate::target_arch::Instant; @@ -56,15 +56,6 @@ pub enum NodeIssue { /// Commands to send to the Swarm pub enum LocalSwarmCmd { - // Returns all the peers from all the k-buckets from the local Routing Table. - // This includes our PeerId as well. - GetAllLocalPeersExcludingSelf { - sender: oneshot::Sender>, - }, - /// Return the current GetRange as determined by the SwarmDriver - GetCurrentRange { - sender: oneshot::Sender, - }, /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. GetKBuckets { @@ -76,8 +67,8 @@ pub enum LocalSwarmCmd { sender: oneshot::Sender>, }, // Get closest peers from the local RoutingTable - GetCloseRangeLocalPeers { - address: NetworkAddress, + GetCloseGroupLocalPeers { + key: NetworkAddress, sender: oneshot::Sender>, }, GetSwarmLocalState(oneshot::Sender), @@ -222,11 +213,15 @@ impl Debug for LocalSwarmCmd { PrettyPrintRecordKey::from(key) ) } + LocalSwarmCmd::GetClosestKLocalPeers { .. } => { write!(f, "LocalSwarmCmd::GetClosestKLocalPeers") } - LocalSwarmCmd::GetCloseRangeLocalPeers { address: key, .. } => { - write!(f, "SwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}") + LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => { + write!( + f, + "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}" + ) } LocalSwarmCmd::GetLocalStoreCost { .. } => { write!(f, "LocalSwarmCmd::GetLocalStoreCost") @@ -247,12 +242,6 @@ impl Debug for LocalSwarmCmd { LocalSwarmCmd::GetKBuckets { .. } => { write!(f, "LocalSwarmCmd::GetKBuckets") } - LocalSwarmCmd::GetCurrentRange { .. } => { - write!(f, "SwarmCmd::GetCurrentRange") - } - LocalSwarmCmd::GetAllLocalPeersExcludingSelf { .. } => { - write!(f, "SwarmCmd::GetAllLocalPeers") - } LocalSwarmCmd::GetSwarmLocalState { .. 
} => { write!(f, "LocalSwarmCmd::GetSwarmLocalState") } @@ -483,7 +472,6 @@ impl SwarmDriver { let _ = self.pending_get_closest_peers.insert( query_id, ( - key, PendingGetClosestType::FunctionCall(sender), Default::default(), ), @@ -553,7 +541,6 @@ impl SwarmDriver { Ok(()) } - pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> { let start = Instant::now(); let mut cmd_string; @@ -637,7 +624,28 @@ impl SwarmDriver { let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); - let record_type = get_type_from_record(&record)?; + let record_type = match RecordHeader::from_record(&record) { + Ok(record_header) => { + match record_header.kind { + RecordKind::Chunk => RecordType::Chunk, + RecordKind::Scratchpad => RecordType::Scratchpad, + RecordKind::Spend | RecordKind::Register => { + let content_hash = XorName::from_content(&record.value); + RecordType::NonChunk(content_hash) + } + RecordKind::ChunkWithPayment + | RecordKind::RegisterWithPayment + | RecordKind::ScratchpadWithPayment => { + error!("Record {record_key:?} with payment shall not be stored locally."); + return Err(NetworkError::InCorrectRecordHeader); + } + } + } + Err(err) => { + error!("For record {record_key:?}, failed to parse record_header {err:?}"); + return Err(NetworkError::InCorrectRecordHeader); + } + }; let result = self .swarm @@ -686,8 +694,16 @@ impl SwarmDriver { // The record_store will prune far records and setup a `distance range`, // once reached the `max_records` cap. - self.replication_fetcher - .set_replication_distance_range(self.get_request_range()); + if let Some(distance) = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .get_farthest_replication_distance_bucket() + { + self.replication_fetcher + .set_replication_distance_range(distance); + } if let Err(err) = result { error!("Can't store verified record {record_key:?} locally: {err:?}"); @@ -744,10 +760,6 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } - LocalSwarmCmd::GetCurrentRange { sender } => { - cmd_string = "GetCurrentRange"; - let _ = sender.send(self.get_request_range()); - } LocalSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); @@ -766,13 +778,9 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender } => { - cmd_string = "GetAllLocalPeersExcludingSelf"; - let _ = sender.send(self.get_all_local_peers_excluding_self()); - } - LocalSwarmCmd::GetCloseRangeLocalPeers { address, sender } => { - cmd_string = "GetCloseRangeLocalPeers"; - let key = address.as_kbucket_key(); + LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => { + cmd_string = "GetCloseGroupLocalPeers"; + let key = key.as_kbucket_key(); // calls `kbuckets.closest_keys(key)` internally, which orders the peers by // increasing distance // Note it will return all peers, heance a chop down is required. 
@@ -782,6 +790,7 @@ impl SwarmDriver { .kademlia .get_closest_local_peers(&key) .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) .collect(); let _ = sender.send(closest_peers); @@ -972,72 +981,24 @@ impl SwarmDriver { let _ = self.quotes_history.insert(peer_id, quote); } - /// From all local peers, returns any within (and just exceeding) current get_range for a given key - pub(crate) fn get_filtered_peers_exceeding_range( - &mut self, - target_address: &NetworkAddress, - ) -> Vec { - let acceptable_distance_range = self.get_request_range(); - let target_key = target_address.as_kbucket_key(); - - let peers = self + fn try_interval_replication(&mut self) -> Result<()> { + // get closest peers from buckets, sorted by increasing distance to us + let our_peer_id = self.self_peer_id.into(); + let closest_k_peers = self .swarm .behaviour_mut() .kademlia - .get_closest_local_peers(&target_key) - .filter_map(|key| { - // here we compare _bucket_, not the exact distance. - // We want to include peers that are just outside the range - // Such that we can and will exceed the range in a search eventually - if acceptable_distance_range.ilog2() < target_key.distance(&key).ilog2() { - return None; - } - - // Map KBucketKey to PeerId. - let peer_id = key.into_preimage(); - Some(peer_id) - }) + .get_closest_local_peers(&our_peer_id) + // Map KBucketKey to PeerId. + .map(|key| key.into_preimage()); + + // Only grab the closest nodes within the REPLICATE_RANGE + let mut replicate_targets = closest_k_peers + .into_iter() + // add some leeway to allow for divergent knowledge + .take(REPLICATION_PEERS_COUNT) .collect::>(); - peers - } - - /// From all local peers, returns any within current get_range for a given key - /// Excludes self - pub(crate) fn get_filtered_peers_exceeding_range_or_closest_nodes( - &mut self, - target_address: &NetworkAddress, - ) -> Vec { - let filtered_peers = self.get_filtered_peers_exceeding_range(target_address); - let closest_node_buffer_zone = CLOSE_GROUP_SIZE + close_group_majority(); - if filtered_peers.len() >= closest_node_buffer_zone { - filtered_peers - } else { - warn!("Insufficient peers within replication range of {target_address:?}. Falling back to use {closest_node_buffer_zone:?} closest nodes"); - let all_peers = self.get_all_local_peers_excluding_self(); - match sort_peers_by_address_and_limit( - &all_peers, - target_address, - closest_node_buffer_zone, - ) { - Ok(peers) => peers.iter().map(|p| **p).collect(), - Err(err) => { - error!("sorting peers close to {target_address:?} failed, sort error: {err:?}"); - warn!( - "Using all peers within range even though it's less than CLOSE_GROUP_SIZE." 
- ); - filtered_peers - } - } - } - } - - fn try_interval_replication(&mut self) -> Result<()> { - let our_address = NetworkAddress::from_peer(self.self_peer_id); - - let mut replicate_targets = - self.get_filtered_peers_exceeding_range_or_closest_nodes(&our_address); - let now = Instant::now(); self.replication_targets .retain(|_peer_id, timestamp| *timestamp > now); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index a895655650..d8d71c5601 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -20,7 +20,6 @@ use crate::{ record_store_api::UnifiedRecordStore, relay_manager::RelayManager, replication_fetcher::ReplicationFetcher, - sort_peers_by_distance_to, target_arch::{interval, spawn, Instant}, GetRecordError, Network, CLOSE_GROUP_SIZE, }; @@ -33,6 +32,7 @@ use futures::future::Either; use futures::StreamExt; #[cfg(feature = "local")] use libp2p::mdns; +use libp2p::Transport as _; use libp2p::{core::muxing::StreamMuxerBox, relay}; use libp2p::{ identity::Keypair, @@ -45,7 +45,6 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use libp2p::{kad::KBucketDistance, Transport as _}; #[cfg(feature = "open-metrics")] use prometheus_client::metrics::info::Info; use sn_evm::PaymentQuote; @@ -60,9 +59,10 @@ use sn_protocol::{ }; use sn_registers::SignedRegister; use std::{ - collections::{btree_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, + collections::{btree_map::Entry, BTreeMap, HashMap, HashSet}, fmt::Debug, net::SocketAddr, + num::NonZeroUsize, path::PathBuf, }; use tokio::sync::{mpsc, oneshot}; @@ -77,9 +77,6 @@ pub(crate) const CLOSET_RECORD_CHECK_INTERVAL: Duration = Duration::from_secs(15 /// Interval over which we query relay manager to check if we can make any more reservations. pub(crate) const RELAY_MANAGER_RESERVATION_INTERVAL: Duration = Duration::from_secs(30); -// Number of range distances to keep in the circular buffer -pub const GET_RANGE_STORAGE_LIMIT: usize = 100; - const KAD_STREAM_PROTOCOL_ID: StreamProtocol = StreamProtocol::new("/autonomi/kad/1.0.0"); /// The ways in which the Get Closest queries are used. @@ -90,9 +87,7 @@ pub(crate) enum PendingGetClosestType { /// These are queries made by a function at the upper layers and contains a channel to send the result back. FunctionCall(oneshot::Sender>), } - -/// Maps a query to the address, the type of query and the peers that are being queried. -type PendingGetClosest = HashMap)>; +type PendingGetClosest = HashMap)>; /// Using XorName to differentiate different record content under the same key. type GetRecordResultMap = HashMap)>; @@ -128,6 +123,13 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const KAD_QUERY_TIMEOUT_S: Duration = Duration::from_secs(10); +// Init during compilation, instead of runtime error that should never happen +// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) +const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { + Some(v) => v, + None => panic!("CLOSE_GROUP_SIZE should not be zero"), +}; + /// The various settings to apply to when fetching a record from network #[derive(Clone)] pub struct GetRecordCfg { @@ -347,6 +349,8 @@ impl NetworkBuilder { .set_publication_interval(None) // 1mb packet size .set_max_packet_size(MAX_PACKET_SIZE) + // How many nodes _should_ store data. 
+ .set_replication_factor(REPLICATION_FACTOR) .set_query_timeout(KAD_QUERY_TIMEOUT_S) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. .disjoint_query_paths(true) @@ -426,7 +430,9 @@ impl NetworkBuilder { .set_kbucket_inserts(libp2p::kad::BucketInserts::Manual) .set_max_packet_size(MAX_PACKET_SIZE) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. - .disjoint_query_paths(true); + .disjoint_query_paths(true) + // How many nodes _should_ store data. + .set_replication_factor(REPLICATION_FACTOR); let (network, net_event_recv, driver) = self.build( kad_cfg, @@ -692,8 +698,6 @@ impl NetworkBuilder { bad_nodes: Default::default(), quotes_history: Default::default(), replication_targets: Default::default(), - range_distances: VecDeque::with_capacity(GET_RANGE_STORAGE_LIMIT), - first_contact_made: false, }; let network = Network::new( @@ -729,7 +733,7 @@ pub struct SwarmDriver { pub(crate) local_cmd_sender: mpsc::Sender, local_cmd_receiver: mpsc::Receiver, network_cmd_receiver: mpsc::Receiver, - pub(crate) event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. + event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. /// Trackers for underlying behaviour related events pub(crate) pending_get_closest_peers: PendingGetClosest, @@ -752,13 +756,6 @@ pub struct SwarmDriver { pub(crate) bad_nodes: BadNodes, pub(crate) quotes_history: BTreeMap, pub(crate) replication_targets: BTreeMap, - - // The recent range_distances calculated by the node - // Each update is generated when there is a routing table change - // We use the largest of these X_STORAGE_LIMIT values as our X distance. 
- pub(crate) range_distances: VecDeque, - // have we found out initial peer - pub(crate) first_contact_made: bool, } impl SwarmDriver { @@ -809,24 +806,28 @@ impl SwarmDriver { // logging for handling events happens inside handle_swarm_events // otherwise we're rewriting match statements etc around this anwyay if let Err(err) = self.handle_swarm_events(swarm_event) { - warn!("Issue while handling swarm event: {err}"); + warn!("Error while handling swarm event: {err}"); } }, // thereafter we can check our intervals // runs every bootstrap_interval time _ = bootstrap_interval.tick() => { - self.run_bootstrap_continuously(); + if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await { + bootstrap_interval = new_interval; + } } _ = set_farthest_record_interval.tick() => { if !self.is_client { - let get_range = self.get_request_range(); - self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(get_range); - - // the distance range within the replication_fetcher shall be in sync as well - self.replication_fetcher.set_replication_distance_range(get_range); - - + let closest_k_peers = self.get_closest_k_value_local_peers(); + + if let Some(distance) = self.get_responsbile_range_estimate(&closest_k_peers) { + info!("Set responsible range to {distance}"); + // set any new distance to farthest record in the store + self.swarm.behaviour_mut().kademlia.store_mut().set_distance_range(distance); + // the distance range within the replication_fetcher shall be in sync as well + self.replication_fetcher.set_replication_distance_range(distance); + } } } _ = relay_manager_reservation_interval.tick() => self.relay_manager.try_connecting_to_relay(&mut self.swarm, &self.bad_nodes), @@ -838,90 +839,32 @@ impl SwarmDriver { // ---------- Crate helpers ------------------- // -------------------------------------------- - /// Defines a new X distance range to be used for GETs and data replication - /// - /// Enumerates buckets and generates a random distance in the first bucket - /// that has at least `MIN_PEERS_IN_BUCKET` peers. - /// - pub(crate) fn set_request_range( + /// Uses the closest k peers to estimate the farthest address as + /// `K_VALUE / 2`th peer's bucket. + fn get_responsbile_range_estimate( &mut self, - queried_address: NetworkAddress, - network_discovery_peers: &[PeerId], - ) { - info!( - "Adding a GetRange to our stash deriving from {:?} peers", - network_discovery_peers.len() - ); - - let sorted_distances = sort_peers_by_distance_to(network_discovery_peers, queried_address); - - let mapped: Vec<_> = sorted_distances.iter().map(|d| d.ilog2()).collect(); - info!("Sorted distances: {:?}", mapped); - - let farthest_peer_to_check = self - .get_all_local_peers_excluding_self() - .len() - .checked_div(3 * CLOSE_GROUP_SIZE) - .unwrap_or(1); - - info!("Farthest peer we'll check: {:?}", farthest_peer_to_check); - - let yardstick = if sorted_distances.len() >= farthest_peer_to_check { - sorted_distances.get(farthest_peer_to_check.saturating_sub(1)) - } else { - sorted_distances.last() - }; - if let Some(distance) = yardstick { - if self.range_distances.len() >= GET_RANGE_STORAGE_LIMIT { - if let Some(distance) = self.range_distances.pop_front() { - trace!("Removed distance range: {:?}", distance.ilog2()); - } - } - - info!("Adding new distance range: {:?}", distance.ilog2()); - - self.range_distances.push_back(*distance); + // Sorted list of closest k peers to our peer id. 
+ closest_k_peers: &[PeerId], + ) -> Option { + // if we don't have enough peers we don't set the distance range yet. + let mut farthest_distance = None; + + if closest_k_peers.is_empty() { + return farthest_distance; } - info!( - "Distance between peers in set_request_range call: {:?}", - yardstick - ); - } - - /// Returns the KBucketDistance we are currently using as our X value - /// for range based search. - pub(crate) fn get_request_range(&self) -> KBucketDistance { - let mut sorted_distances = self.range_distances.iter().collect::>(); + let our_address = NetworkAddress::from_peer(self.self_peer_id); - sorted_distances.sort_unstable(); + // get `K_VALUE / 2`th peer's address distance + // This is a rough estimate of the farthest address we might be responsible for. + // We want this to be higher than actually necessary, so we retain more data + // and can be sure to pass bad node checks + let target_index = std::cmp::min(K_VALUE.get() / 2, closest_k_peers.len()) - 1; - let median_index = sorted_distances.len() / 2; + let address = NetworkAddress::from_peer(closest_k_peers[target_index]); + farthest_distance = our_address.distance(&address).ilog2(); - let default = KBucketDistance::default(); - let median = sorted_distances.get(median_index).cloned(); - - if let Some(dist) = median { - *dist - } else { - default - } - } - - /// get all the peers from our local RoutingTable. Excluding self - pub(crate) fn get_all_local_peers_excluding_self(&mut self) -> Vec { - let our_peer_id = self.self_peer_id; - let mut all_peers: Vec = vec![]; - for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { - for entry in kbucket.iter() { - let id = entry.node.key.into_preimage(); - - if id != our_peer_id { - all_peers.push(id); - } - } - } - all_peers + farthest_distance } /// Pushes NetworkSwarmCmd off thread so as to be non-blocking diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 99bf1fbe92..6534c84017 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -30,11 +30,10 @@ pub(super) type Result = std::result::Result; #[derive(Error, Clone)] pub enum GetRecordError { #[error("Get Record completed with non enough copies")] - NotEnoughCopiesInRange { + NotEnoughCopies { record: Record, expected: usize, got: usize, - range: u32, }, #[error("Record not found in the network")] @@ -56,18 +55,16 @@ pub enum GetRecordError { impl Debug for GetRecordError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Self::NotEnoughCopiesInRange { + Self::NotEnoughCopies { record, expected, got, - range, } => { let pretty_key = PrettyPrintRecordKey::from(&record.key); - f.debug_struct("NotEnoughCopiesInRange") + f.debug_struct("NotEnoughCopies") .field("record_key", &pretty_key) .field("expected", &expected) .field("got", &got) - .field("range", &range) .finish() } Self::RecordNotFound => write!(f, "RecordNotFound"), @@ -125,6 +122,9 @@ pub enum NetworkError { #[error("The RecordKind obtained from the Record did not match with the expected kind: {0}")] RecordKindMismatch(RecordKind), + #[error("Record header is incorrect")] + InCorrectRecordHeader, + // ---------- Transfer Errors #[error("Failed to get spend: {0}")] FailedToGetSpend(String), @@ -138,7 +138,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double SpendAttempt was detected. The signed spends are: {0:?}")] + #[error("Double spend(s) attempt was detected. 
The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs index 88a2a7ffca..6551f6e5f0 100644 --- a/sn_networking/src/event/kad.rs +++ b/sn_networking/src/event/kad.rs @@ -7,26 +7,21 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, driver::PendingGetClosestType, get_quorum_value, GetRecordCfg, - GetRecordError, NetworkError, Result, SwarmDriver, CLOSE_GROUP_SIZE, + driver::PendingGetClosestType, get_quorum_value, get_raw_signed_spends_from_record, + target_arch::Instant, GetRecordCfg, GetRecordError, NetworkError, Result, SwarmDriver, + CLOSE_GROUP_SIZE, }; use itertools::Itertools; -use libp2p::{ - kad::{ - self, GetClosestPeersError, InboundRequest, KBucketDistance, PeerRecord, ProgressStep, - QueryId, QueryResult, QueryStats, Quorum, Record, K_VALUE, - }, - PeerId, +use libp2p::kad::{ + self, GetClosestPeersError, InboundRequest, PeerRecord, ProgressStep, QueryId, QueryResult, + QueryStats, Record, K_VALUE, }; use sn_protocol::{ - messages::{Cmd, Request}, - storage::get_type_from_record, - NetworkAddress, PrettyPrintRecordKey, -}; -use std::{ - collections::{hash_map::Entry, HashSet}, - time::Instant, + storage::{try_serialize_record, RecordKind}, + PrettyPrintRecordKey, }; +use sn_transfers::SignedSpend; +use std::collections::{hash_map::Entry, BTreeSet, HashSet}; use tokio::sync::oneshot; use xor_name::XorName; @@ -36,9 +31,6 @@ impl SwarmDriver { let event_string; match kad_event { - // We use this query both to bootstrap and populate our routing table, - // but also to define our GetRange as defined by the largest distance between - // peers in any recent GetClosest call. kad::Event::OutboundQueryProgressed { id, result: QueryResult::GetClosestPeers(Ok(ref closest_peers)), @@ -53,7 +45,7 @@ impl SwarmDriver { ); if let Entry::Occupied(mut entry) = self.pending_get_closest_peers.entry(id) { - let (_, _, current_closest) = entry.get_mut(); + let (_, current_closest) = entry.get_mut(); // TODO: consider order the result and terminate when reach any of the // following criteria: @@ -61,19 +53,16 @@ impl SwarmDriver { // 2, `stats.duration()` is longer than a defined period current_closest.extend(closest_peers.peers.iter().map(|i| i.peer_id)); if current_closest.len() >= usize::from(K_VALUE) || step.last { - let (address, get_closest_type, current_closest) = entry.remove(); - self.network_discovery - .handle_get_closest_query(¤t_closest); - - if let PendingGetClosestType::FunctionCall(sender) = get_closest_type { - sender - .send(current_closest) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } else { - // do not set this via function calls, as that could potentially - // skew the results in favour of heavily queried (and manipulated) - // areas of the network - self.set_request_range(address, ¤t_closest); + let (get_closest_type, current_closest) = entry.remove(); + match get_closest_type { + PendingGetClosestType::NetworkDiscovery => self + .network_discovery + .handle_get_closest_query(current_closest), + PendingGetClosestType::FunctionCall(sender) => { + sender + .send(current_closest) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } } } } else { @@ -92,8 +81,9 @@ impl SwarmDriver { ref step, } => { event_string = "kad_event::get_closest_peers_err"; + error!("GetClosest Query task {id:?} errored with {err:?}, {stats:?} - {step:?}"); - let (address, get_closest_type, mut current_closest) = + 
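
// The accumulation logic restored below groups GetRecord responses by the hash
// of the record content and completes once a single version reaches the expected
// quorum. A minimal, self-contained sketch of that idea; the stand-in types
// replace xor_name::XorName and libp2p::PeerId, and this is not the crate's code.

use std::collections::{HashMap, HashSet};

type ContentHash = [u8; 32]; // stand-in for XorName::from_content(&record.value)
type PeerId = u64; // stand-in for libp2p::PeerId

/// Notes that `peer` returned a record version hashing to `content_hash`; returns
/// true once that version has been seen from `expected_answers` distinct peers.
fn note_response(
    result_map: &mut HashMap<ContentHash, HashSet<PeerId>>,
    content_hash: ContentHash,
    peer: PeerId,
    expected_answers: usize,
) -> bool {
    let holders = result_map.entry(content_hash).or_default();
    let _ = holders.insert(peer);
    holders.len() >= expected_answers
}

fn main() {
    let mut result_map = HashMap::new();
    let version_a = [0u8; 32];
    assert!(!note_response(&mut result_map, version_a, 1, 2));
    assert!(!note_response(&mut result_map, version_a, 1, 2)); // same peer counted once
    assert!(note_response(&mut result_map, version_a, 2, 2)); // quorum of two reached
    // A second key appearing in result_map would be the SplitRecord case.
}
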
let (get_closest_type, mut current_closest) = self.pending_get_closest_peers.remove(&id).ok_or_else(|| { debug!( "Can't locate query task {id:?}, it has likely been completed already." @@ -110,23 +100,13 @@ impl SwarmDriver { match err { GetClosestPeersError::Timeout { ref peers, .. } => { current_closest.extend(peers.iter().map(|i| i.peer_id)); - if current_closest.len() < CLOSE_GROUP_SIZE { - error!( - "GetClosest Query task {id:?} errored, not enough found. {err:?}, {stats:?} - {step:?}" - ); - } } } match get_closest_type { - PendingGetClosestType::NetworkDiscovery => { - // do not set this via function calls, as that could potentially - // skew the results in favour of heavily queried (and manipulated) - // areas of the network - self.set_request_range(address, ¤t_closest); - self.network_discovery - .handle_get_closest_query(¤t_closest); - } + PendingGetClosestType::NetworkDiscovery => self + .network_discovery + .handle_get_closest_query(current_closest), PendingGetClosestType::FunctionCall(sender) => { sender .send(current_closest) @@ -147,7 +127,7 @@ impl SwarmDriver { PrettyPrintRecordKey::from(&peer_record.record.key), peer_record.peer ); - self.accumulate_get_record_found(id, peer_record)?; + self.accumulate_get_record_found(id, peer_record, stats, step)?; } kad::Event::OutboundQueryProgressed { id, @@ -268,13 +248,12 @@ impl SwarmDriver { event_string = "kad_event::RoutingUpdated"; if is_new_peer { self.update_on_peer_addition(peer); - } - if !self.first_contact_made { // This should only happen once - self.first_contact_made = true; - info!("Performing the first bootstrap"); - self.trigger_network_discovery(); + if self.bootstrap.notify_new_peer() { + info!("Performing the first bootstrap"); + self.trigger_network_discovery(); + } } info!("kad_event::RoutingUpdated {:?}: {peer:?}, is_new_peer: {is_new_peer:?} old_peer: {old_peer:?}", self.peers_in_rt); @@ -341,7 +320,6 @@ impl SwarmDriver { // `QueryStats::requests` to be 20 (K-Value) // `QueryStats::success` to be over majority of the requests // `err::NotFound::closest_peers` contains a list of CLOSE_GROUP_SIZE peers - // // 2, targeting an existing entry // there will a sequence of (at least CLOSE_GROUP_SIZE) events of // `kad::Event::OutboundQueryProgressed` to be received @@ -355,30 +333,26 @@ impl SwarmDriver { // where: `cache_candidates`: being the peers supposed to hold the record but not // `ProgressStep::count`: to be `number of received copies plus one` // `ProgressStep::last` to be `true` - // - // /// Accumulates the GetRecord query results - /// If we get enough responses (ie exceed GetRange) for a record with the same content hash: + /// If we get enough responses (quorum) for a record with the same content hash: /// - we return the Record after comparing with the target record. This might return RecordDoesNotMatch if the /// check fails. /// - if multiple content hashes are found, we return a SplitRecord Error /// And then we stop the kad query as we are done here. - /// We do not need to wait for GetRange to be exceeded here and should return early. 
fn accumulate_get_record_found( &mut self, query_id: QueryId, peer_record: PeerRecord, + _stats: QueryStats, + step: ProgressStep, ) -> Result<()> { - let expected_get_range = self.get_request_range(); - let key = peer_record.record.key.clone(); - let peer_id = if let Some(peer_id) = peer_record.peer { peer_id } else { self.self_peer_id }; - let pretty_key = PrettyPrintRecordKey::from(&key).into_owned(); + let pretty_key = PrettyPrintRecordKey::from(&peer_record.record.key).into_owned(); if let Entry::Occupied(mut entry) = self.pending_get_record.entry(query_id) { let (_key, _senders, result_map, cfg) = entry.get_mut(); @@ -393,97 +367,92 @@ impl SwarmDriver { // Insert the record and the peer into the result_map. let record_content_hash = XorName::from_content(&peer_record.record.value); - - let peer_list = + let responded_peers = if let Entry::Occupied(mut entry) = result_map.entry(record_content_hash) { let (_, peer_list) = entry.get_mut(); - let _ = peer_list.insert(peer_id); - peer_list.clone() + peer_list.len() } else { let mut peer_list = HashSet::new(); let _ = peer_list.insert(peer_id); - result_map.insert( - record_content_hash, - (peer_record.record.clone(), peer_list.clone()), - ); - - peer_list + result_map.insert(record_content_hash, (peer_record.record.clone(), peer_list)); + 1 }; - let responded_peers = peer_list.len(); - let expected_answers = get_quorum_value(&cfg.get_quorum); - trace!("Expecting {expected_answers:?} answers to exceed {expected_get_range:?} for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); - // return error if the entry cannot be found - return Err(NetworkError::ReceivedKademliaEventDropped { - query_id, - event: format!("Accumulate Get Record of {pretty_key:?}"), - }); - } - Ok(()) - } - /// Checks passed peers from a request and checks they are sufficiently spaced to - /// ensure we have searched enough of the network range as determined by our `get_range` - /// - /// We expect any conflicting records to have been reported prior to this check, - /// so we assume we're returning unique records only. 
- fn have_we_have_searched_thoroughly_for_quorum( - expected_get_range: KBucketDistance, - searched_peers_list: &HashSet, - data_key_address: &NetworkAddress, - quorum: &Quorum, - ) -> bool { - info!("Assessing search: range: {:?}, address: {data_key_address:?}, quorum required: {quorum:?}, peers_returned_count: {:?}", expected_get_range.ilog2(), searched_peers_list.len()); - let is_sensitive_data = matches!(quorum, Quorum::All); - - let required_quorum = get_quorum_value(quorum); - - let met_quorum = searched_peers_list.len() >= required_quorum; - - // we only enforce range if we have sensitive data...for data spends quorum::all - if met_quorum && !is_sensitive_data { - return true; - } + debug!("Expecting {expected_answers:?} answers for record {pretty_key:?} task {query_id:?}, received {responded_peers} so far"); - // get the farthest distance between peers in the response - let mut max_distance_to_data_from_responded_nodes = KBucketDistance::default(); + if responded_peers >= expected_answers { + if !cfg.expected_holders.is_empty() { + debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with non-responded expected holders {:?}", cfg.expected_holders); + } + let cfg = cfg.clone(); - // iterate over peers and see if the distance to the data is greater than the get_range - for peer_id in searched_peers_list.iter() { - let peer_address = NetworkAddress::from_peer(*peer_id); - let distance_to_data = peer_address.distance(data_key_address); - if max_distance_to_data_from_responded_nodes < distance_to_data { - max_distance_to_data_from_responded_nodes = distance_to_data; - } - } + // Remove the query task and consume the variables. + let (_key, senders, result_map, _) = entry.remove(); - // use ilog2 as simplified distance check - // It allows us to say "we've searched up to and including this bucket" - // as opposed to the concrete distance itself (which statistically seems like we can fall outwith a range - // quite easily with a small number of peers) - let exceeded_request_range = if max_distance_to_data_from_responded_nodes.ilog2() - < expected_get_range.ilog2() - { - let dist = max_distance_to_data_from_responded_nodes.ilog2(); - let expected_dist = expected_get_range.ilog2(); + if result_map.len() == 1 { + Self::send_record_after_checking_target(senders, peer_record.record, &cfg)?; + } else { + debug!("For record {pretty_key:?} task {query_id:?}, fetch completed with split record"); + let mut accumulated_spends = BTreeSet::new(); + for (record, _) in result_map.values() { + match get_raw_signed_spends_from_record(record) { + Ok(spends) => { + accumulated_spends.extend(spends); + } + Err(_) => { + continue; + } + } + } + if !accumulated_spends.is_empty() { + info!("For record {pretty_key:?} task {query_id:?}, found split record for a spend, accumulated and sending them as a single record"); + let accumulated_spends = + accumulated_spends.into_iter().collect::>(); + + let bytes = try_serialize_record(&accumulated_spends, RecordKind::Spend)?; + + let new_accumulated_record = Record { + key: peer_record.record.key, + value: bytes.to_vec(), + publisher: None, + expires: None, + }; + for sender in senders { + let new_accumulated_record = new_accumulated_record.clone(); - warn!("RANGE: {data_key_address:?} Insufficient GetRange searched. 
{dist:?} {expected_dist:?} {max_distance_to_data_from_responded_nodes:?} is less than expcted GetRange of {expected_get_range:?}"); + sender + .send(Ok(new_accumulated_record)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } + } else { + for sender in senders { + let result_map = result_map.clone(); + sender + .send(Err(GetRecordError::SplitRecord { result_map })) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } + } + } - false + // Stop the query; possibly stops more nodes from being queried. + if let Some(mut query) = self.swarm.behaviour_mut().kademlia.query_mut(&query_id) { + query.finish(); + } + } else if usize::from(step.count) >= CLOSE_GROUP_SIZE { + debug!("For record {pretty_key:?} task {query_id:?}, got {:?} with {} versions so far.", + step.count, result_map.len()); + } } else { - true - }; - - // We assume a finalised query has searched as far as it can in libp2p - - if exceeded_request_range && met_quorum { - warn!("RANGE: {data_key_address:?} Request satisfied as exceeded request range : {exceeded_request_range:?} and Quorum satisfied with {:?} peers exceeding quorum {required_quorum:?}", searched_peers_list.len()); - return true; + // return error if the entry cannot be found + return Err(NetworkError::ReceivedKademliaEventDropped { + query_id, + event: format!("Accumulate Get Record of {pretty_key:?}"), + }); } - - false + Ok(()) } /// Handles the possible cases when a GetRecord Query completes. @@ -500,92 +469,16 @@ impl SwarmDriver { let (result, log_string) = if let Some((record, from_peers)) = result_map.values().next() { - let data_key_address = NetworkAddress::from_record_key(&record.key); - let expected_get_range = self.get_request_range(); - - let we_have_searched_thoroughly = Self::have_we_have_searched_thoroughly_for_quorum( - expected_get_range, - from_peers, - &data_key_address, - &cfg.get_quorum, - ); - - let pretty_key = PrettyPrintRecordKey::from(&record.key); - info!("RANGE: {pretty_key:?} we_have_searched_far_enough: {we_have_searched_thoroughly:?}"); - - let result = if num_of_versions > 1 { - warn!("RANGE: more than one version found!"); - Err(GetRecordError::SplitRecord { - result_map: result_map.clone(), - }) - } else if we_have_searched_thoroughly { - warn!("RANGE: Get record finished: {pretty_key:?} Enough of the network has responded or it's not sensitive data... and we only have one copy..."); - - Ok(record.clone()) - } else { - // We have not searched enough of the network range. - let result = Err(GetRecordError::NotEnoughCopiesInRange { + let result = if num_of_versions == 1 { + Err(GetRecordError::NotEnoughCopies { record: record.clone(), expected: get_quorum_value(&cfg.get_quorum), got: from_peers.len(), - range: expected_get_range.ilog2().unwrap_or(0), - }); - - // This should be a backstop... Quorum::All is the only one that enforces - // a full search of the network range. - if matches!(cfg.get_quorum, Quorum::All) { - warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need to extend the range and PUT the data. 
{result:?}"); - - warn!("Reputting data to network {pretty_key:?}..."); - - // let's ensure we have an updated network view - self.trigger_network_discovery(); - - warn!("RANGE: {pretty_key:?} Query Finished: Not enough of the network has responded, we need PUT the data back into nodes in that range."); - - let record_type = get_type_from_record(record)?; - - let replicate_targets: HashSet<_> = self - .get_filtered_peers_exceeding_range_or_closest_nodes(&data_key_address) - .iter() - .cloned() - .collect(); - - if from_peers == &replicate_targets { - warn!("RANGE: {pretty_key:?} We asked everyone we know of in that range already!"); - } - - // set holder to someone that has the data - let holder = NetworkAddress::from_peer( - from_peers - .iter() - .next() - .cloned() - .unwrap_or(self.self_peer_id), - ); - - for peer in replicate_targets { - warn!("Reputting data to {peer:?} for {pretty_key:?} if needed..."); - // Do not send to any peer that has already informed us - if from_peers.contains(&peer) { - continue; - } - - debug!("RANGE: (insufficient, so ) Sending data to unresponded peer: {peer:?} for {pretty_key:?}"); - - // nodes will try/fail to trplicate it from us, but grab from the network thereafter - self.queue_network_swarm_cmd(NetworkSwarmCmd::SendRequest { - req: Request::Cmd(Cmd::Replicate { - holder: holder.clone(), - keys: vec![(data_key_address.clone(), record_type.clone())], - }), - peer, - sender: None, - }); - } - } - - result + }) + } else { + Err(GetRecordError::SplitRecord { + result_map: result_map.clone(), + }) }; ( @@ -615,6 +508,8 @@ impl SwarmDriver { .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } } else { + // We manually perform `query.finish()` if we return early from accumulate fn. + // Thus we will still get FinishedWithNoAdditionalRecord. debug!("Can't locate query task {query_id:?} during GetRecord finished. We might have already returned the result to the sender."); } Ok(()) diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs index ca6808ed1b..4550772bf4 100644 --- a/sn_networking/src/event/request_response.rs +++ b/sn_networking/src/event/request_response.rs @@ -7,21 +7,17 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address_and_limit, MsgResponder, - NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, + cmd::NetworkSwarmCmd, log_markers::Marker, sort_peers_by_address, MsgResponder, NetworkError, + NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE, }; -use libp2p::{ - kad::RecordKey, - request_response::{self, Message}, - PeerId, -}; -use rand::{rngs::OsRng, Rng}; +use itertools::Itertools; +use libp2p::request_response::{self, Message}; +use rand::{rngs::OsRng, thread_rng, Rng}; use sn_protocol::{ messages::{CmdResponse, Request, Response}, storage::RecordType, NetworkAddress, }; -use std::collections::HashMap; impl SwarmDriver { /// Forwards `Request` to the upper layers using `Sender`. 
Sends `Response` to the peers @@ -194,9 +190,6 @@ impl SwarmDriver { sender: NetworkAddress, incoming_keys: Vec<(NetworkAddress, RecordType)>, ) { - let peers = self.get_all_local_peers_excluding_self(); - let our_peer_id = self.self_peer_id; - let holder = if let Some(peer_id) = sender.as_peer_id() { peer_id } else { @@ -209,12 +202,16 @@ impl SwarmDriver { incoming_keys.len() ); - // accept replication requests from all peers known peers within our GetRange - if !peers.contains(&holder) || holder == our_peer_id { - trace!("Holder {holder:?} is self or not in replication range."); + // accept replication requests from the closest K_VALUE peers, + // giving us some margin for replication + let closest_k_peers = self.get_closest_k_value_local_peers(); + if !closest_k_peers.contains(&holder) || holder == self.self_peer_id { + debug!("Holder {holder:?} is self or not in replication range."); return; } + let more_than_one_key = incoming_keys.len() > 1; + // On receiving a replication_list from a close_group peer, we undertake two tasks: // 1, For those keys that we don't have: // fetch them if close enough to us @@ -227,94 +224,81 @@ impl SwarmDriver { .behaviour_mut() .kademlia .store_mut() - .record_addresses_ref() - .clone(); - - let keys_to_fetch = - self.replication_fetcher - .add_keys(holder, incoming_keys, &all_keys, &peers); - + .record_addresses_ref(); + let keys_to_fetch = self + .replication_fetcher + .add_keys(holder, incoming_keys, all_keys); if keys_to_fetch.is_empty() { debug!("no waiting keys to fetch from the network"); } else { self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch)); } - let event_sender = self.event_sender.clone(); - let _handle = tokio::spawn(async move { - let keys_to_verify = - Self::select_verification_data_candidates(&peers, &all_keys, &sender); - - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {holder:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: holder, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); - } + // Only trigger the chunk_proof check a small percentage of the time + let mut rng = thread_rng(); + // 5% probability + if more_than_one_key && rng.gen_bool(0.05) { + self.verify_peer_storage(sender.clone()); // In addition to verifying the sender, we also verify a random close node. // This is to avoid a malicious node escaping the check by never sending a replication_list.
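+ // i.e. a sender replicating multi-key lists gets storage-checked on roughly 1 in 20 of the lists it sends.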
// With further reduced probability of 1% (5% * 20%) - let close_group_peers = sort_peers_by_address_and_limit( - &peers, - &NetworkAddress::from_peer(our_peer_id), - CLOSE_GROUP_SIZE, - ) - .unwrap_or_default(); - - loop { - let index: usize = OsRng.gen_range(0..close_group_peers.len()); - let candidate_peer_id = *close_group_peers[index]; - let candidate = NetworkAddress::from_peer(*close_group_peers[index]); - if sender != candidate { - let keys_to_verify = - Self::select_verification_data_candidates(&peers, &all_keys, &candidate); - - if keys_to_verify.is_empty() { - debug!("No valid candidate to be checked against peer {candidate:?}"); - } else if let Err(error) = event_sender - .send(NetworkEvent::ChunkProofVerification { - peer_id: candidate_peer_id, - keys_to_verify, - }) - .await - { - error!("SwarmDriver failed to send event: {}", error); + if rng.gen_bool(0.2) { + let close_group_peers = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&self.self_peer_id.into()) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect_vec(); + if close_group_peers.len() == CLOSE_GROUP_SIZE { + loop { + let index: usize = OsRng.gen_range(0..close_group_peers.len()); + let candidate = NetworkAddress::from_peer(close_group_peers[index]); + if sender != candidate { + self.verify_peer_storage(candidate); + break; + } } - - break; } } - }); + } } /// Check among all chunk type records that we have, select those close to the peer, /// and randomly pick one as the verification candidate. - fn select_verification_data_candidates( - all_peers: &Vec<PeerId>, - all_keys: &HashMap<RecordKey, (NetworkAddress, RecordType)>, - peer: &NetworkAddress, - ) -> Vec<NetworkAddress> { + fn verify_peer_storage(&mut self, peer: NetworkAddress) { + let mut closest_peers = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&self.self_peer_id.into()) + .map(|peer| peer.into_preimage()) + .take(20) + .collect_vec(); + closest_peers.push(self.self_peer_id); + let target_peer = if let Some(peer_id) = peer.as_peer_id() { peer_id } else { error!("Target {peer:?} is not a valid PeerId"); - return vec![]; + return; }; + let all_keys = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .record_addresses_ref(); + // Targeted chunk type records are expected to be within close range from our perspective. let mut verify_candidates: Vec<NetworkAddress> = all_keys .values() .filter_map(|(addr, record_type)| { if RecordType::Chunk == *record_type { - // Here we take the actual closest, as this is where we want to be - // strict about who does have the data... - match sort_peers_by_address_and_limit(all_peers, addr, CLOSE_GROUP_SIZE) { + match sort_peers_by_address(&closest_peers, addr, CLOSE_GROUP_SIZE) { Ok(close_group) => { if close_group.contains(&&target_peer) { Some(addr.clone()) @@ -335,6 +319,17 @@ impl SwarmDriver { verify_candidates.sort_by_key(|a| peer.distance(a)); - verify_candidates + // To ensure the candidate is actually held by the peer, + // we only carry out the check when a certain amount of chunks have already been uploaded + // AND choose the candidate from a reduced range.
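+ // e.g. with 80 sorted candidates we sample uniformly from the 40 closest to the target peer, so the proof request covers data the peer is most responsible for.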
+ if verify_candidates.len() > 50 { + let index: usize = OsRng.gen_range(0..(verify_candidates.len() / 2)); + self.send_event(NetworkEvent::ChunkProofVerification { + peer_id: target_peer, + keys_to_verify: vec![verify_candidates[index].clone()], + }); + } else { + debug!("No valid candidate to be checked against peer {peer:?}"); + } } } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 2416b5681c..982088f102 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,8 +7,8 @@ // permissions and limitations relating to use of the SAFE Network Software. use crate::{ - cmd::NetworkSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, - relay_manager::is_a_relayed_peer, target_arch::Instant, NetworkEvent, Result, SwarmDriver, + event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, + target_arch::Instant, NetworkEvent, Result, SwarmDriver, }; #[cfg(feature = "local")] use libp2p::mdns; @@ -25,7 +25,7 @@ use libp2p::{ }; use sn_protocol::version::{IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR}; use std::collections::HashSet; -use tokio::{sync::oneshot, time::Duration}; +use tokio::time::Duration; impl SwarmDriver { /// Handle `SwarmEvents` @@ -244,7 +244,7 @@ impl SwarmDriver { } // If we are not local, we care only for peers that we dialed and thus are reachable. - if !self.local && has_dialed { + if self.local || has_dialed { // A bad node cannot establish a connection with us. So we can add it to the RT directly. self.remove_bootstrap_from_full(peer_id); @@ -254,10 +254,7 @@ impl SwarmDriver { multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit)) }); } - } - if self.local || has_dialed { - // If we are not local, we care only for peers that we dialed and thus are reachable. debug!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table"); // Attempt to add the addresses to the routing table. 
@@ -395,7 +392,6 @@ impl SwarmDriver { let _ = self.live_connected_peers.remove(&connection_id); self.record_connection_metrics(); - let mut failed_peer_addresses = vec![]; // we need to decide if this was a critical error and the peer should be removed from the routing table let should_clean_peer = match error { DialError::Transport(errors) => { @@ -405,14 +401,10 @@ impl SwarmDriver { // so we default to it not being a real issue // unless there are _specific_ errors (connection refused eg) error!("Dial errors len : {:?}", errors.len()); - let mut remove_peer_track_peer_issue = false; - for (addr, err) in errors { + let mut there_is_a_serious_issue = false; + for (_addr, err) in errors { error!("OutgoingTransport error : {err:?}"); - if !failed_peer_addresses.contains(&addr) { - failed_peer_addresses.push(addr) - } - match err { TransportError::MultiaddrNotSupported(addr) => { warn!("Multiaddr not supported : {addr:?}"); @@ -422,13 +414,14 @@ impl SwarmDriver { println!("If this was your bootstrap peer, restart your node with a supported multiaddr"); } // if we can't dial a peer on a given address, we should remove it from the routing table - remove_peer_track_peer_issue = false + there_is_a_serious_issue = true } TransportError::Other(err) => { - let problematic_errors = - ["ConnectionRefused", "HostUnreachable"]; - - let intermittent_errors = ["HandshakeTimedOut"]; + let problematic_errors = [ + "ConnectionRefused", + "HostUnreachable", + "HandshakeTimedOut", + ]; let is_bootstrap_peer = self .bootstrap_peers @@ -439,7 +432,7 @@ impl SwarmDriver { && self.peers_in_rt < self.bootstrap_peers.len() { warn!("OutgoingConnectionError: On bootstrap peer {failed_peer_id:?}, while still in bootstrap mode, ignoring"); - remove_peer_track_peer_issue = false; + there_is_a_serious_issue = false; } else { // It is really difficult to match this error, due to being eg: // Custom { kind: Other, error: Left(Left(Os { code: 61, kind: ConnectionRefused, message: "Connection refused" })) } @@ -450,19 +443,13 @@ impl SwarmDriver { .any(|err| error_msg.contains(err)) { warn!("Problematic error encountered: {error_msg}"); - remove_peer_track_peer_issue = true; - } else if intermittent_errors - .iter() - .any(|err| error_msg.contains(err)) - { - warn!("Intermittent error encountered: {error_msg}"); - remove_peer_track_peer_issue = false; + there_is_a_serious_issue = true; } } } } } - remove_peer_track_peer_issue + there_is_a_serious_issue } DialError::NoAddresses => { // We provided no address, and while we can't really blame the peer @@ -503,7 +490,7 @@ impl SwarmDriver { }; if should_clean_peer { - warn!("Serious issue with {failed_peer_id:?}. Clearing it out for now"); + warn!("Tracking issue of {failed_peer_id:?}. Clearing it out for now"); if let Some(dead_peer) = self .swarm @@ -514,15 +501,6 @@ impl SwarmDriver { self.update_on_peer_removal(*dead_peer.node.key.preimage()); } } - - if !should_clean_peer { - // lets try and redial. - for addr in failed_peer_addresses { - let (sender, _recv) = oneshot::channel(); - - self.queue_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); - } - } } SwarmEvent::IncomingConnectionError { connection_id, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index c9244dbc46..27f07bdb3e 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -78,6 +78,10 @@ use tokio::time::Duration; /// The type of quote for a selected payee. 
pub type PayeeQuote = (PeerId, RewardsAddress, PaymentQuote); +/// The count of peers that will be considered as close to a record target, +/// that a replication of the record shall be sent/accepted to/by the peer. +pub const REPLICATION_PEERS_COUNT: usize = CLOSE_GROUP_SIZE + 2; + /// Majority of a given group (i.e. > 1/2). #[inline] pub const fn close_group_majority() -> usize { @@ -93,47 +97,17 @@ const MIN_WAIT_BEFORE_READING_A_PUT: Duration = Duration::from_millis(300); /// Sort the provided peers by their distance to the given `NetworkAddress`. /// Return with the closest expected number of entries if has. -pub fn sort_peers_by_address_and_limit<'a>( +pub fn sort_peers_by_address<'a>( peers: &'a Vec, address: &NetworkAddress, expected_entries: usize, ) -> Result> { - sort_peers_by_key_and_limit(peers, &address.as_kbucket_key(), expected_entries) -} - -/// Sort the provided peers by their distance to the given `NetworkAddress`. -/// Return with the closest expected number of entries if has. -pub fn sort_peers_by_distance_to( - peers: &[PeerId], - queried_address: NetworkAddress, -) -> Vec { - let mut sorted_distances: Vec<_> = peers - .iter() - .map(|peer| { - let addr = NetworkAddress::from_peer(*peer); - queried_address.distance(&addr) - }) - .collect(); - - sorted_distances.sort(); - - sorted_distances -} - -/// Sort the provided peers by their distance to the given `NetworkAddress`. -/// Return with the closest expected number of entries if has. -#[allow(clippy::result_large_err)] -pub fn sort_peers_by_address_and_limit_by_distance<'a>( - peers: &'a Vec, - address: &NetworkAddress, - distance: KBucketDistance, -) -> Result> { - limit_peers_by_distance(peers, &address.as_kbucket_key(), distance) + sort_peers_by_key(peers, &address.as_kbucket_key(), expected_entries) } /// Sort the provided peers by their distance to the given `KBucketKey`. /// Return with the closest expected number of entries if has. -pub fn sort_peers_by_key_and_limit<'a, T>( +pub fn sort_peers_by_key<'a, T>( peers: &'a Vec, key: &KBucketKey, expected_entries: usize, @@ -170,40 +144,6 @@ pub fn sort_peers_by_key_and_limit<'a, T>( Ok(sorted_peers) } -/// Only return peers closer to key than the provided distance -/// Their distance is measured by closeness to the given `KBucketKey`. -/// Return with the closest expected number of entries if has. -#[allow(clippy::result_large_err)] -pub fn limit_peers_by_distance<'a, T>( - peers: &'a Vec, - key: &KBucketKey, - distance: KBucketDistance, -) -> Result> { - // Check if there are enough peers to satisfy the request. - // bail early if that's not the case - if CLOSE_GROUP_SIZE > peers.len() { - warn!("Not enough peers in the k-bucket to satisfy the request"); - return Err(NetworkError::NotEnoughPeers { - found: peers.len(), - required: CLOSE_GROUP_SIZE, - }); - } - - // Create a vector of tuples where each tuple is a reference to a peer and its distance to the key. - // This avoids multiple computations of the same distance in the sorting process. 
- let mut peers_within_distance: Vec<&PeerId> = Vec::with_capacity(peers.len()); - - for peer_id in peers { - let addr = NetworkAddress::from_peer(*peer_id); - let peer_distance = key.distance(&addr.as_kbucket_key()); - - if peer_distance < distance { - peers_within_distance.push(peer_id); - } - } - - Ok(peers_within_distance) -} #[derive(Clone, Debug)] /// API to interact with the underlying Swarm @@ -257,13 +197,6 @@ impl Network { &self.inner.local_swarm_cmd_sender } - /// Return the GetRange as determined by the internal SwarmDriver - pub async fn get_range(&self) -> Result { - let (sender, receiver) = oneshot::channel(); - self.send_local_swarm_cmd(LocalSwarmCmd::GetCurrentRange { sender }); - receiver.await.map_err(NetworkError::from) - } - /// Signs the given data with the node's keypair. pub fn sign(&self, msg: &[u8]) -> Result> { self.keypair().sign(msg).map_err(NetworkError::from) @@ -287,121 +220,19 @@ impl Network { receiver.await? } - /// Replicate a fresh record to its close group peers. - /// This should not be triggered by a record we receive via replicaiton fetch - pub async fn replicate_valid_fresh_record(&self, paid_key: RecordKey, record_type: RecordType) { - let network = self; - - let start = std::time::Instant::now(); - let pretty_key = PrettyPrintRecordKey::from(&paid_key); - - // first we wait until our own network store can return the record - // otherwise it may not be fully written yet - let mut retry_count = 0; - trace!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); - loop { - let record = match network.get_local_record(&paid_key).await { - Ok(record) => record, - Err(err) => { - error!( - "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" - ); - None - } - }; - - if record.is_some() { - break; - } - - if retry_count > 10 { - error!( - "Could not get record from store for replication: {pretty_key:?} after 10 retries" - ); - return; - } - - retry_count += 1; - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - - trace!("Start replication of fresh record {pretty_key:?} from store"); - - let all_peers = match network.get_all_local_peers_excluding_self().await { - Ok(peers) => peers, - Err(err) => { - error!( - "Replicating fresh record {pretty_key:?} get_all_local_peers errored: {err:?}" - ); - return; - } - }; - - let data_addr = NetworkAddress::from_record_key(&paid_key); - let mut peers_to_replicate_to = match network.get_range().await { - Err(error) => { - error!("Replicating fresh record {pretty_key:?} get_range errored: {error:?}"); - - return; - } - - Ok(our_get_range) => { - match sort_peers_by_address_and_limit_by_distance( - &all_peers, - &data_addr, - our_get_range, - ) { - Ok(result) => result, - Err(err) => { - error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); - return; - } - } - } - }; - - if peers_to_replicate_to.len() < CLOSE_GROUP_SIZE { - warn!( - "Replicating fresh record {pretty_key:?} current GetRange insufficient for secure replication. 
Falling back to CLOSE_GROUP_SIZE" - ); - - peers_to_replicate_to = - match sort_peers_by_address_and_limit(&all_peers, &data_addr, CLOSE_GROUP_SIZE) { - Ok(result) => result, - Err(err) => { - error!("When replicating fresh record {pretty_key:?}, sort error: {err:?}"); - return; - } - }; - } - - let our_peer_id = network.peer_id(); - let our_address = NetworkAddress::from_peer(our_peer_id); - #[allow(clippy::mutable_key_type)] // for Bytes in NetworkAddress - let keys = vec![(data_addr.clone(), record_type.clone())]; - - for peer_id in &peers_to_replicate_to { - trace!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); - let request = Request::Cmd(Cmd::Replicate { - holder: our_address.clone(), - keys: keys.clone(), - }); - - network.send_req_ignore_reply(request, **peer_id); - } - trace!( - "Completed replicate fresh record {pretty_key:?} to {:?} peers on store, in {:?}", - peers_to_replicate_to.len(), - start.elapsed() - ); - } - /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name. /// Excludes the client's `PeerId` while calculating the closest peers. pub async fn client_get_closest_peers(&self, key: &NetworkAddress) -> Result> { self.get_closest_peers(key, true).await } + /// Returns the closest peers to the given `NetworkAddress`, sorted by their distance to the key. + /// + /// Includes our node's `PeerId` while calculating the closest peers. + pub async fn node_get_closest_peers(&self, key: &NetworkAddress) -> Result> { + self.get_closest_peers(key, false).await + } + /// Returns a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. /// Does not include self @@ -414,10 +245,10 @@ impl Network { } /// Returns all the PeerId from all the KBuckets from our local Routing Table - /// Excludes our own PeerId. - pub async fn get_all_local_peers_excluding_self(&self) -> Result> { + /// Also contains our own PeerId. + pub async fn get_closest_k_value_local_peers(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalPeersExcludingSelf { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); receiver .await @@ -667,10 +498,6 @@ impl Network { key: RecordKey, cfg: &GetRecordCfg, ) -> Result { - use std::collections::BTreeSet; - - use sn_transfers::SignedSpend; - let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( backoff::ExponentialBackoff { @@ -701,7 +528,7 @@ impl Network { Err(GetRecordError::RecordDoesNotMatch(_)) => { warn!("The returned record does not match target {pretty_key:?}."); } - Err(GetRecordError::NotEnoughCopiesInRange { expected, got, .. }) => { + Err(GetRecordError::NotEnoughCopies { expected, got, .. }) => { warn!("Not enough copies ({got}/{expected}) found yet for {pretty_key:?}."); } // libp2p RecordNotFound does mean no holders answered. @@ -710,39 +537,8 @@ impl Network { Err(GetRecordError::RecordNotFound) => { warn!("No holder of record '{pretty_key:?}' found."); } - Err(GetRecordError::SplitRecord { result_map }) => { + Err(GetRecordError::SplitRecord { .. 
}) => { error!("Encountered a split record for {pretty_key:?}."); - - // attempt to deserialise and accumulate any spends - let mut accumulated_spends = BTreeSet::new(); - let results_count = result_map.len(); - // try and accumulate any SpendAttempts - if results_count > 1 { - info!("For record {pretty_key:?}, we have more than one result returned."); - // Allow for early bail if we've already seen a split SpendAttempt - for (record, _) in result_map.values() { - match get_raw_signed_spends_from_record(record) { - Ok(spends) => { - accumulated_spends.extend(spends); - } - Err(_) => { - continue; - } - } - } - } - - // we have a Double SpendAttempt and will exit - if accumulated_spends.len() > 1 { - info!("For record {pretty_key:?} task found split record for a spend, accumulated and sending them as a single record"); - let accumulated_spends = - accumulated_spends.into_iter().collect::>(); - - return Err(backoff::Error::Permanent(NetworkError::DoubleSpendAttempt( - accumulated_spends, - ))); - } - } Err(GetRecordError::QueryTimeout) => { error!("Encountered query timeout for {pretty_key:?}."); @@ -1107,7 +903,7 @@ impl Network { debug!("Network knowledge of close peers to {key:?} are: {close_peers_pretty_print:?}"); } - let closest_peers = sort_peers_by_address_and_limit(&closest_peers, key, CLOSE_GROUP_SIZE)?; + let closest_peers = sort_peers_by_address(&closest_peers, key, CLOSE_GROUP_SIZE)?; Ok(closest_peers.into_iter().cloned().collect()) } diff --git a/sn_networking/src/network_discovery.rs b/sn_networking/src/network_discovery.rs index 3d82c944fb..f3f4986134 100644 --- a/sn_networking/src/network_discovery.rs +++ b/sn_networking/src/network_discovery.rs @@ -8,6 +8,7 @@ use crate::target_arch::Instant; use libp2p::{kad::KBucketKey, PeerId}; +use rand::{thread_rng, Rng}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use sn_protocol::NetworkAddress; use std::collections::{btree_map::Entry, BTreeMap}; @@ -51,13 +52,13 @@ impl NetworkDiscovery { } /// The result from the kad::GetClosestPeers are again used to update our kbucket. - pub(crate) fn handle_get_closest_query(&mut self, closest_peers: &[PeerId]) { + pub(crate) fn handle_get_closest_query(&mut self, closest_peers: Vec) { let now = Instant::now(); let candidates_map: BTreeMap> = closest_peers - .iter() + .into_iter() .filter_map(|peer| { - let peer = NetworkAddress::from_peer(*peer); + let peer = NetworkAddress::from_peer(peer); let peer_key = peer.as_kbucket_key(); peer_key .distance(&self.self_key) @@ -82,28 +83,18 @@ impl NetworkDiscovery { /// Returns one random candidate per bucket. Also tries to refresh the candidate list. /// Todo: Limit the candidates to return. Favor the closest buckets. 
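/// (A candidate is a generated address lying in a particular bucket; the driver queries the network for it via kad::GetClosestPeers, and `handle_get_closest_query` above feeds the results back into our routing table.)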
- pub(crate) fn candidates(&mut self) -> Vec<NetworkAddress> { - let mut op = Vec::with_capacity(self.candidates.len()); - - let mut generate_fresh_candidates = false; - for addresses in self.candidates.values_mut() { - // get a random candidate from each bucket each time - if addresses.is_empty() { - generate_fresh_candidates = true; - continue; - } + pub(crate) fn candidates(&mut self) -> Vec<&NetworkAddress> { + self.try_refresh_candidates(); - // remove the first each time - let address = addresses.remove(0); - op.push(address); - } - - if generate_fresh_candidates { - // we only refresh when we are running low on candidates - self.try_refresh_candidates(); - } + let mut rng = thread_rng(); + let mut op = Vec::with_capacity(self.candidates.len()); - debug!("Candidates returned: {}", op.len()); + let candidates = self.candidates.values().filter_map(|candidates| { + // get a random index each time + let random_index = rng.gen::<usize>() % candidates.len(); + candidates.get(random_index) + }); + op.extend(candidates); op } diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 35b1cdec59..599dee835b 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -90,7 +90,7 @@ pub struct NodeRecordStore { /// ilog2 distance range of responsible records /// AKA: how many buckets of data do we consider "close" /// None means accept all records. - responsible_distance_range: Option<Distance>, + responsible_distance_range: Option<u32>, #[cfg(feature = "open-metrics")] /// Used to report the number of records held by the store to the metrics server. record_count_metric: Option<Gauge>, @@ -315,6 +315,11 @@ impl NodeRecordStore { self } + /// Returns the current distance ilog2 (aka bucket) range of CLOSE_GROUP nodes. + pub fn get_responsible_distance_range(&self) -> Option<u32> { + self.responsible_distance_range + } + // Converts a Key into a Hex string. fn generate_filename(key: &Key) -> String { hex::encode(key.as_ref()) @@ -469,7 +474,8 @@ impl NodeRecordStore { let mut removed_keys = Vec::new(); self.records.retain(|key, _val| { let kbucket_key = KBucketKey::new(key.to_vec()); - let is_in_range = responsible_range >= self.local_key.distance(&kbucket_key); + let is_in_range = + responsible_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0); if !is_in_range { removed_keys.push(key.clone()); } @@ -693,7 +699,7 @@ impl NodeRecordStore { pub fn get_records_within_distance_range( &self, records: HashSet<&Key>, - distance_range: Distance, + distance_range: u32, ) -> usize { debug!( "Total record count is {:?}. Distance is: {distance_range:?}", @@ -704,7 +710,7 @@ .iter() .filter(|key| { let kbucket_key = KBucketKey::new(key.to_vec()); - distance_range >= self.local_key.distance(&kbucket_key) + distance_range >= self.local_key.distance(&kbucket_key).ilog2().unwrap_or(0) }) .count(); @@ -713,8 +719,8 @@ impl NodeRecordStore { } /// Setup the distance range.
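/// The range is an ilog2 bucket index: a record whose key sits at distance `d` from our local key counts as responsible data when `d.ilog2() <= farthest_responsible_bucket`, matching the checks in `get_records_within_distance_range` and the pruning above.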
- pub(crate) fn set_responsible_distance_range(&mut self, farthest_distance: Distance) { - self.responsible_distance_range = Some(farthest_distance); + pub(crate) fn set_responsible_distance_range(&mut self, farthest_responsible_bucket: u32) { + self.responsible_distance_range = Some(farthest_responsible_bucket); } } @@ -1494,7 +1500,10 @@ mod tests { .wrap_err("Could not parse record store key")?, ); // get the distance to this record from our local key - let distance = self_address.distance(&halfway_record_address); + let distance = self_address + .distance(&halfway_record_address) + .ilog2() + .unwrap_or(0); // must be plus one bucket from the halfway record store.set_responsible_distance_range(distance); diff --git a/sn_networking/src/record_store_api.rs b/sn_networking/src/record_store_api.rs index 64fd790ccd..8e3bc67364 100644 --- a/sn_networking/src/record_store_api.rs +++ b/sn_networking/src/record_store_api.rs @@ -10,7 +10,7 @@ use crate::record_store::{ClientRecordStore, NodeRecordStore}; use libp2p::kad::{ store::{RecordStore, Result}, - KBucketDistance, ProviderRecord, Record, RecordKey, + ProviderRecord, Record, RecordKey, }; use sn_evm::{AttoTokens, QuotingMetrics}; use sn_protocol::{storage::RecordType, NetworkAddress}; @@ -130,7 +130,17 @@ impl UnifiedRecordStore { } } - pub(crate) fn set_distance_range(&mut self, distance: KBucketDistance) { + pub(crate) fn get_farthest_replication_distance_bucket(&self) -> Option<u32> { + match self { + Self::Client(_store) => { + warn!("Calling get_distance_range at Client. This should not happen"); + None + } + Self::Node(store) => store.get_responsible_distance_range(), + } + } + + pub(crate) fn set_distance_range(&mut self, distance: u32) { match self { Self::Client(_store) => { warn!("Calling set_distance_range at Client. This should not happen"); diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs index 5e0d3a3ad4..1b90ac9a53 100644 --- a/sn_networking/src/replication_fetcher.rs +++ b/sn_networking/src/replication_fetcher.rs @@ -8,9 +8,7 @@ #![allow(clippy::mutable_key_type)] use crate::target_arch::spawn; -use crate::CLOSE_GROUP_SIZE; use crate::{event::NetworkEvent, target_arch::Instant}; -use itertools::Itertools; use libp2p::{ kad::{KBucketDistance as Distance, RecordKey, K_VALUE}, PeerId, }; @@ -43,8 +41,8 @@ pub(crate) struct ReplicationFetcher { // Avoid fetching same chunk from different nodes AND carry out too many parallel tasks. on_going_fetches: HashMap<(RecordKey, RecordType), (PeerId, ReplicationTimeout)>, event_sender: mpsc::Sender<NetworkEvent>, - /// KBucketDistance range that the incoming key shall be fetched - distance_range: Option<Distance>, + /// ilog2 bucket distance range within which an incoming key will be fetched + distance_range: Option<u32>, /// Restrict fetch range to closer than this value /// used when the node is full, but we still have "close" data coming in /// that is _not_ closer than our farthest max record @@ -65,7 +63,7 @@ impl ReplicationFetcher { } /// Set the distance range.
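/// As above, this is an ilog2 bucket index: `add_keys` rejects any incoming key whose ilog2 distance from us exceeds it (collected as `out_of_range_keys` below).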
- pub(crate) fn set_replication_distance_range(&mut self, distance_range: Distance) { + pub(crate) fn set_replication_distance_range(&mut self, distance_range: u32) { self.distance_range = Some(distance_range); } @@ -78,7 +76,6 @@ impl ReplicationFetcher { holder: PeerId, incoming_keys: Vec<(NetworkAddress, RecordType)>, locally_stored_keys: &HashMap, - all_local_peers: &[PeerId], ) -> Vec<(PeerId, RecordKey)> { // remove locally stored from incoming_keys let mut new_incoming_keys: Vec<_> = incoming_keys @@ -136,30 +133,12 @@ impl ReplicationFetcher { .retain(|_, time_out| *time_out > Instant::now()); let mut out_of_range_keys = vec![]; - // Filter out those out_of_range ones among the incoming_keys. if let Some(ref distance_range) = self.distance_range { new_incoming_keys.retain(|(addr, _record_type)| { - // find all closer peers to the data - let closer_peers_len = all_local_peers - .iter() - .filter(|peer_id| { - let peer_address = NetworkAddress::from_peer(**peer_id); - addr.distance(&peer_address) <= *distance_range - }) - .collect_vec() - .len(); - - // we consider ourselves in range if - // A) We don't know enough closer peers than ourselves - // or B) The distance to the data is within our GetRange - let is_in_range = closer_peers_len <= CLOSE_GROUP_SIZE - || self_address.distance(addr).ilog2() <= distance_range.ilog2(); + let is_in_range = + self_address.distance(addr).ilog2().unwrap_or(0) <= *distance_range; if !is_in_range { - warn!( - "Rejecting incoming key: {addr:?} as out of range. {:?} is larger than {:?} ", - self_address.distance(addr).ilog2(), - distance_range.ilog2()); out_of_range_keys.push(addr.clone()); } is_in_range @@ -449,12 +428,8 @@ mod tests { incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = replication_fetcher.add_keys( - PeerId::random(), - incoming_keys, - &locally_stored_keys, - &[], - ); + let keys_to_fetch = + replication_fetcher.add_keys(PeerId::random(), incoming_keys, &locally_stored_keys); assert_eq!(keys_to_fetch.len(), MAX_PARALLEL_FETCH); // we should not fetch anymore keys @@ -466,7 +441,6 @@ mod tests { PeerId::random(), vec![(key_1, RecordType::Chunk), (key_2, RecordType::Chunk)], &locally_stored_keys, - &[], ); assert!(keys_to_fetch.is_empty()); @@ -477,7 +451,6 @@ mod tests { PeerId::random(), vec![(key, RecordType::Chunk)], &locally_stored_keys, - &[], ); assert!(!keys_to_fetch.is_empty()); @@ -503,41 +476,34 @@ mod tests { let mut replication_fetcher = ReplicationFetcher::new(peer_id, event_sender); // Set distance range - // way to update this test let distance_target = NetworkAddress::from_peer(PeerId::random()); - let distance_range = self_address.distance(&distance_target); + let distance_range = self_address.distance(&distance_target).ilog2().unwrap_or(1); replication_fetcher.set_replication_distance_range(distance_range); - // generate a list of close peers - let close_peers = (0..100).map(|_| PeerId::random()).collect::>(); - let mut incoming_keys = Vec::new(); let mut in_range_keys = 0; (0..100).for_each(|_| { let random_data: Vec = (0..50).map(|_| rand::random::()).collect(); let key = NetworkAddress::from_record_key(&RecordKey::from(random_data)); - if key.distance(&self_address).ilog2() <= distance_range.ilog2() { + if key.distance(&self_address).ilog2().unwrap_or(0) <= distance_range { in_range_keys += 1; } incoming_keys.push((key, RecordType::Chunk)); }); - let keys_to_fetch = replication_fetcher.add_keys( - PeerId::random(), - incoming_keys, - &Default::default(), - &close_peers, - ); + let keys_to_fetch 
= + replication_fetcher.add_keys(PeerId::random(), incoming_keys, &Default::default()); assert_eq!( keys_to_fetch.len(), replication_fetcher.on_going_fetches.len(), "keys to fetch and ongoing fetches should match" ); - assert!( - keys_to_fetch.len() + replication_fetcher.to_be_fetched.len() >= in_range_keys, - "at least all keys in range should be in the fetcher" + assert_eq!( + in_range_keys, + keys_to_fetch.len() + replication_fetcher.to_be_fetched.len(), + "all keys should be in range and in the fetcher" ); } } diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs index 40c6182f94..76b6349ce1 100644 --- a/sn_networking/src/transfers.rs +++ b/sn_networking/src/transfers.rs @@ -6,7 +6,9 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{driver::GetRecordCfg, Network, NetworkError, Result}; +use crate::{ + close_group_majority, driver::GetRecordCfg, GetRecordError, Network, NetworkError, Result, +}; use libp2p::kad::{Quorum, Record}; use sn_protocol::{ storage::{try_deserialize_record, RecordHeader, RecordKind, RetryStrategy, SpendAddress}, @@ -37,7 +39,7 @@ impl Network { }; let record = self.get_record_from_network(key.clone(), &get_cfg).await?; debug!( - "Got raw spends from the network, {:?}", + "Got record from the network, {:?}", PrettyPrintRecordKey::from(&record.key) ); get_raw_signed_spends_from_record(&record) @@ -49,14 +51,38 @@ impl Network { /// If we get a quorum error, we increase the RetryStrategy pub async fn get_spend(&self, address: SpendAddress) -> Result<SignedSpend> { let key = NetworkAddress::from_spend_address(address).to_record_key(); - let get_cfg = GetRecordCfg { + let mut get_cfg = GetRecordCfg { get_quorum: Quorum::All, retry_strategy: Some(RetryStrategy::Quick), target_record: None, expected_holders: Default::default(), is_register: false, }; - let record = self.get_record_from_network(key.clone(), &get_cfg).await?; + let record = match self.get_record_from_network(key.clone(), &get_cfg).await { + Ok(record) => record, + Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { + record, + expected, + got, + })) => { + // if a majority holds the spend, it might be worth trusting. + if got >= close_group_majority() { + debug!("At least a majority of nodes hold the spend {address:?}; trusting it if we can fetch it again with majority quorum."); + get_cfg.get_quorum = Quorum::Majority; + get_cfg.retry_strategy = Some(RetryStrategy::Balanced); + self.get_record_from_network(key, &get_cfg).await?
+ } else { + return Err(NetworkError::GetRecordError( + GetRecordError::NotEnoughCopies { + record, + expected, + got, + }, + )); + } + } + Err(err) => return Err(err), + }; debug!( "Got record from the network, {:?}", PrettyPrintRecordKey::from(&record.key) diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index b0dd3f6857..3f3343f403 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -552,7 +552,7 @@ impl Node { }; debug!( - "Found {} spends with key: {unique_pubkey:?} at {pretty_key:?}", + "Got {} validated spends with key: {unique_pubkey:?} at {pretty_key:?}", validated_spends.len() ); @@ -564,12 +564,14 @@ impl Node { expires: None, }; self.network().put_local_record(record); - debug!("Successfully stored spends with key: {unique_pubkey:?} at {pretty_key:?}"); + debug!( + "Successfully stored validated spends with key: {unique_pubkey:?} at {pretty_key:?}" + ); // Just log the double spend attempt. DoubleSpend error during PUT is not used and would just lead to // RecordRejected marker (which is incorrect, since we store double spends). if validated_spends.len() > 1 { - warn!("Got Burnt SpendAttempts of len {} for the Spend PUT with unique_pubkey {unique_pubkey} at {pretty_key:?}", validated_spends.len()); + warn!("Got double spend(s) of len {} for the Spend PUT with unique_pubkey {unique_pubkey}", validated_spends.len()); } self.record_metrics(Marker::ValidSpendRecordPutFromNetwork(&pretty_key)); @@ -754,14 +756,13 @@ impl Node { } spends } - Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopiesInRange { + Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies { record, got, - range, .. })) => { info!( - "Retrieved {got} copies of the record for {unique_pubkey:?} from the network in range {range}" + "Retrieved {got} copies of the record for {unique_pubkey:?} from the network" ); match get_raw_signed_spends_from_record(&record) { Ok(spends) => spends, diff --git a/sn_node/src/replication.rs b/sn_node/src/replication.rs index 80ec25b157..59e0cff078 100644 --- a/sn_node/src/replication.rs +++ b/sn_node/src/replication.rs @@ -6,18 +6,15 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{ - error::{Error, Result}, - node::Node, -}; +use crate::{error::Result, node::Node}; use libp2p::{ kad::{Quorum, Record, RecordKey}, PeerId, }; -use sn_networking::{GetRecordCfg, Network}; +use sn_networking::{sort_peers_by_address, GetRecordCfg, Network, REPLICATION_PEERS_COUNT}; use sn_protocol::{ - messages::{Query, QueryResponse, Request, Response}, - storage::{try_serialize_record, RecordKind, RecordType}, + messages::{Cmd, Query, QueryResponse, Request, Response}, + storage::RecordType, NetworkAddress, PrettyPrintRecordKey, }; use tokio::task::spawn; @@ -82,27 +79,12 @@ impl Node { // Hence value of the flag actually doesn't matter. 
is_register: false, }; - match node - .network() - .get_record_from_network(key.clone(), &get_cfg) - .await - { + match node.network().get_record_from_network(key, &get_cfg).await { Ok(record) => record, - Err(error) => match error { - sn_networking::NetworkError::DoubleSpendAttempt(spends) => { - debug!("Failed to fetch record {pretty_key:?} from the network, double spend attempt {spends:?}"); - - let bytes = try_serialize_record(&spends, RecordKind::Spend)?; - - Record { - key, - value: bytes.to_vec(), - publisher: None, - expires: None, - } - } - other_error => return Err(other_error.into()), - }, + Err(err) => { + error!("During replication fetch of {pretty_key:?}, failed to re-fetch it from the network: {err:?}"); + return; + } } }; @@ -114,7 +96,6 @@ impl Node { } else { debug!("Completed storing Replication Record {pretty_key:?} from network."); } - Ok::<(), Error>(()) }); } Ok(()) @@ -130,9 +111,86 @@ impl Node { let network = self.network().clone(); let _handle = spawn(async move { - network - .replicate_valid_fresh_record(paid_key, record_type) - .await; + let start = std::time::Instant::now(); + let pretty_key = PrettyPrintRecordKey::from(&paid_key); + + // first we wait until our own network store can return the record + // otherwise it may not be fully written yet + let mut retry_count = 0; + debug!("Checking we have successfully stored the fresh record {pretty_key:?} in the store before replicating"); + loop { + let record = match network.get_local_record(&paid_key).await { + Ok(record) => record, + Err(err) => { + error!( + "Replicating fresh record {pretty_key:?} get_record_from_store errored: {err:?}" + ); + None + } + }; + + if record.is_some() { + break; + } + + if retry_count > 10 { + error!( + "Could not get record from store for replication: {pretty_key:?} after 10 retries" + ); + return; + } + + retry_count += 1; + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + debug!("Start replication of fresh record {pretty_key:?} from store"); + + // Already contains self_peer_id + let mut closest_k_peers = match network.get_closest_k_value_local_peers().await { + Ok(peers) => peers, + Err(err) => { + error!("Replicating fresh record {pretty_key:?} get_closest_local_peers errored: {err:?}"); + return; + } + }; + + // remove ourselves from these calculations + closest_k_peers.retain(|peer_id| peer_id != &network.peer_id()); + + let data_addr = NetworkAddress::from_record_key(&paid_key); + + let sorted_based_on_addr = match sort_peers_by_address( + &closest_k_peers, + &data_addr, + REPLICATION_PEERS_COUNT, + ) { + Ok(result) => result, + Err(err) => { + error!( + "When replicating fresh record {pretty_key:?}, error while sorting peers: {err:?}" + ); + return; + } + }; + + let our_peer_id = network.peer_id(); + let our_address = NetworkAddress::from_peer(our_peer_id); + let keys = vec![(data_addr.clone(), record_type.clone())]; + + for peer_id in sorted_based_on_addr { + debug!("Replicating fresh record {pretty_key:?} to {peer_id:?}"); + let request = Request::Cmd(Cmd::Replicate { + holder: our_address.clone(), + keys: keys.clone(), + }); + + network.send_req_ignore_reply(request, *peer_id); + } + debug!( + "Completed replicate fresh record {pretty_key:?} on store, in {:?}", + start.elapsed() + ); }); } } diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index 21ba72d619..8d06a87187 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -13,19 +13,18 @@ // use common::client::{get_client_and_funded_wallet, 
get_wallet}; // use eyre::{bail, Result}; // use itertools::Itertools; -// use sn_logging::LogBuilder; -// use sn_networking::NetworkError; // use sn_transfers::{ -// get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, SignedTransaction, SpendReason, -// WalletError, GENESIS_CASHNOTE, +// get_genesis_sk, rng, NanoTokens, DerivationIndex, HotWallet, SignedTransaction, +// SpendReason, WalletError, GENESIS_CASHNOTE, // }; +// use sn_logging::LogBuilder; +// use sn_networking::NetworkError; // use std::time::Duration; // use tracing::*; // #[tokio::test] // async fn cash_note_transfer_double_spend_fail() -> Result<()> { -// let _log_guards = -// LogBuilder::init_single_threaded_tokio_test("cash_note_transfer_double_spend_fail", true); +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); // // create 1 wallet add money from faucet // let first_wallet_dir = TempDir::new()?; @@ -41,7 +40,7 @@ // assert_eq!(third_wallet.balance(), NanoTokens::zero()); // // manually forge two transfers of the same source -// let amount = NanoTokens::from(first_wallet_balance / 3); +// let amount = first_wallet_balance / 3; // let to1 = first_wallet.address(); // let to2 = second_wallet.address(); // let to3 = third_wallet.address(); @@ -71,50 +70,31 @@ // )?; // // send both transfers to the network - +// // upload won't error out, only error out during verification. // info!("Sending both transfers to the network..."); -// // These may error (but may not depending on network speed) -// // so we're not going to rely on it here. -// let _ = client.send_spends(transfer_to_2.spends.iter(), true).await; +// let res = client.send_spends(transfer_to_2.spends.iter(), false).await; +// assert!(res.is_ok()); +// let res = client.send_spends(transfer_to_3.spends.iter(), false).await; +// assert!(res.is_ok()); -// let _ = client.send_spends(transfer_to_3.spends.iter(), true).await; - -// // check the CashNotes, it should fail -// info!("Verifying the transfers from first wallet..."); +// // we wait 5s to ensure that the double spend attempt is detected and accumulated +// info!("Verifying the transfers from first wallet... Sleeping for 10 seconds."); +// tokio::time::sleep(Duration::from_secs(10)).await; // let cash_notes_for_2: Vec<_> = transfer_to_2.output_cashnotes.clone(); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); -// let mut should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; -// let mut should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; - -// for i in 0..5 { -// if should_err1.is_err() && should_err2.is_err() { -// break; -// } - -// tokio::time::sleep(Duration::from_secs(1)).await; -// info!("Retrying verification.{i}... for should_err1+2"); -// println!("Retrying verification{i} ... 
for should_err1+2"); -// should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; -// should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; -// } - -// info!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); -// println!("Both should fail during GET record accumulation + Double SpendAttempt should be flagged: {should_err1:?} {should_err2:?}"); +// // check the CashNotes, it should fail +// let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; +// let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; +// info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}"); // assert!(should_err1.is_err() && should_err2.is_err()); - -// assert_eq!( -// format!("{should_err1:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpend error, was: {should_err1:?}" -// ); - -// assert_eq!( -// format!("{should_err2:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpend error, was: {should_err2:?}" -// ); +// assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); +// assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // Ok(()) // } @@ -188,7 +168,7 @@ // )?; // // send the transfer to the network which should reject it -// let res = client.send_spends(transfer2.spends.iter(), true).await; +// let res = client.send_spends(transfer2.spends.iter(), false).await; // std::mem::drop(exclusive_access); // assert_matches!(res, Err(WalletError::CouldNotSendMoney(_))); @@ -204,8 +184,8 @@ // let wallet_dir_1 = TempDir::new()?; // let (client, mut wallet_1) = get_client_and_funded_wallet(wallet_dir_1.path()).await?; -// let balance_1 = wallet_1.balance().as_nano(); -// let amount = NanoTokens::from(balance_1 / 2); +// let balance_1 = wallet_1.balance(); +// let amount = balance_1 / 2; // let to1 = wallet_1.address(); // // Send from 1 -> 2 @@ -282,18 +262,14 @@ // reason.clone(), // wallet_1.key(), // )?; // reuse the old cash notes -// // ignore response in case it errors out early, we verify below -// let _res = client.send_spends(transfer_to_3.spends.iter(), true).await; +// client +// .send_spends(transfer_to_3.spends.iter(), false) +// .await?; // info!("Verifying the transfers from 1 -> 3 wallet... It should error out."); // let cash_notes_for_3: Vec<_> = transfer_to_3.output_cashnotes.clone(); - -// let res = client.verify_cashnote(&cash_notes_for_3[0]).await; -// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned - +// assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned // info!("Verifying the original transfers from 1 -> 2 wallet... 
It should error out."); - -// let res = client.verify_cashnote(&cash_notes_for_2[0]).await; -// assert!(res.is_err(), "should be error, was {res:?}"); // the old spend has been poisoned +// assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned // // The old spend has been poisoned, but spends from 22 -> 222 should still work // let wallet_dir_222 = TempDir::new()?; @@ -324,16 +300,16 @@ // client.verify_cashnote(&cash_notes_for_222[0]).await?; // // finally assert that we have a double spend attempt error here -// // we wait to ensure that the double spend attempt is detected and accumulated +// // we wait 1s to ensure that the double spend attempt is detected and accumulated // tokio::time::sleep(Duration::from_secs(5)).await; // match client.verify_cashnote(&cash_notes_for_2[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert_eq!( -// e.to_string(), -// format!("{}", WalletError::BurntSpend), -// "error should reflect double spend attempt was: {e:?}", +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", // ); // } // } @@ -341,10 +317,10 @@ // match client.verify_cashnote(&cash_notes_for_3[0]).await { // Ok(_) => bail!("Cashnote verification should have failed"), // Err(e) => { -// assert_eq!( -// e.to_string(), -// format!("{}", WalletError::BurntSpend), -// "error should reflect double spend attempt was: {e:?}", +// assert!( +// e.to_string() +// .contains("Network Error Double spend(s) attempt was detected"), +// "error should reflect double spend attempt", // ); // } // } @@ -363,7 +339,7 @@ // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; // let balance_a = wallet_a.balance().as_nano(); -// let amount = NanoTokens::from(balance_a / 2); +// let amount = balance_a / 2; // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -452,10 +428,12 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// assert!( -// format!("{result:?}").starts_with("Err(UnexpectedParentSpends"), -// "Should have been UnexpectedParentSpends error, was: {result:?}" -// ); +// // sleep for a bit to allow the network to process and accumulate the double spend +// tokio::time::sleep(Duration::from_secs(10)).await; + +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // poisoned // // Try to double spend from B -> Y // let wallet_dir_y = TempDir::new()?; @@ -492,48 +470,32 @@ // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from B -> Y: {result:?}"); -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpent error, was: {result:?}" -// ); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // info!("Verifying the original cashnote of A -> B"); - -// // arbitrary time sleep to allow for network accumulation of double spend. 
-// tokio::time::sleep(Duration::from_secs(1)).await; - // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; // info!("Got result while verifying the original spend from A -> B: {result:?}"); -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpent error, was: {result:?}" -// ); - -// println!("Verifying the original cashnote of B -> C"); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); +// info!("Verifying the original cashnote of B -> C"); // let result = client.verify_cashnote(&cash_notes_for_c[0]).await; // info!("Got result while verifying the original spend from B -> C: {result:?}"); -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpent error, was: {result:?}" -// ); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpent error, was: {result:?}" -// ); - +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); // let result = client.verify_cashnote(&cash_notes_for_b[0]).await; -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend), -// "Should have been BurntSpent error, was: {result:?}" -// ); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }, "result should be verify error, it was {result:?}"); // Ok(()) // } @@ -549,8 +511,8 @@ // let wallet_dir_a = TempDir::new()?; // let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; -// let balance_a = wallet_a.balance().as_nano(); -// let amount = NanoTokens::from(balance_a / 2); +// let balance_a = wallet_a.balance(); +// let amount = balance_a / 2; // // Send from A -> B // let wallet_dir_b = TempDir::new()?; @@ -612,7 +574,7 @@ // )?; // client -// .send_spends(transfer_to_c.spends.iter(), true) +// .send_spends(transfer_to_c.spends.iter(), false) // .await?; // info!("Verifying the transfers from B -> C wallet..."); @@ -649,10 +611,9 @@ // let result = client.verify_cashnote(&cash_notes_for_x[0]).await; // info!("Got result while verifying double spend from A -> X: {result:?}"); -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend) -// ); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // // the original A should still be present as one of the double spends // let res = client @@ -688,23 +649,20 @@ // reason.clone(), // wallet_a.key(), // )?; // reuse the old cash notes - -// // we actually don't care 
about the result here, we just want to spam the network with double spends -// let _ = client.send_spends(transfer_to_y.spends.iter(), false).await; - -// // and then we verify the double spend attempt +// client +// .send_spends(transfer_to_y.spends.iter(), false) +// .await?; // info!("Verifying the transfers from A -> Y wallet... It should error out."); // let cash_notes_for_y: Vec<_> = transfer_to_y.output_cashnotes.clone(); // // sleep for a bit to allow the network to process and accumulate the double spend -// tokio::time::sleep(Duration::from_millis(1500)).await; +// tokio::time::sleep(Duration::from_millis(500)).await; // let result = client.verify_cashnote(&cash_notes_for_y[0]).await; // info!("Got result while verifying double spend from A -> Y: {result:?}"); -// assert_eq!( -// format!("{result:?}"), -// format!("Err({:?})", WalletError::BurntSpend) -// ); +// assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { +// assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "Expected double spend, but got {str}"); +// }); // // the original A should still be present as one of the double spends // let res = client diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index d36f680ca2..23fe9c53b0 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -14,6 +14,7 @@ // use libp2p::PeerId; // use rand::Rng; // use sn_client::{Error as ClientError, FilesDownload, Uploader, WalletClient}; +// use sn_evm::{Amount, AttoTokens, PaymentQuote}; // use sn_logging::LogBuilder; // use sn_networking::{GetRecordError, NetworkError}; // use sn_protocol::{ @@ -22,7 +23,6 @@ // NetworkAddress, // }; // use sn_registers::Permissions; -// use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; // use std::collections::BTreeMap; // use tokio::time::{sleep, Duration}; // use tracing::info; @@ -80,7 +80,7 @@ // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // let subset_len = chunks.len() / 3; -// let res = wallet_client +// let _storage_cost = wallet_client // .pay_for_storage( // chunks // .clone() @@ -88,15 +88,7 @@ // .take(subset_len) // .map(|(name, _)| NetworkAddress::ChunkAddress(ChunkAddress::new(name))), // ) -// .await; - -// // if the payment failed, we can log that -// if let Err(error) = res { -// tracing::warn!( -// "Payment failed, (though that doesn't really break this test): {:?}", -// error -// ); -// } +// .await?; // // now let's request to upload all addresses, even that we've already paid for a subset of them // let verify_store = false; @@ -119,7 +111,7 @@ // let paying_wallet_dir: TempDir = TempDir::new()?; // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// let wallet_original_balance = paying_wallet.balance().as_nano(); +// let wallet_original_balance = paying_wallet.balance().as_atto(); // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); // // generate a random number (between 50 and 100) of random addresses @@ -143,10 +135,10 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for the subset of addresses, 1 nano per addr -// let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); +// let new_balance = AttoTokens::from_atto(wallet_original_balance - total_cost.as_atto()); // info!("Verifying new balance on paying wallet is {new_balance} ..."); // let paying_wallet = 
wallet_client.into_wallet(); -// assert_eq!(paying_wallet.balance(), new_balance); +// // assert_eq!(paying_wallet.balance(), new_balance);// TODO adapt to evm // // let's verify payment proofs for the subset have been cached in the wallet // assert!(random_content_addrs @@ -168,12 +160,13 @@ // .ok_or(eyre!("Total storage cost exceed possible token amount"))?; // // check we've paid only for addresses we haven't previously paid for, 1 nano per addr -// let new_balance = NanoTokens::from( -// wallet_original_balance - (random_content_addrs.len() as u64 * total_cost.as_nano()), +// let new_balance = AttoTokens::from_atto( +// wallet_original_balance - (Amount::from(random_content_addrs.len()) * total_cost.as_atto()), // ); // println!("Verifying new balance on paying wallet is now {new_balance} ..."); // let paying_wallet = wallet_client.into_wallet(); -// assert_eq!(paying_wallet.balance(), new_balance); +// // TODO adapt to evm +// // assert_eq!(paying_wallet.balance(), new_balance); // // let's verify payment proofs now for all addresses have been cached in the wallet // // assert!(random_content_addrs @@ -236,16 +229,18 @@ // no_data_payments.insert( // *chunk_name, // ( -// MainPubkey::new(bls::SecretKey::random().public_key()), -// PaymentQuote::test_dummy(*chunk_name, NanoTokens::from(0)), +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(*chunk_name, AttoTokens::from_u64(0)), // PeerId::random().to_bytes(), // ), // ); // } -// let _ = wallet_client -// .mut_wallet() -// .local_send_storage_payment(&no_data_payments)?; +// // TODO adapt to evm +// // let _ = wallet_client +// // .mut_wallet() +// // .send_storage_payment(&no_data_payments) +// // .await?; // sleep(Duration::from_secs(5)).await; @@ -253,131 +248,131 @@ // .upload_test_bytes(content_bytes.clone(), false) // .await?; -// // info!("Reading {content_addr:?} expected to fail"); -// // let mut files_download = FilesDownload::new(files_api); -// // assert!( -// // matches!( -// // files_download.download_file(content_addr, None).await, -// // Err(ClientError::Network(NetworkError::GetRecordError( -// // GetRecordError::RecordNotFound -// // ))) -// // ), -// // "read bytes should fail as we didn't store them" -// // ); +// info!("Reading {content_addr:?} expected to fail"); +// let mut files_download = FilesDownload::new(files_api); +// assert!( +// matches!( +// files_download.download_file(content_addr, None).await, +// Err(ClientError::Network(NetworkError::GetRecordError( +// GetRecordError::RecordNotFound +// ))) +// ), +// "read bytes should fail as we didn't store them" +// ); -// // Ok(()) -// // } +// Ok(()) +// } -// // #[tokio::test] -// // async fn storage_payment_register_creation_succeeds() -> Result<()> { -// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// #[tokio::test] +// async fn storage_payment_register_creation_succeeds() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// // let paying_wallet_dir = TempDir::new()?; +// let paying_wallet_dir = TempDir::new()?; -// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// // let mut rng = rand::thread_rng(); -// // let xor_name = 
XorName::random(&mut rng); -// // let address = RegisterAddress::new(xor_name, client.signer_pk()); -// // let net_addr = NetworkAddress::from_register_address(address); -// // info!("Paying for random Register address {net_addr:?} ..."); +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_addr = NetworkAddress::from_register_address(address); +// info!("Paying for random Register address {net_addr:?} ..."); -// // let _cost = wallet_client -// // .pay_for_storage(std::iter::once(net_addr)) -// // .await?; +// let _cost = wallet_client +// .pay_for_storage(std::iter::once(net_addr)) +// .await?; -// // let (mut register, _cost, _royalties_fees) = client -// // .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) -// // .await?; +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, true, Permissions::default()) +// .await?; -// // println!("Newly created register has {} ops", register.read().len()); +// println!("Newly created register has {} ops", register.read().len()); -// // let retrieved_reg = client.get_register(address).await?; +// let retrieved_reg = client.get_register(address).await?; -// // assert_eq!(register.read(), retrieved_reg.read()); +// assert_eq!(register.read(), retrieved_reg.read()); -// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// // register.write(&random_entry)?; +// register.write(&random_entry)?; -// // println!( -// // "Register has {} ops after first write", -// // register.read().len() -// // ); +// println!( +// "Register has {} ops after first write", +// register.read().len() +// ); -// // register.sync(&mut wallet_client, true, None).await?; +// register.sync(&mut wallet_client, true, None).await?; -// // let retrieved_reg = client.get_register(address).await?; +// let retrieved_reg = client.get_register(address).await?; -// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// // assert_eq!(retrieved_reg.read().len(), 1); +// assert_eq!(retrieved_reg.read().len(), 1); -// // for index in 1..10 { -// // println!("current index is {index}"); -// // let random_entry = rng.gen::<[u8; 32]>().to_vec(); +// for index in 1..10 { +// println!("current index is {index}"); +// let random_entry = rng.gen::<[u8; 32]>().to_vec(); -// // register.write(&random_entry)?; -// // register.sync(&mut wallet_client, true, None).await?; +// register.write(&random_entry)?; +// register.sync(&mut wallet_client, true, None).await?; -// // let retrieved_reg = client.get_register(address).await?; +// let retrieved_reg = client.get_register(address).await?; -// // println!( -// // "current retrieved register entry length is {}", -// // retrieved_reg.read().len() -// // ); -// // println!("current expected entry length is {}", register.read().len()); +// println!( +// "current retrieved register entry length is {}", +// retrieved_reg.read().len() +// ); +// println!("current expected entry length is {}", register.read().len()); -// // println!( -// // "current retrieved register ops length is {}", -// // retrieved_reg.ops.len() -// // ); -// // println!("current local cached ops length is {}", register.ops.len()); +// println!( +// "current retrieved register ops length is {}", +// retrieved_reg.ops.len() 
+// ); +// println!("current local cached ops length is {}", register.ops.len()); -// // assert_eq!(retrieved_reg.read().len(), register.read().len()); +// assert_eq!(retrieved_reg.read().len(), register.read().len()); -// // assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); +// assert_eq!(retrieved_reg.read().iter().next().unwrap().1, random_entry); -// // println!("Current fetched register is {:?}", retrieved_reg.register); -// // println!( -// // "Fetched register has update history of {}", -// // retrieved_reg.register.log_update_history() -// // ); +// println!("Current fetched register is {:?}", retrieved_reg.register); +// println!( +// "Fetched register has update history of {}", +// retrieved_reg.register.log_update_history() +// ); -// // std::thread::sleep(std::time::Duration::from_millis(1000)); -// // } +// std::thread::sleep(std::time::Duration::from_millis(1000)); +// } -// // Ok(()) -// // } +// Ok(()) +// } -// // #[tokio::test] -// // #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] -// // async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { -// // let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); +// #[tokio::test] +// #[ignore = "Test currently invalid as we always try to pay and upload registers if none found... need to check if this test is valid"] +// async fn storage_payment_register_creation_and_mutation_fails() -> Result<()> { +// let _log_guards = LogBuilder::init_single_threaded_tokio_test("storage_payments", true); -// // let paying_wallet_dir = TempDir::new()?; +// let paying_wallet_dir = TempDir::new()?; -// // let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; -// // let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); +// let (client, paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); -// // let mut rng = rand::thread_rng(); -// // let xor_name = XorName::random(&mut rng); -// // let address = RegisterAddress::new(xor_name, client.signer_pk()); -// // let net_address = -// // NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); +// let mut rng = rand::thread_rng(); +// let xor_name = XorName::random(&mut rng); +// let address = RegisterAddress::new(xor_name, client.signer_pk()); +// let net_address = +// NetworkAddress::RegisterAddress(RegisterAddress::new(xor_name, client.signer_pk())); -// // let mut no_data_payments = BTreeMap::default(); -// // no_data_payments.insert( -// // net_address -// // .as_xorname() -// // .expect("RegisterAddress should convert to XorName"), -// // ( -// // sn_evm::utils::dummy_address(), -// // PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), -// // vec![], -// // ), -// // ); +// let mut no_data_payments = BTreeMap::default(); +// no_data_payments.insert( +// net_address +// .as_xorname() +// .expect("RegisterAddress should convert to XorName"), +// ( +// sn_evm::utils::dummy_address(), +// PaymentQuote::test_dummy(xor_name, AttoTokens::from_u64(0)), +// vec![], +// ), +// ); // println!( // "current retrieved register entry length is {}", @@ -400,16 +395,16 @@ // // .send_storage_payment(&no_data_payments) // // .await?; -// // // this should fail to store as the amount paid is not enough -// // let (mut register, _cost, _royalties_fees) = 
client -// // .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) -// // .await?; +// // this should fail to store as the amount paid is not enough +// let (mut register, _cost, _royalties_fees) = client +// .create_and_pay_for_register(xor_name, &mut wallet_client, false, Permissions::default()) +// .await?; -// // sleep(Duration::from_secs(5)).await; -// // assert!(matches!( -// // client.get_register(address).await, -// // Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address -// // )); +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// client.get_register(address).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); // println!("Current fetched register is {:?}", retrieved_reg.address()); // println!( @@ -420,11 +415,11 @@ // let random_entry = rng.gen::<[u8; 32]>().to_vec(); // register.write(&random_entry)?; -// // sleep(Duration::from_secs(5)).await; -// // assert!(matches!( -// // register.sync(&mut wallet_client, false, None).await, -// // Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address -// // )); +// sleep(Duration::from_secs(5)).await; +// assert!(matches!( +// register.sync(&mut wallet_client, false, None).await, +// Err(ClientError::Protocol(ProtocolError::RegisterNotFound(addr))) if *addr == address +// )); -// // Ok(()) -// // } +// Ok(()) +// } diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 8649d07909..641756fa2c 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -16,10 +16,13 @@ use common::{ get_all_peer_ids, get_safenode_rpc_client, NodeRestart, }; use eyre::{eyre, Result}; -use libp2p::{kad::RecordKey, PeerId}; +use libp2p::{ + kad::{KBucketKey, RecordKey}, + PeerId, +}; use rand::{rngs::OsRng, Rng}; use sn_logging::LogBuilder; -use sn_networking::{sleep, sort_peers_by_address_and_limit, sort_peers_by_key_and_limit}; +use sn_networking::{sleep, sort_peers_by_key}; use sn_protocol::{ safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, @@ -157,8 +160,8 @@ fn print_node_close_groups(all_peers: &[PeerId]) { for (node_index, peer) in all_peers.iter().enumerate() { let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); - let closest_peers = sort_peers_by_key_and_limit(&all_peers, &key, CLOSE_GROUP_SIZE) - .expect("failed to sort peer"); + let closest_peers = + sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); let closest_peers_idx = closest_peers .iter() .map(|&&peer| { @@ -209,12 +212,11 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd for (key, actual_holders_idx) in record_holders.iter() { println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - let record_address = NetworkAddress::from_record_key(key); - let expected_holders = - sort_peers_by_address_and_limit(all_peers, &record_address, CLOSE_GROUP_SIZE)? - .into_iter() - .cloned() - .collect::>(); + let record_key = KBucketKey::from(key.to_vec()); + let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
+            .into_iter()
+            .cloned()
+            .collect::<HashSet<_>>();

         let actual_holders = actual_holders_idx
             .iter()
diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs
index 85dc2e3a09..da19270b69 100644
--- a/sn_node/tests/verify_routing_table.rs
+++ b/sn_node/tests/verify_routing_table.rs
@@ -26,7 +26,7 @@ use tracing::{error, info, trace};

 /// Sleep for some time for the nodes to discover each other before verification
 /// Also can be set through the env variable of the same name.
-const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(60);
+const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5);

 #[tokio::test(flavor = "multi_thread")]
 async fn verify_routing_table() -> Result<()> {
diff --git a/sn_protocol/src/error.rs b/sn_protocol/src/error.rs
index 8462ff85f3..f73c356b53 100644
--- a/sn_protocol/src/error.rs
+++ b/sn_protocol/src/error.rs
@@ -78,7 +78,4 @@ pub enum Error {
     // The record already exists at this node
     #[error("The record already exists, so do not charge for it: {0:?}")]
     RecordExists(PrettyPrintRecordKey<'static>),
-
-    #[error("Record header is incorrect")]
-    IncorrectRecordHeader,
 }
diff --git a/sn_protocol/src/storage.rs b/sn_protocol/src/storage.rs
index 3a6b4ba6a8..2935e43fce 100644
--- a/sn_protocol/src/storage.rs
+++ b/sn_protocol/src/storage.rs
@@ -18,10 +18,7 @@ use std::{str::FromStr, time::Duration};

 pub use self::{
     address::{ChunkAddress, RegisterAddress, ScratchpadAddress, SpendAddress},
     chunks::Chunk,
-    header::{
-        get_type_from_record, try_deserialize_record, try_serialize_record, RecordHeader,
-        RecordKind, RecordType,
-    },
+    header::{try_deserialize_record, try_serialize_record, RecordHeader, RecordKind, RecordType},
     scratchpad::Scratchpad,
 };
diff --git a/sn_protocol/src/storage/header.rs b/sn_protocol/src/storage/header.rs
index af43c21256..96a4515526 100644
--- a/sn_protocol/src/storage/header.rs
+++ b/sn_protocol/src/storage/header.rs
@@ -84,33 +84,6 @@ impl Display for RecordKind {
     }
 }

-/// Return the RecordType
-pub fn get_type_from_record(record: &Record) -> Result<RecordType> {
-    let key = record.key.clone();
-    let record_key = PrettyPrintRecordKey::from(&key);
-
-    match RecordHeader::from_record(record) {
-        Ok(record_header) => match record_header.kind {
-            RecordKind::Chunk => Ok(RecordType::Chunk),
-            RecordKind::Scratchpad => Ok(RecordType::Scratchpad),
-            RecordKind::Spend | RecordKind::Register => {
-                let content_hash = XorName::from_content(&record.value);
-                Ok(RecordType::NonChunk(content_hash))
-            }
-            RecordKind::ChunkWithPayment
-            | RecordKind::RegisterWithPayment
-            | RecordKind::ScratchpadWithPayment => {
-                error!("Record {record_key:?} with payment shall not be stored locally.");
-                Err(Error::IncorrectRecordHeader)
-            }
-        },
-        Err(err) => {
-            error!("For record {record_key:?}, failed to parse record_header {err:?}");
-            Err(Error::IncorrectRecordHeader)
-        }
-    }
-}
-
 impl RecordHeader {
     pub const SIZE: usize = 2;

diff --git a/sn_transfers/src/wallet/error.rs b/sn_transfers/src/wallet/error.rs
index f60b718f42..5a57b7434a 100644
--- a/sn_transfers/src/wallet/error.rs
+++ b/sn_transfers/src/wallet/error.rs
@@ -40,19 +40,9 @@ pub enum Error {
     /// A general error when receiving a transfer fails
     #[error("Failed to receive transfer due to {0}")]
     CouldNotReceiveMoney(String),
-    /// A spend has been burnt (ie there was a DoubleSpendAttempt)
-    #[error("Failed to verify transfer validity in the network, a burnt SpendAttempt was found")]
-    BurntSpend,
-    /// Parents of a spend were not as expected in a provided cash note
-
#[error("Failed to verify transfer's parents in the network, transfer could be invalid or a parent double spent")] - UnexpectedParentSpends(crate::SpendAddress), - ///No valid unspent cashnotes found - #[error("All the redeemed CashNotes are already spent")] - AllRedeemedCashnotesSpent, /// A general error when verifying a transfer validity in the network #[error("Failed to verify transfer validity in the network {0}")] CouldNotVerifyTransfer(String), - /// Failed to fetch spend from network #[error("Failed to fetch spend from network: {0}")] FailedToGetSpend(String), From 27362f3f71e32b5a942da0b81ca8ffa9ff380891 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 21 Oct 2024 17:28:58 +0100 Subject: [PATCH 246/255] Revert "Merge pull request #2243 from maqi/record_store_dir_prefixed_with_network_keys" This reverts commit 147ba8e1332d56a3c24d18b78fe6c9eaf99422ee, reversing changes made to 00ad51dbf6069b046588893ffba62a7308d3b642. --- sn_networking/src/driver.rs | 7 +++---- sn_protocol/src/version.rs | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index d8d71c5601..ec716cb4df 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -52,8 +52,8 @@ use sn_protocol::{ messages::{ChunkProof, Nonce, Request, Response}, storage::{try_deserialize_record, RetryStrategy}, version::{ - get_key_version_str, IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, - IDENTIFY_PROTOCOL_STR, REQ_RESPONSE_VERSION_STR, + IDENTIFY_CLIENT_VERSION_STR, IDENTIFY_NODE_VERSION_STR, IDENTIFY_PROTOCOL_STR, + REQ_RESPONSE_VERSION_STR, }, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; @@ -364,8 +364,7 @@ impl NetworkBuilder { let store_cfg = { // Configures the disk_store to store records under the provided path and increase the max record size - // The storage dir is appendixed with key_version str to avoid bringing records from old network into new - let storage_dir_path = root_dir.join(format!("record_store_{}", get_key_version_str())); + let storage_dir_path = root_dir.join("record_store"); if let Err(error) = std::fs::create_dir_all(&storage_dir_path) { return Err(NetworkError::FailedToCreateRecordStoreDir { path: storage_dir_path, diff --git a/sn_protocol/src/version.rs b/sn_protocol/src/version.rs index e1c952976c..04921730ef 100644 --- a/sn_protocol/src/version.rs +++ b/sn_protocol/src/version.rs @@ -58,7 +58,7 @@ fn get_truncate_version_str() -> String { /// Get the PKs version string. /// If the public key mis-configed via env variable, /// it shall result in being rejected to join by the network -pub fn get_key_version_str() -> String { +fn get_key_version_str() -> String { let mut f_k_str = FOUNDATION_PK.to_hex(); let _ = f_k_str.split_off(6); let mut g_k_str = GENESIS_PK.to_hex(); From c4796c1dfd7b946339e983c4cb7a59ab85f42d78 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 21 Oct 2024 19:05:12 +0200 Subject: [PATCH 247/255] fix(launchpad): use arbitrum sepolia --- sn_node_manager/src/cmd/node.rs | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 375788bb11..0c087704d9 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -715,15 +715,7 @@ pub async fn maintain_n_running_nodes( enable_metrics_server, env_variables.clone(), // FIXME: Hardcoding for demo. Should be fixed!! 
- Some(EvmNetwork::Custom(CustomNetwork { - rpc_url_http: "http://165.227.234.109:4343/".parse()?, - payment_token_address: RewardsAddress::from_str( - "0x5FbDB2315678afecb367f032d93F642f64180aa3", - )?, - data_payments_address: RewardsAddress::from_str( - "0x8464135c8F25Da09e49BC8782676a84730C318bC", - )?, - })), + Some(EvmNetwork::ArbitrumSepolia), home_network, local, log_dir_path.clone(), From 033903d6aa9c6b54cd64e11af2b767588d26a0a0 Mon Sep 17 00:00:00 2001 From: Lautaro Mazzitelli Date: Mon, 21 Oct 2024 19:18:18 +0200 Subject: [PATCH 248/255] chore(launchpad): evm network as parameter to maintain_n_running_nodes --- node-launchpad/src/node_mgmt.rs | 4 +++- sn_node_manager/src/cmd/node.rs | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/node-launchpad/src/node_mgmt.rs b/node-launchpad/src/node_mgmt.rs index 3049a3930b..2c3b6205a9 100644 --- a/node-launchpad/src/node_mgmt.rs +++ b/node-launchpad/src/node_mgmt.rs @@ -1,7 +1,7 @@ use crate::action::{Action, StatusActions}; use crate::connection_mode::ConnectionMode; use color_eyre::eyre::{eyre, Error}; -use sn_evm::RewardsAddress; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_node_manager::{ add_services::config::PortRange, config::get_node_registry_path, VerbosityLevel, }; @@ -291,6 +291,7 @@ async fn scale_down_nodes(config: &NodeConfig, count: u16) { config.data_dir_path.clone(), true, None, + Some(EvmNetwork::ArbitrumSepolia), //FIXME: should come from an UI element. config.home_network, false, None, @@ -365,6 +366,7 @@ async fn add_nodes( config.data_dir_path.clone(), true, None, + Some(EvmNetwork::ArbitrumSepolia), //FIXME: Should come from an UI element config.home_network, false, None, diff --git a/sn_node_manager/src/cmd/node.rs b/sn_node_manager/src/cmd/node.rs index 0c087704d9..7d6a10871a 100644 --- a/sn_node_manager/src/cmd/node.rs +++ b/sn_node_manager/src/cmd/node.rs @@ -22,7 +22,7 @@ use color_eyre::{eyre::eyre, Help, Result}; use colored::Colorize; use libp2p_identity::PeerId; use semver::Version; -use sn_evm::{CustomNetwork, EvmNetwork, RewardsAddress}; +use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_peers_acquisition::PeersArgs; use sn_releases::{ReleaseType, SafeReleaseRepoActions}; @@ -610,6 +610,7 @@ pub async fn maintain_n_running_nodes( data_dir_path: Option, enable_metrics_server: bool, env_variables: Option>, + evm_network: Option, home_network: bool, local: bool, log_dir_path: Option, @@ -714,8 +715,7 @@ pub async fn maintain_n_running_nodes( data_dir_path.clone(), enable_metrics_server, env_variables.clone(), - // FIXME: Hardcoding for demo. Should be fixed!! - Some(EvmNetwork::ArbitrumSepolia), + evm_network.clone(), home_network, local, log_dir_path.clone(), From 4dec0b2c624fbbd54d8ef7a2e8457be5c359b3a5 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 21 Oct 2024 19:21:56 +0100 Subject: [PATCH 249/255] chore: reduce max records cache size Using a larger chunk size resulted in the node having a larger memory footprint due to the size of the records cache. 
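
A rough sketch of the arithmetic behind this change (illustrative only: the ~4 MB
maximum record size is an assumed figure for the enlarged records, not a value
taken from this patch; the 100 and 25 cache capacities are the before/after
values of MAX_RECORDS_CACHE_SIZE in the diff below):

// Hypothetical back-of-envelope check of the worst-case memory pinned by the
// in-memory records cache. ASSUMED_MAX_RECORD_SIZE is an assumption.
const ASSUMED_MAX_RECORD_SIZE: usize = 4 * 1024 * 1024; // assumed ~4 MB per record

// Worst case: every cached slot holds a maximum-size record.
fn worst_case_cache_bytes(max_cached_records: usize) -> usize {
    max_cached_records * ASSUMED_MAX_RECORD_SIZE
}

fn main() {
    // Previous capacity of 100 -> ~400 MB worst case; new capacity of 25 -> ~100 MB.
    println!("old: ~{} MB", worst_case_cache_bytes(100) / (1024 * 1024));
    println!("new: ~{} MB", worst_case_cache_bytes(25) / (1024 * 1024));
}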
--- sn_networking/src/record_store.rs | 2 +- sn_node/src/bin/safenode/main.rs | 8 ++++++-- sn_node/src/bin/safenode/rpc_service.rs | 9 ++++++++- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index 599dee835b..ee4e413c5e 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -57,7 +57,7 @@ use xor_name::XorName; const MAX_RECORDS_COUNT: usize = 16 * 1024; /// The maximum number of records to cache in memory. -const MAX_RECORDS_CACHE_SIZE: usize = 100; +const MAX_RECORDS_CACHE_SIZE: usize = 25; /// File name of the recorded historical quoting metrics. const HISTORICAL_QUOTING_METRICS_FILENAME: &str = "historic_quoting_metrics"; diff --git a/sn_node/src/bin/safenode/main.rs b/sn_node/src/bin/safenode/main.rs index dd328b14b2..1b18429e89 100644 --- a/sn_node/src/bin/safenode/main.rs +++ b/sn_node/src/bin/safenode/main.rs @@ -24,7 +24,9 @@ use sn_logging::{Level, LogFormat, LogOutputDest, ReloadHandle}; use sn_node::{Marker, NodeBuilder, NodeEvent, NodeEventsReceiver}; use sn_peers_acquisition::PeersArgs; use sn_protocol::{ - node::get_safenode_root_dir, node_rpc::{NodeCtrl, StopResult}, version::IDENTIFY_PROTOCOL_STR, + node::get_safenode_root_dir, + node_rpc::{NodeCtrl, StopResult}, + version::IDENTIFY_PROTOCOL_STR, }; use std::{ env, @@ -530,7 +532,9 @@ fn monitor_node_events(mut node_events_rx: NodeEventsReceiver, ctrl_tx: mpsc::Se }) .await { - error!("Failed to send node control msg to safenode bin main thread: {err}"); + error!( + "Failed to send node control msg to safenode bin main thread: {err}" + ); break; } } diff --git a/sn_node/src/bin/safenode/rpc_service.rs b/sn_node/src/bin/safenode/rpc_service.rs index eef388b2d5..8d16ba8f3d 100644 --- a/sn_node/src/bin/safenode/rpc_service.rs +++ b/sn_node/src/bin/safenode/rpc_service.rs @@ -202,7 +202,14 @@ impl SafeNode for SafeNodeRpcService { }; let delay = Duration::from_millis(request.get_ref().delay_millis); - match self.ctrl_tx.send(NodeCtrl::Stop { delay, result: StopResult::Success(cause.to_string()) }).await { + match self + .ctrl_tx + .send(NodeCtrl::Stop { + delay, + result: StopResult::Success(cause.to_string()), + }) + .await + { Ok(()) => Ok(Response::new(StopResponse {})), Err(err) => Err(Status::new( Code::Internal, From 04ec14e1718193bcc76b5c07b902161b5210d4a6 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 22 Oct 2024 00:24:08 +0530 Subject: [PATCH 250/255] feat(autonomi): download either a file or directory based on data --- autonomi-cli/src/actions/download.rs | 52 ----------------------- autonomi-cli/src/actions/mod.rs | 4 -- autonomi-cli/src/actions/progress_bar.rs | 1 + autonomi-cli/src/commands.rs | 3 +- autonomi-cli/src/commands/file.rs | 40 +++++++++++++----- autonomi/src/client/archive.rs | 4 +- autonomi/src/client/fs.rs | 54 +++++++++++++++++++----- autonomi/src/client/mod.rs | 1 + autonomi/tests/fs.rs | 9 ++-- 9 files changed, 84 insertions(+), 84 deletions(-) delete mode 100644 autonomi-cli/src/actions/download.rs diff --git a/autonomi-cli/src/actions/download.rs b/autonomi-cli/src/actions/download.rs deleted file mode 100644 index ba004930e3..0000000000 --- a/autonomi-cli/src/actions/download.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::get_progress_bar; -use autonomi::{client::address::str_to_addr, Client}; -use color_eyre::eyre::{eyre, Context, Result}; -use std::path::PathBuf; - -pub async fn download(addr: &str, dest_path: &str, client: &mut Client) -> Result<()> { - let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; - let archive = client - .archive_get(address) - .await - .wrap_err("Failed to fetch data from address")?; - - let progress_bar = get_progress_bar(archive.map.len() as u64)?; - let mut all_errs = vec![]; - for (path, addr) in archive.map { - progress_bar.println(format!("Fetching file: {path:?}...")); - let bytes = match client.data_get(addr).await { - Ok(bytes) => bytes, - Err(e) => { - let err = format!("Failed to fetch file {path:?}: {e}"); - all_errs.push(err); - continue; - } - }; - - let path = PathBuf::from(dest_path).join(path); - let here = PathBuf::from("."); - let parent = path.parent().unwrap_or_else(|| &here); - std::fs::create_dir_all(parent)?; - std::fs::write(path, bytes)?; - progress_bar.clone().inc(1); - } - progress_bar.finish_and_clear(); - - if all_errs.is_empty() { - println!("Successfully downloaded data at: {addr}"); - Ok(()) - } else { - let err_no = all_errs.len(); - eprintln!("{err_no} errors while downloading data at: {addr}"); - eprintln!("{all_errs:#?}"); - Err(eyre!("Errors while downloading data")) - } -} diff --git a/autonomi-cli/src/actions/mod.rs b/autonomi-cli/src/actions/mod.rs index 8b4662c3d9..98ef491064 100644 --- a/autonomi-cli/src/actions/mod.rs +++ b/autonomi-cli/src/actions/mod.rs @@ -7,10 +7,6 @@ // permissions and limitations relating to use of the SAFE Network Software. mod connect; -mod download; mod progress_bar; pub use connect::connect_to_network; -pub use download::download; - -pub use progress_bar::get_progress_bar; diff --git a/autonomi-cli/src/actions/progress_bar.rs b/autonomi-cli/src/actions/progress_bar.rs index 2fcfe0ba20..5e2c6c914e 100644 --- a/autonomi-cli/src/actions/progress_bar.rs +++ b/autonomi-cli/src/actions/progress_bar.rs @@ -10,6 +10,7 @@ use color_eyre::eyre::Result; use indicatif::{ProgressBar, ProgressStyle}; use std::time::Duration; +#[allow(dead_code)] pub fn get_progress_bar(length: u64) -> Result { let progress_bar = ProgressBar::new(length); progress_bar.set_style( diff --git a/autonomi-cli/src/commands.rs b/autonomi-cli/src/commands.rs index bb718df43a..4c2067aa87 100644 --- a/autonomi-cli/src/commands.rs +++ b/autonomi-cli/src/commands.rs @@ -9,6 +9,7 @@ mod file; mod register; mod vault; +use std::path::PathBuf; use clap::Subcommand; use color_eyre::Result; @@ -55,7 +56,7 @@ pub enum FileCmd { /// The address of the file to download. addr: String, /// The destination file path. 
- dest_file: String, + dest_file: PathBuf, }, /// List previous uploads diff --git a/autonomi-cli/src/commands/file.rs b/autonomi-cli/src/commands/file.rs index d99a848214..bfa4719460 100644 --- a/autonomi-cli/src/commands/file.rs +++ b/autonomi-cli/src/commands/file.rs @@ -8,9 +8,11 @@ use crate::utils::collect_upload_summary; use autonomi::client::address::addr_to_str; +use autonomi::client::address::str_to_addr; use autonomi::Multiaddr; use color_eyre::eyre::Context; use color_eyre::eyre::Result; +use std::path::Path; use std::path::PathBuf; pub async fn cost(file: &str, peers: Vec) -> Result<()> { @@ -26,22 +28,35 @@ pub async fn cost(file: &str, peers: Vec) -> Result<()> { println!("Total cost: {cost}"); Ok(()) } -pub async fn upload(file: &str, peers: Vec) -> Result<()> { +pub async fn upload(path: &str, peers: Vec) -> Result<()> { let wallet = crate::keys::load_evm_wallet()?; let mut client = crate::actions::connect_to_network(peers).await?; let event_receiver = client.enable_client_events(); let (upload_summary_thread, upload_completed_tx) = collect_upload_summary(event_receiver); - println!("Uploading data to network..."); + let path = PathBuf::from(path); + + let xor_name = if path.is_dir() { + println!("Uploading directory: {path:?}"); + info!("Uploading directory: {path:?}"); + client + .dir_upload(&path, &wallet) + .await + .wrap_err("Failed to upload directory")? + } else { + println!("Uploading file: {path:?}"); + info!("Uploading file: {path:?}"); + client + .file_upload(&path, &wallet) + .await + .wrap_err("Failed to upload file")? + }; - let xor_name = client - .dir_upload(PathBuf::from(file), &wallet) - .await - .wrap_err("Failed to upload file")?; let addr = addr_to_str(xor_name); - println!("Successfully uploaded: {file}"); + println!("Successfully uploaded: {path:?}"); println!("At address: {addr}"); + info!("Successfully uploaded: {path:?} at address: {addr}"); if let Ok(()) = upload_completed_tx.send(()) { let summary = upload_summary_thread.await?; if summary.record_count == 0 { @@ -50,13 +65,18 @@ pub async fn upload(file: &str, peers: Vec) -> Result<()> { println!("Number of chunks uploaded: {}", summary.record_count); println!("Total cost: {} AttoTokens", summary.tokens_spent); } + info!("Summary for upload of data {path:?} at {addr:?}: {summary:?}"); } Ok(()) } -pub async fn download(addr: &str, dest_path: &str, peers: Vec) -> Result<()> { - let mut client = crate::actions::connect_to_network(peers).await?; - crate::actions::download(addr, dest_path, &mut client).await +pub async fn download(addr: &str, dest_path: &Path, peers: Vec) -> Result<()> { + let client = crate::actions::connect_to_network(peers).await?; + let address = str_to_addr(addr).wrap_err("Failed to parse data address")?; + + client.download_file_or_dir(address, dest_path).await?; + + Ok(()) } pub fn list(_peers: Vec) -> Result<()> { diff --git a/autonomi/src/client/archive.rs b/autonomi/src/client/archive.rs index d3cf9714ec..f38ca24cbc 100644 --- a/autonomi/src/client/archive.rs +++ b/autonomi/src/client/archive.rs @@ -30,7 +30,7 @@ pub struct Archive { impl Archive { /// Deserialize from bytes. - pub fn from_bytes(data: Bytes) -> Result { + pub fn from_bytes(data: &Bytes) -> Result { let root: Archive = rmp_serde::from_slice(&data[..])?; Ok(root) @@ -49,7 +49,7 @@ impl Client { /// Fetch an archive from the network pub async fn archive_get(&self, addr: ArchiveAddr) -> Result { let data = self.data_get(addr).await?; - Ok(Archive::from_bytes(data)?) + Ok(Archive::from_bytes(&data)?) 
} /// Upload an archive to the network diff --git a/autonomi/src/client/fs.rs b/autonomi/src/client/fs.rs index 8fff06324c..674e03fc2b 100644 --- a/autonomi/src/client/fs.rs +++ b/autonomi/src/client/fs.rs @@ -10,7 +10,7 @@ use crate::client::Client; use bytes::Bytes; use sn_evm::EvmWallet; use std::collections::HashMap; -use std::path::PathBuf; +use std::path::Path; use super::archive::{Archive, ArchiveAddr}; use super::data::{DataAddr, GetError, PutError}; @@ -48,7 +48,7 @@ impl Client { pub async fn file_download( &self, data_addr: DataAddr, - to_dest: PathBuf, + to_dest: &Path, ) -> Result<(), DownloadError> { let data = self.data_get(data_addr).await?; if let Some(parent) = to_dest.parent() { @@ -62,20 +62,52 @@ impl Client { pub async fn dir_download( &self, archive_addr: ArchiveAddr, - to_dest: PathBuf, + to_dest: &Path, ) -> Result<(), DownloadError> { let archive = self.archive_get(archive_addr).await?; for (path, addr) in archive.map { - self.file_download(addr, to_dest.join(path)).await?; + self.file_download(addr, &to_dest.join(path)).await?; } Ok(()) } + /// Download either a file or a directory depending on the data present at the provided address. + pub async fn download_file_or_dir( + &self, + address: DataAddr, + to_dest: &Path, + ) -> Result<(), DownloadError> { + let data = self.data_get(address).await?; + + if let Ok(archive) = Archive::from_bytes(&data) { + info!("Got an Archive from bytes, unpacking directory to {to_dest:?}"); + for (path, addr) in archive.map { + let dest = to_dest.join(path); + + #[cfg(feature = "loud")] + println!("Downloading file: {addr:?} to {dest:?}"); + + debug!("Downloading archived file: {addr:?} to {dest:?}"); + self.file_download(addr, &dest).await?; + } + } else { + info!("The downloaded data is not an Archive, saving it as a file."); + #[cfg(feature = "loud")] + println!("Downloading file: {address:?} to {to_dest:?}"); + if let Some(parent) = to_dest.parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(to_dest, data).await?; + } + + Ok(()) + } + /// Upload a directory to the network. The directory is recursively walked. /// Reads all files, splits into chunks, uploads chunks, uploads datamaps, uploads archive, returns ArchiveAddr (pointing to the archive) pub async fn dir_upload( &self, - dir_path: PathBuf, + dir_path: &Path, wallet: &EvmWallet, ) -> Result { let mut map = HashMap::new(); @@ -87,13 +119,13 @@ impl Client { continue; } - let path = entry.path().to_path_buf(); + let path = entry.path(); tracing::info!("Uploading file: {path:?}"); #[cfg(feature = "loud")] println!("Uploading file: {path:?}"); - let file = self.file_upload(path.clone(), wallet).await?; + let file = self.file_upload(path, wallet).await?; - map.insert(path, file); + map.insert(path.to_path_buf(), file); } let archive = Archive { map }; @@ -106,9 +138,9 @@ impl Client { /// Upload a file to the network. /// Reads file, splits into chunks, uploads chunks, uploads datamap, returns DataAddr (pointing to the datamap) - async fn file_upload( + pub async fn file_upload( &self, - path: PathBuf, + path: &Path, wallet: &EvmWallet, ) -> Result { let data = tokio::fs::read(path).await?; @@ -119,7 +151,7 @@ impl Client { /// Get the cost to upload a file/dir to the network. 
/// quick and dirty implementation, please refactor once files are cleanly implemented - pub async fn file_cost(&self, path: &PathBuf) -> Result { + pub async fn file_cost(&self, path: &Path) -> Result { let mut map = HashMap::new(); let mut total_cost = sn_evm::Amount::ZERO; diff --git a/autonomi/src/client/mod.rs b/autonomi/src/client/mod.rs index f19216fe84..68dfe0d50a 100644 --- a/autonomi/src/client/mod.rs +++ b/autonomi/src/client/mod.rs @@ -202,6 +202,7 @@ pub enum ClientEvent { } /// Summary of an upload operation. +#[derive(Debug, Clone)] pub struct UploadSummary { pub record_count: usize, pub tokens_spent: Amount, diff --git a/autonomi/tests/fs.rs b/autonomi/tests/fs.rs index 5b1fce533b..4c286725aa 100644 --- a/autonomi/tests/fs.rs +++ b/autonomi/tests/fs.rs @@ -14,6 +14,7 @@ use sha2::{Digest, Sha256}; use sn_logging::LogBuilder; use std::fs::File; use std::io::{BufReader, Read}; +use std::path::PathBuf; use std::time::Duration; use test_utils::{evm::get_funded_wallet, peers_from_env}; use tokio::time::sleep; @@ -30,13 +31,13 @@ async fn dir_upload_download() -> Result<()> { let wallet = get_funded_wallet(); let addr = client - .dir_upload("tests/file/test_dir".into(), &wallet) + .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet) .await?; sleep(Duration::from_secs(10)).await; client - .dir_download(addr, "tests/file/test_dir_fetched".into()) + .dir_download(addr, &PathBuf::from("tests/file/test_dir_fetched")) .await?; // compare the two directories @@ -86,7 +87,7 @@ async fn file_into_vault() -> Result<()> { let client_sk = bls::SecretKey::random(); let addr = client - .dir_upload("tests/file/test_dir".into(), &wallet) + .dir_upload(&PathBuf::from("tests/file/test_dir"), &wallet) .await?; sleep(Duration::from_secs(2)).await; @@ -99,7 +100,7 @@ async fn file_into_vault() -> Result<()> { let new_client = Client::connect(&[]).await?; if let Some(ap) = new_client.fetch_and_decrypt_vault(&client_sk).await? 
{ - let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(ap)?; + let ap_archive_fetched = autonomi::client::archive::Archive::from_bytes(&ap)?; assert_eq!( archive.map, ap_archive_fetched.map, From c529ec87fc63f3e106a46ea82ab3b249fe71b188 Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 22 Oct 2024 00:26:24 +0530 Subject: [PATCH 251/255] chore(autonomi): add more logging by default --- autonomi-cli/src/main.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/autonomi-cli/src/main.rs b/autonomi-cli/src/main.rs index 2cf4e9cd61..de4cdcf4c4 100644 --- a/autonomi-cli/src/main.rs +++ b/autonomi-cli/src/main.rs @@ -49,14 +49,16 @@ async fn main() -> Result<()> { fn init_logging_and_metrics(opt: &Opt) -> Result<(ReloadHandle, Option)> { let logging_targets = vec![ + ("autonomi-cli".to_string(), Level::TRACE), + ("autonomi".to_string(), Level::TRACE), + ("evmlib".to_string(), Level::TRACE), + ("sn_evm".to_string(), Level::TRACE), ("sn_networking".to_string(), Level::INFO), ("sn_build_info".to_string(), Level::TRACE), - ("autonomi-cli".to_string(), Level::TRACE), ("sn_logging".to_string(), Level::TRACE), ("sn_peers_acquisition".to_string(), Level::TRACE), ("sn_protocol".to_string(), Level::TRACE), ("sn_registers".to_string(), Level::TRACE), - ("sn_evm".to_string(), Level::TRACE), ]; let mut log_builder = LogBuilder::new(logging_targets); log_builder.output_dest(opt.log_output_dest.clone()); From af19ff01654b20f7efd4e60f56e306bfb910e469 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 21 Oct 2024 21:25:25 +0100 Subject: [PATCH 252/255] chore(release): release candidate `2024.10.3.2` --- Cargo.lock | 42 ++++++++++++++++---------------- autonomi-cli/Cargo.toml | 12 ++++----- autonomi/Cargo.toml | 18 +++++++------- evm_testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++--- node-launchpad/Cargo.toml | 14 +++++------ release-cycle-info | 2 +- sn_build_info/Cargo.toml | 2 +- sn_evm/Cargo.toml | 4 +-- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 12 ++++----- sn_node/Cargo.toml | 28 ++++++++++----------- sn_node_manager/Cargo.toml | 16 ++++++------ sn_node_rpc_client/Cargo.toml | 16 ++++++------ sn_peers_acquisition/Cargo.toml | 4 +-- sn_protocol/Cargo.toml | 10 ++++---- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++--- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 6 ++--- token_supplies/Cargo.toml | 2 +- 23 files changed, 110 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 921d2e036b..5831006801 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1069,7 +1069,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.2.0-rc.1" +version = "0.2.0-rc.2" dependencies = [ "bip39", "blsttc", @@ -1113,7 +1113,7 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" dependencies = [ "autonomi", "clap", @@ -2747,7 +2747,7 @@ dependencies = [ [[package]] name = "evm_testnet" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" dependencies = [ "clap", "dirs-next", @@ -2758,7 +2758,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" dependencies = [ "alloy", "dirs-next", @@ -5565,7 +5565,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.8-rc.1" +version = "0.2.8-rc.2" dependencies = [ "clap", "clap-verbosity-flag", @@ -5682,7 +5682,7 @@ dependencies = [ 
[[package]] name = "node-launchpad" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" dependencies = [ "atty", "better-panic", @@ -8027,7 +8027,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.11.0-rc.1" +version = "0.11.0-rc.2" dependencies = [ "assert_cmd", "assert_fs", @@ -8103,7 +8103,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.16-rc.1" +version = "0.1.16-rc.2" dependencies = [ "chrono", "tracing", @@ -8145,7 +8145,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" dependencies = [ "custom_debug", "evmlib", @@ -8168,7 +8168,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.37-rc.1" +version = "0.2.37-rc.2" dependencies = [ "chrono", "color-eyre", @@ -8193,7 +8193,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.17-rc.1" +version = "0.1.17-rc.2" dependencies = [ "clap", "color-eyre", @@ -8207,7 +8207,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.19.0-rc.1" +version = "0.19.0-rc.2" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8252,7 +8252,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.112.0-rc.1" +version = "0.112.0-rc.2" dependencies = [ "assert_fs", "async-trait", @@ -8309,7 +8309,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.32-rc.1" +version = "0.6.32-rc.2" dependencies = [ "assert_fs", "async-trait", @@ -8336,7 +8336,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.4-rc.1" +version = "0.5.4-rc.2" dependencies = [ "clap", "lazy_static", @@ -8352,7 +8352,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.12-rc.1" +version = "0.17.12-rc.2" dependencies = [ "blsttc", "bytes", @@ -8382,7 +8382,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" dependencies = [ "blsttc", "crdts", @@ -8399,7 +8399,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" dependencies = [ "async-trait", "dirs-next", @@ -8425,7 +8425,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.0-rc.1" +version = "0.20.0-rc.2" dependencies = [ "assert_fs", "blsttc", @@ -8758,7 +8758,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.8-rc.1" +version = "0.4.8-rc.2" dependencies = [ "bytes", "color-eyre", @@ -8902,7 +8902,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.55-rc.1" +version = "0.1.55-rc.2" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index fabf258488..e346fe9024 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "autonomi-cli" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" edition = "2021" [[bin]] @@ -18,7 +18,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = [ "data", "fs", "registers", @@ -38,12 +38,12 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } -sn_build_info = { path = "../sn_build_info", version = 
"0.1.16-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = [ +autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index 4abb4697dc..cfc97097c4 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.2.0-rc.1" +version = "0.2.0-rc.2" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -37,11 +37,11 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } -sn_protocol = { version = "0.17.12-rc.1", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_protocol = { version = "0.17.12-rc.2", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -55,8 +55,8 @@ serde-wasm-bindgen = "0.6.5" [dev-dependencies] eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
test_utils = { path = "../test_utils" } @@ -66,7 +66,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.1-rc.1", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.1-rc.2", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index 4dc12e3a74..01fbd73043 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index 01a370a6a6..c4a0808ff6 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index e49e8d5eea..13ba4f4b21 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.8-rc.1" +version = "0.2.8-rc.2" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 332878595f..bbf2c5975f 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } -sn-node-manager = { version = "0.11.0-rc.1", path = "../sn_node_manager" } -sn_peers_acquisition = { version = 
"0.5.4-rc.1", path = "../sn_peers_acquisition" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn-node-manager = { version = "0.11.0-rc.2", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.4-rc.2", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.0-rc.1", path = "../sn_service_management" } +sn_service_management = { version = "0.4.0-rc.2", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info index 31b9961213..884a0ad6bf 100644 --- a/release-cycle-info +++ b/release-cycle-info @@ -15,4 +15,4 @@ release-year: 2024 release-month: 10 release-cycle: 3 -release-cycle-counter: 1 +release-cycle-counter: 2 diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index fdd1b6daf8..79a3fcada6 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.16-rc.1" +version = "0.1.16-rc.2" build = "build.rs" [build-dependencies] diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index ee66ed91b0..2fd0d77412 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.1" +version = "0.1.1-rc.2" [features] test-utils = [] @@ -16,7 +16,7 @@ local = ["evmlib/local"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 2e03dd6882..91f1daf181 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.37-rc.1" +version = "0.2.37-rc.2" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index 8adb7d35fe..fe79385507 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.17-rc.1" +version = "0.1.17-rc.2" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index fb922cef70..ca17d61fbd 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.0-rc.1" +version = "0.19.0-rc.2" [features] default = [] @@ -54,11 +54,11 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } -sn_registers = { path = 
"../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 260d88d563..b3a61473fa 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.112.0-rc.1" +version = "0.112.0-rc.2" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,15 +52,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } -autonomi = { path = "../autonomi", version = "0.2.0-rc.1", features = ["registers"] } +evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } +autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 2e247010f8..e1ce02e47a 100644 --- a/sn_node_manager/Cargo.toml +++ 
b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.11.0-rc.1" +version = "0.11.0-rc.2" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index d8bc548854..a618304c8d 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.32-rc.1" +version = "0.6.32-rc.2" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } -sn_node = { path = "../sn_node", version = "0.112.0-rc.1" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_node = { path = "../sn_node", version = "0.112.0-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 897473f3c3..96726a32fa 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" 
readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.4-rc.1" +version = "0.5.4-rc.2" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 2109c53d60..1c2c0f36d3 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.12-rc.1" +version = "0.17.12-rc.2" [features] default = [] @@ -28,10 +28,10 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.1" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.1" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.1" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } +sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index d83e5e38e4..b71c4619c8 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 0f86b0f817..696ac6f652 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0-rc.1" +version = "0.4.0-rc.2" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.1", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index 6ee7fcb0a1..da27b4a0f2 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.20.0-rc.1" +version = "0.20.0-rc.2" 
[features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index e3cc909848..cfb3aec6cb 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.8-rc.1" +version = "0.4.8-rc.2" [features] local = ["sn_peers_acquisition/local"] @@ -16,9 +16,9 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1-rc.1" } +evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.1" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index b8604571c3..aa6ec8cdba 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.55-rc.1" +version = "0.1.55-rc.2" [dependencies] From 4ad14d3ff35668a5474cc77a8749b5e3ba63f9f9 Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Mon, 21 Oct 2024 22:21:35 +0100 Subject: [PATCH 253/255] docs: changelog for `2024.10.3.2` --- CHANGELOG.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 070c3c661a..0d1ecc79fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,23 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-10-22 + +Unfortunately the entry for this release will not have fully detailed changes. This release is +special in that it's very large and moves us to a new, EVM-based payments system. The GitHub Release +description has a list of all the merged PRs. If you want more detail, consult the PR list. Normal +service will resume for subsequent releases. + +Here is a brief summary of the changes: + +- A new `autonomi` CLI that uses EVM payments and replaces the previous `safe` CLI. +- A new `autonomi` API that replaces `sn_client` with a simpler interface. +- The node has been changed to use EVM payments. +- The node runs without a wallet. This increases security and removes the need for forwarding. +- Data is paid for through an EVM smart contract. Payment proofs are not linked to the original + data. +- Payment royalties have been removed, resulting in less centralization and lower fees. + ## 2024-10-08 ### Network From 07953e7a9a27bdf37443a1c5cffe5852b5a85fed Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 22 Oct 2024 13:13:02 +0100 Subject: [PATCH 254/255] docs: update the readme with the latest keys Advice on setting these keys had been removed, but as far as I know, they are still required. 
--- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 31d6c73e43..48751adf0e 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,10 @@ You should build from the `stable` branch, as follows: ``` git checkout stable +export FOUNDATION_PK=88a82d718d16dccc839188eddc9a46cb216667c940cd46285199458c919a170a55490db09763ae216ed25e9db78c3576 +export GENESIS_PK=aa3526db2dbc43998e0b541b8455e2ce9dd4f1cad80090e671da16e3cd11cd5e3550f74c3cefd09ad253d93cacae2320 +export NETWORK_ROYALTIES_PK=8b5463a2c8142959a7b7cfd9295587812eb07ccbe13a85865503c8004eeeb6889ccace3588dcf9f7396784d9ee48f4d5 +export PAYMENT_FORWARD_PK=87d5b511a497183c945df63ab8790a4b94cfe452d00bfbdb39e41ee861384fe0de716a224da1c6fd11356de49877dfc2 cargo build --release --features=network-contacts --bin safenode ``` From e6778f94b16aaa3e22266eb14396cf28558f6ccc Mon Sep 17 00:00:00 2001 From: Chris O'Neil Date: Tue, 22 Oct 2024 13:14:53 +0100 Subject: [PATCH 255/255] chore(release): stable release 2024.10.3.2 ================== Crate Versions ================== autonomi: 0.2.0 autonomi-cli: 0.1.1 evmlib: 0.1.1 evm_testnet: 0.1.1 sn_build_info: 0.1.16 sn_evm: 0.1.1 sn_logging: 0.2.37 sn_metrics: 0.1.17 nat-detection: 0.2.8 sn_networking: 0.19.0 sn_node: 0.112.0 node-launchpad: 0.4.0 sn_node_manager: 0.11.0 sn_node_rpc_client: 0.6.32 sn_peers_acquisition: 0.5.4 sn_protocol: 0.17.12 sn_registers: 0.4.0 sn_service_management: 0.4.0 sn_transfers: 0.20.0 test_utils: 0.4.8 token_supplies: 0.1.55 =================== Binary Versions =================== nat-detection: 0.2.8 node-launchpad: 0.4.0 autonomi: 0.1.1 safenode: 0.112.0 safenode-manager: 0.11.0 safenode_rpc_client: 0.6.32 safenodemand: 0.11.0 --- Cargo.lock | 42 ++++++++++++++++---------------- autonomi-cli/Cargo.toml | 12 ++++----- autonomi/Cargo.toml | 18 +++++++------- evm_testnet/Cargo.toml | 6 ++--- evmlib/Cargo.toml | 2 +- nat-detection/Cargo.toml | 8 +++--- node-launchpad/Cargo.toml | 14 +++++------ sn_build_info/Cargo.toml | 2 +- sn_evm/Cargo.toml | 4 +-- sn_logging/Cargo.toml | 2 +- sn_metrics/Cargo.toml | 2 +- sn_networking/Cargo.toml | 12 ++++----- sn_node/Cargo.toml | 28 ++++++++++----------- sn_node_manager/Cargo.toml | 16 ++++++------ sn_node_rpc_client/Cargo.toml | 16 ++++++------ sn_peers_acquisition/Cargo.toml | 4 +-- sn_protocol/Cargo.toml | 10 ++++---- sn_registers/Cargo.toml | 2 +- sn_service_management/Cargo.toml | 8 +++--- sn_transfers/Cargo.toml | 2 +- test_utils/Cargo.toml | 6 ++--- token_supplies/Cargo.toml | 2 +- 22 files changed, 109 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5831006801..81f3daed4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1069,7 +1069,7 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "autonomi" -version = "0.2.0-rc.2" +version = "0.2.0" dependencies = [ "bip39", "blsttc", @@ -1113,7 +1113,7 @@ dependencies = [ [[package]] name = "autonomi-cli" -version = "0.1.1-rc.2" +version = "0.1.1" dependencies = [ "autonomi", "clap", @@ -2747,7 +2747,7 @@ dependencies = [ [[package]] name = "evm_testnet" -version = "0.1.1-rc.2" +version = "0.1.1" dependencies = [ "clap", "dirs-next", @@ -2758,7 +2758,7 @@ dependencies = [ [[package]] name = "evmlib" -version = "0.1.1-rc.2" +version = "0.1.1" dependencies = [ "alloy", "dirs-next", @@ -5565,7 +5565,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.8-rc.2" +version = "0.2.8" dependencies = [ "clap", "clap-verbosity-flag", @@ -5682,7 +5682,7 @@ dependencies = [ [[package]] 
name = "node-launchpad" -version = "0.4.0-rc.2" +version = "0.4.0" dependencies = [ "atty", "better-panic", @@ -8027,7 +8027,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.11.0-rc.2" +version = "0.11.0" dependencies = [ "assert_cmd", "assert_fs", @@ -8103,7 +8103,7 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.16-rc.2" +version = "0.1.16" dependencies = [ "chrono", "tracing", @@ -8145,7 +8145,7 @@ dependencies = [ [[package]] name = "sn_evm" -version = "0.1.1-rc.2" +version = "0.1.1" dependencies = [ "custom_debug", "evmlib", @@ -8168,7 +8168,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.37-rc.2" +version = "0.2.37" dependencies = [ "chrono", "color-eyre", @@ -8193,7 +8193,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.17-rc.2" +version = "0.1.17" dependencies = [ "clap", "color-eyre", @@ -8207,7 +8207,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.19.0-rc.2" +version = "0.19.0" dependencies = [ "aes-gcm-siv", "async-trait", @@ -8252,7 +8252,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.112.0-rc.2" +version = "0.112.0" dependencies = [ "assert_fs", "async-trait", @@ -8309,7 +8309,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.32-rc.2" +version = "0.6.32" dependencies = [ "assert_fs", "async-trait", @@ -8336,7 +8336,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.5.4-rc.2" +version = "0.5.4" dependencies = [ "clap", "lazy_static", @@ -8352,7 +8352,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.12-rc.2" +version = "0.17.12" dependencies = [ "blsttc", "bytes", @@ -8382,7 +8382,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.4.0-rc.2" +version = "0.4.0" dependencies = [ "blsttc", "crdts", @@ -8399,7 +8399,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.4.0-rc.2" +version = "0.4.0" dependencies = [ "async-trait", "dirs-next", @@ -8425,7 +8425,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.20.0-rc.2" +version = "0.20.0" dependencies = [ "assert_fs", "blsttc", @@ -8758,7 +8758,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.8-rc.2" +version = "0.4.8" dependencies = [ "bytes", "color-eyre", @@ -8902,7 +8902,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.55-rc.2" +version = "0.1.55" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/autonomi-cli/Cargo.toml b/autonomi-cli/Cargo.toml index e346fe9024..83adf193d2 100644 --- a/autonomi-cli/Cargo.toml +++ b/autonomi-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "autonomi-cli" -version = "0.1.1-rc.2" +version = "0.1.1" edition = "2021" [[bin]] @@ -18,7 +18,7 @@ name = "files" harness = false [dependencies] -autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = [ +autonomi = { path = "../autonomi", version = "0.2.0", features = [ "data", "fs", "registers", @@ -38,12 +38,12 @@ tokio = { version = "1.32.0", features = [ "fs", ] } tracing = { version = "~0.1.26" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } +sn_peers_acquisition 
= { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } [dev-dependencies] -autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = [ +autonomi = { path = "../autonomi", version = "0.2.0", features = [ "data", "fs", ] } diff --git a/autonomi/Cargo.toml b/autonomi/Cargo.toml index cfc97097c4..617452db53 100644 --- a/autonomi/Cargo.toml +++ b/autonomi/Cargo.toml @@ -3,7 +3,7 @@ authors = ["MaidSafe Developers "] description = "Autonomi client API" name = "autonomi" license = "GPL-3.0" -version = "0.2.0-rc.2" +version = "0.2.0" edition = "2021" homepage = "https://maidsafe.net" readme = "README.md" @@ -37,11 +37,11 @@ rand = "0.8.5" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } -sn_protocol = { version = "0.17.12-rc.2", path = "../sn_protocol" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { version = "0.17.12", path = "../sn_protocol" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } thiserror = "1.0.23" tokio = { version = "1.35.0", features = ["sync"] } tracing = { version = "~0.1.26" } @@ -55,8 +55,8 @@ serde-wasm-bindgen = "0.6.5" [dev-dependencies] eyre = "0.6.5" sha2 = "0.10.6" -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } # Do not specify the version field. Release process expects even the local dev deps to be published. # Removing the version field is a workaround. 
test_utils = { path = "../test_utils" } @@ -66,7 +66,7 @@ wasm-bindgen-test = "0.3.43" [target.'cfg(target_arch = "wasm32")'.dependencies] console_error_panic_hook = "0.1.7" -evmlib = { path = "../evmlib", version = "0.1.1-rc.2", features = ["wasm-bindgen"] } +evmlib = { path = "../evmlib", version = "0.1.1", features = ["wasm-bindgen"] } # See https://github.com/sebcrozet/instant/blob/7bd13f51f5c930239fddc0476a837870fb239ed7/README.md#using-instant-for-a-wasm-platform-where-performancenow-is-not-available instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] } js-sys = "0.3.70" diff --git a/evm_testnet/Cargo.toml b/evm_testnet/Cargo.toml index 01fbd73043..6712604130 100644 --- a/evm_testnet/Cargo.toml +++ b/evm_testnet/Cargo.toml @@ -6,13 +6,13 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evm_testnet" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.2" +version = "0.1.1" [dependencies] clap = { version = "4.5", features = ["derive"] } dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.1" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } tokio = { version = "1.40", features = ["rt-multi-thread", "signal"] } [lints] diff --git a/evmlib/Cargo.toml b/evmlib/Cargo.toml index c4a0808ff6..fa5b2d9769 100644 --- a/evmlib/Cargo.toml +++ b/evmlib/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://maidsafe.net" license = "GPL-3.0" name = "evmlib" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.2" +version = "0.1.1" [features] wasm-bindgen = ["alloy/wasm-bindgen"] diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 13ba4f4b21..bbab570e94 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.8-rc.2" +version = "0.2.8" [[bin]] name = "nat-detection" @@ -31,9 +31,9 @@ libp2p = { version = "0.54.1", features = [ "macros", "upnp", ] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index bbf2c5975f..583edb4e60 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.4.0-rc.2" +version = "0.4.0" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,13 +51,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } -sn-node-manager = { version = "0.11.0-rc.2", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.5.4-rc.2", path = "../sn_peers_acquisition" } 
-sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn-node-manager = { version = "0.11.0", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.5.4", path = "../sn_peers_acquisition" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.4.0-rc.2", path = "../sn_service_management" } +sn_service_management = { version = "0.4.0", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 79a3fcada6..de2f27b5cb 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.16-rc.2" +version = "0.1.16" build = "build.rs" [build-dependencies] diff --git a/sn_evm/Cargo.toml b/sn_evm/Cargo.toml index 2fd0d77412..73326d9f36 100644 --- a/sn_evm/Cargo.toml +++ b/sn_evm/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_evm" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.1-rc.2" +version = "0.1.1" [features] test-utils = [] @@ -16,7 +16,7 @@ local = ["evmlib/local"] [dependencies] custom_debug = "~0.6.1" -evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } +evmlib = { path = "../evmlib", version = "0.1.1" } hex = "~0.4.3" lazy_static = "~1.4.0" libp2p = { version = "0.53", features = ["identify", "kad"] } diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 91f1daf181..090e3f8a12 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.37-rc.2" +version = "0.2.37" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index fe79385507..4a550a58a8 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.17-rc.2" +version = "0.1.17" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index ca17d61fbd..4f2270ff37 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.19.0-rc.2" +version = "0.19.0" [features] default = [] @@ -54,11 +54,11 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path 
= "../sn_evm", version = "0.1.1" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index b3a61473fa..85619de0b5 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.112.0-rc.2" +version = "0.112.0" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -52,15 +52,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.30.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } -sn_networking = { path = "../sn_networking", version = "0.19.0-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_networking = { path = "../sn_networking", version = "0.19.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } sysinfo = { version = "0.30.8", default-features = false } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ @@ -83,16 +83,16 @@ strum = { version = "0.26.2", features = ["derive"] } color-eyre = "0.6.2" [dev-dependencies] -evmlib = { path = "../evmlib", version = "0.1.1-rc.2" } -autonomi = { path = "../autonomi", version = "0.2.0-rc.2", features = ["registers"] } +evmlib = { path = "../evmlib", version = "0.1.1" } +autonomi = { path = "../autonomi", version = "0.2.0", features = ["registers"] } reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.20.0", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index e1ce02e47a..4b152994c4 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.11.0-rc.2" +version = "0.11.0" [[bin]] name = "safenode-manager" @@ -46,14 +46,14 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_logging = { path = 
"../sn_logging", version = "0.2.37-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2" } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12" } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } sn-releases = "0.2.6" -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index a618304c8d..8316e1ea87 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.32-rc.2" +version = "0.6.32" [[bin]] name = "safenode_rpc_client" @@ -26,13 +26,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version = "0.54.1", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } -sn_node = { path = "../sn_node", version = "0.112.0-rc.2" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.4.0-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_node = { path = "../sn_node", version = "0.112.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.4.0" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 96726a32fa..c8e46ee8be 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.5.4-rc.2" +version = "0.5.4" [features] local = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version = "0.54.1", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", optional = true} +sn_protocol = { path = "../sn_protocol", version = "0.17.12", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", 
default-features = false } tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 1c2c0f36d3..622ed3dd4d 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.12-rc.2" +version = "0.17.12" [features] default = [] @@ -28,10 +28,10 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_build_info = { path = "../sn_build_info", version = "0.1.16-rc.2" } -sn_transfers = { path = "../sn_transfers", version = "0.20.0-rc.2" } -sn_registers = { path = "../sn_registers", version = "0.4.0-rc.2" } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_build_info = { path = "../sn_build_info", version = "0.1.16" } +sn_transfers = { path = "../sn_transfers", version = "0.20.0" } +sn_registers = { path = "../sn_registers", version = "0.4.0" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index b71c4619c8..fd68714064 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0-rc.2" +version = "0.4.0" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 696ac6f652..b0f60bc453 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0-rc.2" +version = "0.4.0" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.37-rc.2" } -sn_protocol = { path = "../sn_protocol", version = "0.17.12-rc.2", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.37" } +sn_protocol = { path = "../sn_protocol", version = "0.17.12", features = [ "rpc", ] } -sn_evm = { path = "../sn_evm", version = "0.1.1-rc.2" } +sn_evm = { path = "../sn_evm", version = "0.1.1" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index da27b4a0f2..cbd6206fba 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.20.0-rc.2" +version = "0.20.0" [features] reward-forward = [] diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml index cfb3aec6cb..48955e7e8c 100644 --- a/test_utils/Cargo.toml +++ b/test_utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "test_utils" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.8-rc.2" +version = "0.4.8" [features] local = ["sn_peers_acquisition/local"] @@ -16,9 +16,9 @@ local = ["sn_peers_acquisition/local"] bytes = { version = "1.0.1", features = ["serde"] } color-eyre = "~0.6.2" dirs-next = "~2.0.0" -evmlib = { path = "../evmlib", version = "0.1.1-rc.2" 
} +evmlib = { path = "../evmlib", version = "0.1.1" } libp2p = { version = "0.54.1", features = ["identify", "kad"] } rand = "0.8.5" serde = { version = "1.0.133", features = ["derive"] } serde_json = "1.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4-rc.2" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.4" } diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml index aa6ec8cdba..22cdd87d1c 100644 --- a/token_supplies/Cargo.toml +++ b/token_supplies/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "token_supplies" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.55-rc.2" +version = "0.1.55" [dependencies]