From c4a85b666f25ff53d70e49191ed4a1aea6a6fb30 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Wed, 16 Oct 2024 15:01:01 +0900 Subject: [PATCH] feat: remove cli/sn_client and faucet API To reintroduce native tokens now, we should do so encapsulated behind the Wallet struct in the autonomi API. This is used by nodes for verification, and clients for paying, sending etc. This allows us to keep the payment layer an abstraction in that one place (and optionally enact/use certain types of payment or validation layers). This commit removes everything in One Big Go, such that we can easily retrieve the state of the native token APIs (which should work) at this point. (That said, with EVM in, the CI tests were not running here, but it should not be far off working) --- sn_cli/CHANGELOG.md | 3693 ----------------- sn_cli/Cargo.toml | 86 - sn_cli/README.md | 9 - sn_cli/benches/files.rs | 155 - sn_cli/src/acc_packet.rs | 1603 ------- sn_cli/src/acc_packet/change_tracking.rs | 265 -- sn_cli/src/bin/main.rs | 384 -- sn_cli/src/bin/subcommands/files.rs | 262 -- sn_cli/src/bin/subcommands/folders.rs | 220 - sn_cli/src/bin/subcommands/mod.rs | 102 - sn_cli/src/bin/subcommands/register.rs | 213 - sn_cli/src/bin/subcommands/wallet.rs | 207 - sn_cli/src/bin/subcommands/wallet/audit.rs | 220 - sn_cli/src/bin/subcommands/wallet/helpers.rs | 156 - .../src/bin/subcommands/wallet/hot_wallet.rs | 452 -- .../src/bin/subcommands/wallet/wo_wallet.rs | 310 -- sn_cli/src/files.rs | 34 - sn_cli/src/files/chunk_manager.rs | 1045 ----- sn_cli/src/files/download.rs | 187 - sn_cli/src/files/estimate.rs | 83 - sn_cli/src/files/files_uploader.rs | 480 --- sn_cli/src/files/upload.rs | 71 - sn_cli/src/lib.rs | 17 - sn_cli/src/utils.rs | 37 - sn_client/CHANGELOG.md | 2712 ------------ sn_client/Cargo.toml | 90 - sn_client/README.md | 56 - sn_client/src/acc_packet.rs | 74 - sn_client/src/acc_packet/user_secret.rs | 74 - sn_client/src/api.rs | 1234 ------ sn_client/src/audit.rs | 17 - 
sn_client/src/audit/dag_crawling.rs | 644 --- sn_client/src/audit/dag_error.rs | 75 - sn_client/src/audit/spend_dag.rs | 831 ---- sn_client/src/audit/tests/mod.rs | 478 --- sn_client/src/audit/tests/setup.rs | 147 - sn_client/src/chunks.rs | 13 - sn_client/src/chunks/error.rs | 75 - sn_client/src/chunks/pac_man.rs | 136 - sn_client/src/error.rs | 164 - sn_client/src/event.rs | 67 - sn_client/src/faucet.rs | 147 - sn_client/src/files.rs | 195 - sn_client/src/files/download.rs | 532 --- sn_client/src/folders.rs | 344 -- sn_client/src/lib.rs | 158 - sn_client/src/register.rs | 833 ---- sn_client/src/test_utils.rs | 124 - sn_client/src/uploader/mod.rs | 461 -- sn_client/src/uploader/tests/mod.rs | 459 -- sn_client/src/uploader/tests/setup.rs | 461 -- sn_client/src/uploader/upload.rs | 1084 ----- sn_client/src/wallet.rs | 1175 ------ sn_client/tests/folders_api.rs | 424 -- sn_faucet/CHANGELOG.md | 1355 ------ sn_faucet/Cargo.toml | 58 - sn_faucet/README.md | 11 - sn_faucet/maid_address_claims.csv | 0 sn_faucet/src/faucet_server.rs | 576 --- sn_faucet/src/gutenberger.rs | 68 - sn_faucet/src/main.rs | 311 -- sn_faucet/src/token_distribution.rs | 734 ---- 62 files changed, 26688 deletions(-) delete mode 100644 sn_cli/CHANGELOG.md delete mode 100644 sn_cli/Cargo.toml delete mode 100644 sn_cli/README.md delete mode 100644 sn_cli/benches/files.rs delete mode 100644 sn_cli/src/acc_packet.rs delete mode 100644 sn_cli/src/acc_packet/change_tracking.rs delete mode 100644 sn_cli/src/bin/main.rs delete mode 100644 sn_cli/src/bin/subcommands/files.rs delete mode 100644 sn_cli/src/bin/subcommands/folders.rs delete mode 100644 sn_cli/src/bin/subcommands/mod.rs delete mode 100644 sn_cli/src/bin/subcommands/register.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/audit.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/helpers.rs delete mode 100644 sn_cli/src/bin/subcommands/wallet/hot_wallet.rs delete mode 100644 
sn_cli/src/bin/subcommands/wallet/wo_wallet.rs delete mode 100644 sn_cli/src/files.rs delete mode 100644 sn_cli/src/files/chunk_manager.rs delete mode 100644 sn_cli/src/files/download.rs delete mode 100644 sn_cli/src/files/estimate.rs delete mode 100644 sn_cli/src/files/files_uploader.rs delete mode 100644 sn_cli/src/files/upload.rs delete mode 100644 sn_cli/src/lib.rs delete mode 100644 sn_cli/src/utils.rs delete mode 100644 sn_client/CHANGELOG.md delete mode 100644 sn_client/Cargo.toml delete mode 100644 sn_client/README.md delete mode 100644 sn_client/src/acc_packet.rs delete mode 100644 sn_client/src/acc_packet/user_secret.rs delete mode 100644 sn_client/src/api.rs delete mode 100644 sn_client/src/audit.rs delete mode 100644 sn_client/src/audit/dag_crawling.rs delete mode 100644 sn_client/src/audit/dag_error.rs delete mode 100644 sn_client/src/audit/spend_dag.rs delete mode 100644 sn_client/src/audit/tests/mod.rs delete mode 100644 sn_client/src/audit/tests/setup.rs delete mode 100644 sn_client/src/chunks.rs delete mode 100644 sn_client/src/chunks/error.rs delete mode 100644 sn_client/src/chunks/pac_man.rs delete mode 100644 sn_client/src/error.rs delete mode 100644 sn_client/src/event.rs delete mode 100644 sn_client/src/faucet.rs delete mode 100644 sn_client/src/files.rs delete mode 100644 sn_client/src/files/download.rs delete mode 100644 sn_client/src/folders.rs delete mode 100644 sn_client/src/lib.rs delete mode 100644 sn_client/src/register.rs delete mode 100644 sn_client/src/test_utils.rs delete mode 100644 sn_client/src/uploader/mod.rs delete mode 100644 sn_client/src/uploader/tests/mod.rs delete mode 100644 sn_client/src/uploader/tests/setup.rs delete mode 100644 sn_client/src/uploader/upload.rs delete mode 100644 sn_client/src/wallet.rs delete mode 100644 sn_client/tests/folders_api.rs delete mode 100644 sn_faucet/CHANGELOG.md delete mode 100644 sn_faucet/Cargo.toml delete mode 100644 sn_faucet/README.md delete mode 100644 
sn_faucet/maid_address_claims.csv delete mode 100644 sn_faucet/src/faucet_server.rs delete mode 100644 sn_faucet/src/gutenberger.rs delete mode 100644 sn_faucet/src/main.rs delete mode 100644 sn_faucet/src/token_distribution.rs diff --git a/sn_cli/CHANGELOG.md b/sn_cli/CHANGELOG.md deleted file mode 100644 index ddcfd25b77..0000000000 --- a/sn_cli/CHANGELOG.md +++ /dev/null @@ -1,3693 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [0.93.6](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.5...sn_cli-v0.93.6) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(network)* set metrics server to run on localhost - -## [0.93.5](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.4...sn_cli-v0.93.5) - 2024-06-04 - -### Fixed -- *(transfer)* mismatched key shall result in decryption error - -### Other -- *(transfer)* make discord_name decryption backward compatible - -## [0.93.4](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.3...sn_cli-v0.93.4) - 2024-06-04 - -### Other -- *(network)* set metrics server to run on localhost - -## [0.93.3](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.2...sn_cli-v0.93.3) - 2024-06-04 - -### Fixed -- *(faucet)* save the transfer not the cashnote for foundation - -### Other -- *(release)* sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20 
- -## [0.93.2](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.1...sn_cli-v0.93.2) - 2024-06-03 - -### Fixed -- enable compile time sk setting for faucet/genesis - -## [0.93.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.93.0...sn_cli-v0.93.1) - 2024-06-03 - -### Other -- bump versions to enable re-release with env vars at compilation - -## [0.93.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.92.0...sn_cli-v0.93.0) - 2024-06-03 - -### Added -- integrate DAG crawling fixes from Josh and Qi -- *(faucet)* write foundation cash note to disk -- *(client)* read existing mnemonic from disk if avilable -- *(networking)* add UPnP metrics -- *(network)* [**breaking**] move network versioning away from sn_protocol -- *(keys)* enable compile or runtime override of keys -- *(launchpad)* use nat detection server to determine the nat status - -### Fixed -- *(networking)* upnp feature gates for metrics -- *(networking)* conditional upnp metrics - -### Other -- *(cli)* showing cli final execution result explicitly -- rename DAG building to crawling -- spend verification error management -- *(networking)* cargo fmt -- use secrets during build process -- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18 - -## [0.92.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.91.4...sn_cli-v0.92.0) - 2024-05-24 - -### Added -- improved spend verification with DAG and fault detection -- upgrade cli audit to use DAG -- remove two uneeded env vars -- pass genesis_cn pub fields separate to hide sk -- pass sk_str via cli opt -- *(audit)* collect payment forward statistics -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper 
address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- hide genesis keypair -- *(node)* use separate keys of Foundation and Royalty -- *(wallet)* ensure genesis wallet attempts to load from local on init first -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- *(audit)* intercept sender of the payment forward -- spend reason enum and sized cipher -- *(metrics)* expose store cost value -- keep track of the estimated network size metric -- record lip2p relay and dctur metrics -- *(node)* periodically forward reward to specific address -- use default keys for genesis, or override -- use different key for payment forward -- hide genesis keypair -- tracking beta rewards from the DAG - -### Fixed -- audit flags activated independently -- reduce blabber in dot and royalties audit mode -- *(cli)* avoid mis-estimation due to overflow -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- *(uploader)* do not error out immediately on max repayment errors -- *(node)* notify fetch completion earlier to avoid being skipped -- avoid adding mixed type addresses into RT -- enable libp2p metrics to be captured -- correct genesis_pk naming -- genesis_cn public fields generated from hard coded value -- invalid spend reason in data payments - -### Other -- further improve fast mode 
gathering speed -- improve cli DAG collection -- improve DAG collection perf -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- improve DAG verification redundancy -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- resolve errors after reverts -- Revert "feat(node): make spend and cash_note reason field configurable" -- Revert "chore: refactor CASH_NOTE_REASON strings to consts" -- Revert "feat(client): dump spends creation_reason statistics" -- Revert "chore: address review comments" -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6 -- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5 -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase wording -- *(CI)* upload faucet log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(release)* 
sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- *(uploader)* return summary when upload fails due to max repayments -- *(uploader)* return the list of max repayment reached items -- remove now unused mostly duplicated code -- *(faucet)* devskim ignore -- *(faucet)* log existing faucet balance if non-zero -- *(faucet)* add foundation PK as const -- *(faucet)* clarify logs for verification -- increase initial faucet balance -- add temp log -- *(faucet)* refresh cashnotes on fund -- devSkim ignore foundation pub temp key -- update got 'gifting-from-genesis' faucet feat -- make open metrics feature default but without starting it by default -- Revert "feat(cli): track spend creation reasons during audit" -- *(node)* tuning the pricing curve -- *(node)* remove un-necessary is_relayed check inside add_potential_candidates -- move historic_quoting_metrics out of the record_store dir -- clippy fixes for open metrics feature -- *(networking)* update tests for pricing curve tweaks -- *(refactor)* stabilise node size to 4k records, -- Revert "chore: rename output reason to purpose for clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- Revert "feat: spend shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.3...sn_cli-v0.91.4) - 2024-05-20 - -### Other -- update Cargo.lock dependencies - -## 
[0.91.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.2...sn_cli-v0.91.3) - 2024-05-15 - -### Other -- update Cargo.lock dependencies - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.1...sn_cli-v0.91.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0...sn_cli-v0.91.1) - 2024-05-08 - -### Other -- update Cargo.lock dependencies -- *(release)* sn_registers-v0.3.13 - -## [0.91.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.91.0-alpha.5...sn_cli-v0.91.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(node)* make spend and cash_note reason field configurable -- *(cli)* readd wallet helper address for dist feat -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(cli)* eip2333 helpers for accounts -- [**breaking**] renamings in CashNote -- [**breaking**] rename token to amount in Spend -- *(cli)* implement FilesUploadStatusNotifier trait for lib code -- *(cli)* return the files upload summary after a successful files upload -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(cli)* track spend creation reasons during audit -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- *(faucet)* log from sn_client -- *(network)* add --upnp flag to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* add in autonat server basics -- *(neetworking)* 
initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs created for -- *(transfers)* do not genereate wallet by default -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- *(cli)* acct_packet tests updated -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* move acct_packet mnemonic into client layer -- *(client)* ensure we have a wallet or generate one via mnemonic -- create faucet via account load or generation -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol -- do not add reported external addressese if we are behind home network -- *(networking)* do not add to dialed peers -- *(network)* do not strip out relay's PeerId -- *(relay)* craft the correctly formatted relay address -- *(network)* do not perform AutoNat for clients -- *(relay_manager)* do not dial with 
P2PCircuit protocol -- *(test)* quoting metrics might have live_time field changed along time -- *(node)* avoid false alert on FailedLocalRecord -- *(record_store)* prune only one record at a time -- *(node)* notify replication_fetcher of early completion -- *(node)* fetcher completes on_going_fetch entry on record_key only -- *(node)* not send out replication when failed read from local -- *(networking)* increase the local responsible range of nodes to K_VALUE peers away -- *(network)* clients should not perform farthest relevant record check -- *(node)* replication_fetch keep distance_range sync with record_store -- *(node)* replication_list in range filter -- transfer tests for HotWallet creation -- typo -- *(manager)* do not print to stdout on low verbosity level -- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time - -### Other -- *(versions)* sync versions with latest crates.io vs -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- addres review comments -- *(cli)* update mnemonic wallet seed phrase wording -- *(CI)* upload faucet log during CI -- remove deprecated wallet deposit cmd -- fix typo for issue 1494 -- *(cli)* make FilesUploadSummary public -- *(deps)* bump dependencies -- check DAG crawling performance -- store owner info inside node instead of network -- small cleanup of dead code -- improve naming and typo fix -- clarify client documentation -- clarify client::new description -- clarify client documentation -- clarify client::new description -- cargo fmt -- rename output reason to purpose for clarity -- *(network)* move event handling to its own module -- cleanup network events -- *(network)* remove nat detection via incoming connections check -- enable connection keepalive timeout -- remove non relayed listener id from relay manager -- enable multiple relay connections -- return early if peer is not a node -- *(tryout)* do not add new relay candidates -- add debug lines while adding potential relay candidates -- 
do not remove old non-relayed listeners -- clippy fix -- *(networking)* remove empty file -- *(networking)* re-add global_only -- use quic again -- log listner id -- *(relay)* add candidate even if we are dialing -- remove quic -- cleanup, add in relay server behaviour, and todo -- *(node)* lower some log levels to reduce log size -- *(node)* optimise record_store farthest record calculation -- *(node)* do not reset farthest_acceptance_distance -- *(node)* remove duplicated record_store fullness check -- *(networking)* notify network event on failed put due to prune -- *(networking)* ensure pruned data is indeed further away than kept -- *(CI)* confirm there is no failed replication fetch -- *(networking)* remove circular vec error -- *(node)* unit test for recover historic quoting metrics -- *(node)* pass entire QuotingMetrics into calculate_cost_for_records -- *(node)* extend distance range -- *(transfers)* reduce error size -- *(transfer)* unit tests for PaymentQuote -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 -- *(release)* 
sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0 -- *(release)* sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0 -- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0 -- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0 - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.90.1...sn_cli-v0.90.2) - 2024-03-28 - -### Fixed -- *(cli)* read from cache during initial chunking process -- *(uploader)* do not error out on quote expiry during get store cost - -## [0.90.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.90.0...sn_cli-v0.90.1) - 2024-03-28 - -### Added -- *(uploader)* error out if the quote has expired during get store_cost -- *(uploader)* use WalletApi to prevent loading client wallet during 
each operation -- *(transfers)* implement WalletApi to expose common methods - -### Fixed -- *(uploader)* clarify the use of root and wallet dirs - -### Other -- *(uploader)* update docs - -## [0.90.0](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.85...sn_cli-v0.90.0) - 2024-03-27 - -### Added -- *(cli)* expose AccountPacket APIs from a lib so it can be used by other apps -- *(uploader)* collect all the uploaded registers -- *(uploader)* allow either chunk or chunk path to be used -- *(uploader)* register existence should be checked before going with payment flow -- *(client)* use the new Uploader insetead of FilesUpload -- make logging simpler to use -- [**breaking**] remove gossip code -- svg caching, fault tolerance during DAG collection -- *(uploader)* repay immediately if the quote has expired -- *(uploader)* use ClientRegister instead of Registers -- *(client)* implement a generic uploader with repay ability -- *(transfers)* enable client to check if a quote has expired -- *(client)* make publish register as an associated function -- *(network)* filter out peers when returning store cost -- *(transfers)* [**breaking**] support multiple payments for the same xorname -- use Arc inside Client, Network to reduce clone cost -- *(networking)* add NodeIssue for tracking bad node shunning -- *(faucet)* rate limit based upon wallet locks - -### Fixed -- *(cli)* files should be chunked before checking if the chunks are empty -- *(test)* use tempfile lib instead of stdlib to create temp dirs -- *(clippy)* allow too many arguments as it is a private function -- *(uploader)* remove unused error tracking and allow retries for new payee -- *(uploader)* make the internals more clean -- *(uploader)* update force make payment logic -- *(register)* permissions verification was not being made by some Register APIs -- *(node)* fetching new data shall not cause timed_out immediately -- *(test)* generate unique temp dir to avoid read outdated data -- *(register)* 
shortcut permissions check when anyone can write to Register - -### Other -- *(cli)* moving binary target related files onto src/bin dir -- *(uploader)* remove FilesApi dependency -- *(uploader)* implement UploaderInterface for easier testing -- rename of function to be more descriptive -- remove counter run through several functions and replace with simple counter -- *(register)* minor simplification in Register Permissions implementation -- *(uploader)* remove unused code path when store cost is 0 -- *(uploader)* implement tests to test the basic pipeline logic -- *(uploader)* initial test setup for uploader -- *(uploader)* remove failed_to states -- *(node)* refactor pricing metrics -- lower some networking log levels -- *(node)* loose bad node detection criteria -- *(node)* optimization to reduce logging - -## [0.89.85](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.84...sn_cli-v0.89.85) - 2024-03-21 - -### Added -- *(cli)* have CLI folders cmds to act on current directory by default -- *(folders)* folders APIs to accept an encryption key for metadata chunks -- *(log)* set log levels on the fly -- improve parallelisation with buffered streams -- refactor DAG, improve error management and security -- dag error recording -- *(protocol)* add rpc to set node log level on the fly - -### Other -- *(cli)* adding automated test for metadata chunk encryption -- *(cli)* adding some high-level doc to acc-packet codebase -- *(node)* reduce bad_nodes check resource usage - -## [0.89.84](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.83...sn_cli-v0.89.84) - 2024-03-18 - -### Other -- *(acc-packet)* adding test for acc-packet moved to a different location on disk -- *(acc-packet)* adding unit test for acc-packet changes scanning logic -- *(acc-packet)* adding unit test to private methods/helpers -- *(cli)* breaking up acc-packet logic within its own mod -- name change to spawn events handler -- increase of text length -- iterate upload code 
rearranged for clear readability - -## [0.89.83](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82...sn_cli-v0.89.83) - 2024-03-14 - -### Added -- self in import change -- moved param to outside calc -- refactor spend validation - -### Fixed -- *(cli)* allow to upload chunks from acc-packet using chunked files local cache -- *(cli)* use chunk-mgr with iterator skipping tracking info files - -### Other -- *(acc-packet)* adding verifications to compare tracking info generated on acc-packets cloned -- *(acc-packet)* adding verifications to compare the files/dirs stored on acc-packets cloned -- *(acc-packet)* testing sync empty root dirs -- *(acc-packet)* testing mutations syncing across clones of an acc-packet -- *(acc-packet)* adding automated tests to sn_cli::AccountPacket -- *(cli)* chunk-mgr to report files chunked/uploaded rather than bailing out -- improve code quality -- new `sn_service_management` crate - -## [0.89.82-alpha.1](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.82-alpha.0...sn_cli-v0.89.82-alpha.1) - 2024-03-08 - -### Added -- reference checks -- reference checks -- builder added to estimate -- removal of unnecessary code in upload rs -- remove all use of client in iter uploader - -### Other -- *(folders)* adding automated tests to sn_client::FoldersApi - -## [0.89.81](https://github.com/joshuef/safe_network/compare/sn_cli-v0.89.80...sn_cli-v0.89.81) - 2024-03-06 - -### Added -- *(cli)* cmd to initialise a directory as root Folder for storing and syncing on/with network -- *(cli)* pull any Folders changes from network when syncing and merge them to local version -- make sn_cli use sn_clients reeports -- *(cli)* files download respects filename path -- *(folders)* make payments for local mutations detected before syncing -- *(folders)* build mutations report to be used by status and sync apis -- *(folders)* sync up logic and CLI cmd -- impl iterate uploader self to extract spawn theads -- impl iterate uploader self to 
extract spawn theads -- elevate files api and cm -- refactor upload with iter -- a more clear param for a message function -- split upload and upload with iter -- removal of some messages from vody body -- batch royalties redemption -- collect royalties through DAG -- *(folders)* avoid chunking files when retrieving them with Folders from the network -- *(folders)* store files data-map within Folders metadata chunk -- file to download -- *(folders)* regenerate tracking info when downloading Folders fm the network -- *(folders)* realise local changes made to folders/files -- *(folders)* keep track of local changes to Folders - -### Fixed -- *(folders)* set correct change state to folders when scanning -- *(folders)* keep track of root folder sync status - -### Other -- clean swarm commands errs and spend errors -- also add deps features in sn_client -- *(release)* sn_transfers-v0.16.1 -- *(release)* sn_protocol-v0.15.0/sn-node-manager-v0.4.0 -- *(cli)* removing some redundant logic from acc-packet codebase -- *(cli)* minor improvements to acc-packet codebase comments -- rename to iterative upload -- rename to iterative upload -- *(folders)* some simplifications to acc-packet codebase -- *(folders)* minor improvements to folders status report - -## [0.89.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.79...sn_cli-v0.89.80) - 2024-02-23 - -### Added -- file to upload -- estimate refactor - -## [0.89.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.78...sn_cli-v0.89.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## [0.89.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.77...sn_cli-v0.89.78) - 2024-02-20 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.76...sn_cli-v0.89.77) - 2024-02-20 - -### Added -- dependency reconfiguration -- nano to snt -- concurrent estimate without error messages -- 
make data public bool -- removal of the retry strategy -- estimate feature with ci and balance after with fn docs - -## [0.89.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.75...sn_cli-v0.89.76) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.89.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.74...sn_cli-v0.89.75) - 2024-02-20 - -### Added -- spend and DAG utilities - -## [0.89.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.73...sn_cli-v0.89.74) - 2024-02-20 - -### Added -- *(folders)* move folders/files metadata out of Folders entries - -## [0.89.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.72...sn_cli-v0.89.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_registers - -## [0.89.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.71...sn_cli-v0.89.72) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.89.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.70...sn_cli-v0.89.71) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## [0.89.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.69...sn_cli-v0.89.70) - 2024-02-19 - -### Other -- *(cli)* allow to pass files iterator to chunk-mgr and files-upload tools - -## [0.89.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.68...sn_cli-v0.89.69) - 2024-02-15 - -### Added -- *(client)* keep payee as part of storage payment cache - -### Other -- *(release)* sn_networking-v0.13.19/sn_faucet-v0.3.67/sn_client-v0.104.14/sn_node-v0.104.22 - -## [0.89.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.67...sn_cli-v0.89.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## 
[0.89.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.66...sn_cli-v0.89.67) - 2024-02-14 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.65...sn_cli-v0.89.66) - 2024-02-14 - -### Other -- *(refactor)* move mod.rs files the modern way - -## [0.89.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.64...sn_cli-v0.89.65) - 2024-02-13 - -### Other -- updated the following local packages: sn_protocol, sn_protocol - -## [0.89.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.63...sn_cli-v0.89.64) - 2024-02-13 - -### Added -- identify orphans and inconsistencies in the DAG - -### Fixed -- manage the genesis spend case - -## [0.89.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.62...sn_cli-v0.89.63) - 2024-02-12 - -### Other -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.89.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.61...sn_cli-v0.89.62) - 2024-02-12 - -### Added -- *(cli)* single payment for all folders being synced -- *(cli)* adding Folders download CLI cmd -- *(client)* adding Folders sync API and CLI cmd - -### Other -- *(cli)* improvements based on peer review -- *(cli)* adding simple example doc for using Folders cmd -- *(cli)* moving some Folder logic to a private helper function - -## [0.89.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.60...sn_cli-v0.89.61) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.89.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.59...sn_cli-v0.89.60) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.89.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.58...sn_cli-v0.89.59) - 2024-02-09 - -### Other -- update dependencies - -## 
[0.89.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.57...sn_cli-v0.89.58) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.89.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.56...sn_cli-v0.89.57) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.55...sn_cli-v0.89.56) - 2024-02-08 - -### Added -- move the RetryStrategy into protocol and use that during cli upload/download - -### Fixed -- *(bench)* update retry strategy args - -### Other -- *(network)* rename re-attempts to retry strategy - -## [0.89.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.54...sn_cli-v0.89.55) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.53...sn_cli-v0.89.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.52...sn_cli-v0.89.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.51...sn_cli-v0.89.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.89.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.50...sn_cli-v0.89.51) - 2024-02-07 - -### Other -- update dependencies - -## [0.89.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.49...sn_cli-v0.89.50) - 2024-02-07 - -### Added -- extendable local state DAG in cli - -## [0.89.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.48...sn_cli-v0.89.49) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.47...sn_cli-v0.89.48) - 2024-02-06 - -### Other -- update dependencies - -## [0.89.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.46...sn_cli-v0.89.47) - 2024-02-06 - -### Other -- update dependencies - -## 
[0.89.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.45...sn_cli-v0.89.46) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.44...sn_cli-v0.89.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.43...sn_cli-v0.89.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.42...sn_cli-v0.89.43) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.41...sn_cli-v0.89.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.40...sn_cli-v0.89.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.89.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.39...sn_cli-v0.89.40) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.38...sn_cli-v0.89.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.37...sn_cli-v0.89.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.89.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.36...sn_cli-v0.89.37) - 2024-02-01 - -### Other -- update dependencies - -## [0.89.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.35...sn_cli-v0.89.36) - 2024-02-01 - -### Fixed -- *(cli)* move UploadedFiles creation logic from ChunkManager -- *(cli)* chunk manager to return error if fs operation fails - -### Other -- *(cli)* use 'completed' files everywhere - -## [0.89.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.34...sn_cli-v0.89.35) - 2024-02-01 - -### Other -- update dependencies - -## 
[0.89.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.33...sn_cli-v0.89.34) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.32...sn_cli-v0.89.33) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.31...sn_cli-v0.89.32) - 2024-01-31 - -### Other -- update dependencies - -## [0.89.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.30...sn_cli-v0.89.31) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.29...sn_cli-v0.89.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.28...sn_cli-v0.89.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.27...sn_cli-v0.89.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.26...sn_cli-v0.89.27) - 2024-01-30 - -### Other -- update dependencies - -## [0.89.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.25...sn_cli-v0.89.26) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.24...sn_cli-v0.89.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.23...sn_cli-v0.89.24) - 2024-01-29 - -### Other -- update dependencies - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.22...sn_cli-v0.89.23) - 2024-01-29 - -### Other -- *(cli)* moving wallet mod into its own mod folder - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.21...sn_cli-v0.89.22) - 2024-01-29 - -### Other -- update dependencies - -## 
[0.89.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.20...sn_cli-v0.89.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.19...sn_cli-v0.89.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.18...sn_cli-v0.89.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.17...sn_cli-v0.89.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.16...sn_cli-v0.89.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.15...sn_cli-v0.89.16) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.14...sn_cli-v0.89.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.13...sn_cli-v0.89.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.12...sn_cli-v0.89.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.11...sn_cli-v0.89.12) - 2024-01-24 - -### Other -- update dependencies - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.10...sn_cli-v0.89.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.9...sn_cli-v0.89.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.8...sn_cli-v0.89.9) - 2024-01-23 - -### Other -- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35 - -## 
[0.89.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.7...sn_cli-v0.89.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.6...sn_cli-v0.89.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.5...sn_cli-v0.89.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.4...sn_cli-v0.89.5) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.3...sn_cli-v0.89.4) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.2...sn_cli-v0.89.3) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.1...sn_cli-v0.89.2) - 2024-01-18 - -### Other -- update dependencies - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.89.0...sn_cli-v0.89.1) - 2024-01-17 - -### Other -- update dependencies - -## [0.89.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.22...sn_cli-v0.89.0) - 2024-01-17 - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.88.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.21...sn_cli-v0.88.22) - 2024-01-17 - -### Other -- update dependencies - -## [0.88.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.20...sn_cli-v0.88.21) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.19...sn_cli-v0.88.20) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.18...sn_cli-v0.88.19) - 2024-01-16 - -### Other -- update dependencies - -## 
[0.88.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.17...sn_cli-v0.88.18) - 2024-01-16 - -### Other -- update dependencies - -## [0.88.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.16...sn_cli-v0.88.17) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.15...sn_cli-v0.88.16) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.14...sn_cli-v0.88.15) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.13...sn_cli-v0.88.14) - 2024-01-15 - -### Other -- update dependencies - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.12...sn_cli-v0.88.13) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.11...sn_cli-v0.88.12) - 2024-01-12 - -### Other -- update dependencies - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.10...sn_cli-v0.88.11) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.9...sn_cli-v0.88.10) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.8...sn_cli-v0.88.9) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.7...sn_cli-v0.88.8) - 2024-01-11 - -### Other -- update dependencies - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.6...sn_cli-v0.88.7) - 2024-01-10 - -### Added -- *(client)* client APIs and CLI cmd to broadcast a transaction signed offline -- *(cli)* new cmd to sign a transaction offline -- *(cli)* new wallet cmd to create a unsigned transaction to be used for offline signing - -### Other -- *(transfers)* 
solving clippy issues about complex fn args - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.5...sn_cli-v0.88.6) - 2024-01-10 - -### Other -- update dependencies - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.4...sn_cli-v0.88.5) - 2024-01-10 - -### Added -- allow register CLI to create a public register writable to anyone - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.3...sn_cli-v0.88.4) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.2...sn_cli-v0.88.3) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.1...sn_cli-v0.88.2) - 2024-01-09 - -### Other -- update dependencies - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.88.0...sn_cli-v0.88.1) - 2024-01-09 - -### Added -- *(cli)* safe wallet create saves new key - -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.87.0...sn_cli-v0.88.0) - 2024-01-08 - -### Added -- provide `--first` argument for `safenode` - -## [0.87.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.103...sn_cli-v0.87.0) - 2024-01-08 - -### Added -- *(cli)* integrate FilesDownload with cli - -### Other -- *(client)* [**breaking**] refactor `Files` into `FilesUpload` - -## [0.86.103](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.102...sn_cli-v0.86.103) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.102](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.101...sn_cli-v0.86.102) - 2024-01-08 - -### Other -- more doc updates to readme files - -## [0.86.101](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.100...sn_cli-v0.86.101) - 2024-01-08 - -### Other -- update dependencies - -## [0.86.100](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.99...sn_cli-v0.86.100) - 2024-01-08 - -### Other 
-- update dependencies - -## [0.86.99](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.98...sn_cli-v0.86.99) - 2024-01-06 - -### Fixed -- *(cli)* read datamap when the xor addr of the file is provided - -## [0.86.98](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.97...sn_cli-v0.86.98) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.97](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.96...sn_cli-v0.86.97) - 2024-01-05 - -### Other -- add clippy unwrap lint to workspace - -## [0.86.96](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.95...sn_cli-v0.86.96) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.95](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.94...sn_cli-v0.86.95) - 2024-01-05 - -### Added -- *(cli)* store uploaded file metadata - -## [0.86.94](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.93...sn_cli-v0.86.94) - 2024-01-05 - -### Other -- *(cli)* error if there is no file to upload - -## [0.86.93](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.92...sn_cli-v0.86.93) - 2024-01-05 - -### Other -- update dependencies - -## [0.86.92](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.91...sn_cli-v0.86.92) - 2024-01-04 - -### Other -- update dependencies - -## [0.86.91](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.90...sn_cli-v0.86.91) - 2024-01-04 - -### Other -- *(cli)* print private data warning -- *(cli)* print the datamap's entire hex addr during first attempt - -## [0.86.90](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.89...sn_cli-v0.86.90) - 2024-01-03 - -### Other -- *(cli)* print the datamap's entire hex addr - -## [0.86.89](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.88...sn_cli-v0.86.89) - 2024-01-03 - -### Added -- *(cli)* keep downloaded files in a safe subdir -- *(client)* clients no longer upload data_map by default - -### Fixed -- *(cli)* write datamap to 
metadata - -### Other -- clippy test fixes and updates -- *(cli)* add not to non-public uploaded files -- refactor for clarity around head_chunk_address -- *(cli)* do not write datamap chunk if non-public - -## [0.86.88](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.87...sn_cli-v0.86.88) - 2024-01-03 - -### Other -- update dependencies - -## [0.86.87](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.86...sn_cli-v0.86.87) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.86](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.85...sn_cli-v0.86.86) - 2024-01-02 - -### Other -- update dependencies - -## [0.86.85](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.84...sn_cli-v0.86.85) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.84](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.83...sn_cli-v0.86.84) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.83](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.82...sn_cli-v0.86.83) - 2023-12-29 - -### Other -- update dependencies - -## [0.86.82](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.81...sn_cli-v0.86.82) - 2023-12-26 - -### Other -- update dependencies - -## [0.86.81](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.80...sn_cli-v0.86.81) - 2023-12-22 - -### Other -- update dependencies - -## [0.86.80](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.79...sn_cli-v0.86.80) - 2023-12-22 - -### Fixed -- printout un-verified files to alert user - -## [0.86.79](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.78...sn_cli-v0.86.79) - 2023-12-21 - -### Other -- log full Register address when created in cli and example app - -## [0.86.78](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.77...sn_cli-v0.86.78) - 2023-12-21 - -### Other -- *(client)* emit chunk Uploaded event if a chunk was verified during repayment - -## 
[0.86.77](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.76...sn_cli-v0.86.77) - 2023-12-20 - -### Other -- reduce default batch size - -## [0.86.76](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.75...sn_cli-v0.86.76) - 2023-12-19 - -### Added -- network royalties through audit POC - -## [0.86.75](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.74...sn_cli-v0.86.75) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.74](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.73...sn_cli-v0.86.74) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.73](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.72...sn_cli-v0.86.73) - 2023-12-19 - -### Other -- update dependencies - -## [0.86.72](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.71...sn_cli-v0.86.72) - 2023-12-19 - -### Fixed -- *(cli)* mark chunk completion as soon as we upload each chunk - -## [0.86.71](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.70...sn_cli-v0.86.71) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.70](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.69...sn_cli-v0.86.70) - 2023-12-18 - -### Added -- *(cli)* random shuffle upload chunks to allow clients co-operation - -## [0.86.69](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.68...sn_cli-v0.86.69) - 2023-12-18 - -### Other -- update dependencies - -## [0.86.68](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.67...sn_cli-v0.86.68) - 2023-12-18 - -### Added -- *(client)* update the Files config via setters -- *(client)* track the upload stats inside Files -- *(client)* move upload retry logic from CLI to client - -### Other -- *(client)* add docs to the Files struct -- *(cli)* use the new client Files api to upload chunks - -## [0.86.67](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.66...sn_cli-v0.86.67) - 2023-12-14 - -### Other -- update dependencies - -## 
[0.86.66](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.65...sn_cli-v0.86.66) - 2023-12-14 - -### Other -- update dependencies - -## [0.86.65](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.64...sn_cli-v0.86.65) - 2023-12-14 - -### Other -- *(cli)* make upload summary printout clearer - -## [0.86.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.63...sn_cli-v0.86.64) - 2023-12-14 - -### Other -- *(cli)* make sequential payment fail limit a const - -## [0.86.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.62...sn_cli-v0.86.63) - 2023-12-14 - -### Other -- *(cli)* make wallet address easy to copy -- *(cli)* peer list is not printed to stdout - -## [0.86.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.61...sn_cli-v0.86.62) - 2023-12-14 - -### Added -- *(cli)* cli arg for controlling chunk retries -- *(cli)* simple retry mechanism for remaining chunks - -### Other -- prevent retries on ci runs w/ '-r 0' - -## [0.86.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.60...sn_cli-v0.86.61) - 2023-12-13 - -### Other -- *(cli)* refactor upload_files - -## [0.86.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.59...sn_cli-v0.86.60) - 2023-12-13 - -### Other -- update dependencies - -## [0.86.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.58...sn_cli-v0.86.59) - 2023-12-13 - -### Added -- *(cli)* download path is familiar to users - -## [0.86.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.57...sn_cli-v0.86.58) - 2023-12-13 - -### Added -- audit DAG collection and visualization -- cli double spends audit from genesis - -## [0.86.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.56...sn_cli-v0.86.57) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.55...sn_cli-v0.86.56) - 2023-12-12 - -### Added -- *(cli)* skip payment and upload for 
existing chunks - -## [0.86.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.54...sn_cli-v0.86.55) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.53...sn_cli-v0.86.54) - 2023-12-12 - -### Added -- constant uploading across batches - -### Fixed -- *(cli)* remove chunk_manager clone that is unsafe - -### Other -- *(networking)* add replication logs -- *(networking)* solidify REPLICATION_RANGE use. exclude self_peer_id in some calcs -- *(cli)* bail early on any payment errors -- *(cli)* only report uploaded files if no errors - -## [0.86.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.52...sn_cli-v0.86.53) - 2023-12-12 - -### Other -- update dependencies - -## [0.86.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.51...sn_cli-v0.86.52) - 2023-12-11 - -### Other -- update dependencies - -## [0.86.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.50...sn_cli-v0.86.51) - 2023-12-11 - -### Other -- *(cli)* ux improvements after upload completes - -## [0.86.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.49...sn_cli-v0.86.50) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.48...sn_cli-v0.86.49) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.47...sn_cli-v0.86.48) - 2023-12-08 - -### Other -- update dependencies - -## [0.86.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.46...sn_cli-v0.86.47) - 2023-12-07 - -### Other -- update dependencies - -## [0.86.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.45...sn_cli-v0.86.46) - 2023-12-06 - -### Other -- update dependencies - -## [0.86.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.44...sn_cli-v0.86.45) - 2023-12-06 - -### Other -- update dependencies - -## 
[0.86.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.43...sn_cli-v0.86.44) - 2023-12-06 - -### Added -- *(cli)* enable gossipsub for client when wallet cmd requires it -- *(wallet)* basic impl of a watch-only wallet API - -### Other -- *(wallet)* major refactoring removing redundant and unused code -- *(cli)* Fix duplicate use of 'n' short flag -- *(cli)* All --name flags have short 'n' flag - -## [0.86.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.42...sn_cli-v0.86.43) - 2023-12-06 - -### Other -- remove some needless cloning -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.86.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.41...sn_cli-v0.86.42) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.40...sn_cli-v0.86.41) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.39...sn_cli-v0.86.40) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.38...sn_cli-v0.86.39) - 2023-12-05 - -### Other -- update dependencies - -## [0.86.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.37...sn_cli-v0.86.38) - 2023-12-05 - -### Added -- allow for cli chunk put retries for un verifiable chunks - -### Fixed -- mark chunks as completed when no failures on retry - -## [0.86.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.36...sn_cli-v0.86.37) - 2023-12-05 - -### Other -- *(cli)* print the failed uploads stats -- *(cli)* remove unpaid/paid distinction from chunk manager - -## [0.86.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.35...sn_cli-v0.86.36) - 2023-12-05 - -### Other -- *(networking)* remove triggered bootstrap slowdown - -## 
[0.86.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.34...sn_cli-v0.86.35) - 2023-12-04 - -### Other -- update dependencies - -## [0.86.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.33...sn_cli-v0.86.34) - 2023-12-01 - -### Other -- *(ci)* fix CI build cache parsing error - -## [0.86.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.32...sn_cli-v0.86.33) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.31...sn_cli-v0.86.32) - 2023-11-29 - -### Added -- most of nodes not subscribe to royalty_transfer topic - -## [0.86.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.30...sn_cli-v0.86.31) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.29...sn_cli-v0.86.30) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.28...sn_cli-v0.86.29) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.27...sn_cli-v0.86.28) - 2023-11-29 - -### Other -- update dependencies - -## [0.86.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.26...sn_cli-v0.86.27) - 2023-11-29 - -### Added -- verify all the way to genesis -- verify spends through the cli - -### Fixed -- genesis check security flaw - -## [0.86.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.25...sn_cli-v0.86.26) - 2023-11-28 - -### Added -- *(cli)* serialise chunks metadata on disk with MsgPack instead of bincode -- *(royalties)* serialise royalties notifs with MsgPack instead of bincode - -## [0.86.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.24...sn_cli-v0.86.25) - 2023-11-28 - -### Other -- update dependencies - -## 
[0.86.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.23...sn_cli-v0.86.24) - 2023-11-28 - -### Other -- update dependencies - -## [0.86.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.22...sn_cli-v0.86.23) - 2023-11-27 - -### Other -- update dependencies - -## [0.86.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.21...sn_cli-v0.86.22) - 2023-11-24 - -### Added -- *(cli)* peers displayed as list - -## [0.86.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.20...sn_cli-v0.86.21) - 2023-11-24 - -### Other -- update dependencies - -## [0.86.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.19...sn_cli-v0.86.20) - 2023-11-23 - -### Added -- record put retry even when not verifying -- retry at the record level, remove all other retries, report errors - -### Other -- appease clippy -- fix tests compilation - -## [0.86.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.18...sn_cli-v0.86.19) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.17...sn_cli-v0.86.18) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.16...sn_cli-v0.86.17) - 2023-11-23 - -### Other -- update dependencies - -## [0.86.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.15...sn_cli-v0.86.16) - 2023-11-22 - -### Other -- update dependencies - -## [0.86.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.14...sn_cli-v0.86.15) - 2023-11-22 - -### Added -- *(cli)* add download batch-size option - -## [0.86.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.13...sn_cli-v0.86.14) - 2023-11-22 - -### Other -- update dependencies - -## [0.86.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.12...sn_cli-v0.86.13) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - 
-## [0.86.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.11...sn_cli-v0.86.12) - 2023-11-21 - -### Other -- update dependencies - -## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.10...sn_cli-v0.86.11) - 2023-11-20 - -### Other -- increase default batch size - -## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.9...sn_cli-v0.86.10) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.8...sn_cli-v0.86.9) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.7...sn_cli-v0.86.8) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.6...sn_cli-v0.86.7) - 2023-11-20 - -### Other -- update dependencies - -## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.5...sn_cli-v0.86.6) - 2023-11-20 - -### Fixed -- use actual quote instead of dummy - -## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.4...sn_cli-v0.86.5) - 2023-11-17 - -### Other -- update dependencies - -## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.3...sn_cli-v0.86.4) - 2023-11-17 - -### Other -- update dependencies - -## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.2...sn_cli-v0.86.3) - 2023-11-16 - -### Other -- update dependencies - -## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.1...sn_cli-v0.86.2) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.86.0...sn_cli-v0.86.1) - 2023-11-15 - -### Other -- update dependencies - -## [0.86.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.20...sn_cli-v0.86.0) - 2023-11-15 - -### Added -- *(client)* [**breaking**] error out if we cannot connect to the network 
in - -### Other -- *(client)* [**breaking**] remove request_response timeout argument - -## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.19...sn_cli-v0.85.20) - 2023-11-15 - -### Added -- *(royalties)* make royalties payment to be 15% of the total storage cost -- *(protocol)* move test utils behind a feature gate - -## [0.85.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.18...sn_cli-v0.85.19) - 2023-11-14 - -### Other -- *(royalties)* verify royalties fees amounts - -## [0.85.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.17...sn_cli-v0.85.18) - 2023-11-14 - -### Other -- update dependencies - -## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.16...sn_cli-v0.85.17) - 2023-11-14 - -### Other -- update dependencies - -## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.15...sn_cli-v0.85.16) - 2023-11-14 - -### Fixed -- *(cli)* marking chunks as verified should mark them as paid too - -## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.14...sn_cli-v0.85.15) - 2023-11-14 - -### Fixed -- *(cli)* repay unpaid chunks due to transfer failures - -## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.13...sn_cli-v0.85.14) - 2023-11-13 - -### Fixed -- *(cli)* failed to move chunk path shall not get deleted - -## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.12...sn_cli-v0.85.13) - 2023-11-13 - -### Fixed -- avoid infinite looping on verification during upload - -## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.11...sn_cli-v0.85.12) - 2023-11-13 - -### Other -- update dependencies - -## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.10...sn_cli-v0.85.11) - 2023-11-13 - -### Other -- *(cli)* disable silent ignoring of wallet errors - -## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.9...sn_cli-v0.85.10) - 2023-11-10 - 
-### Added -- *(cli)* attempt to reload wallet from disk if storing it fails when receiving transfers online -- *(cli)* new cmd to listen to royalties payments and deposit them into a local wallet - -### Other -- *(cli)* minor improvement to help docs - -## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.8...sn_cli-v0.85.9) - 2023-11-10 - -### Other -- update dependencies - -## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.7...sn_cli-v0.85.8) - 2023-11-09 - -### Other -- update dependencies - -## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.6...sn_cli-v0.85.7) - 2023-11-09 - -### Other -- update dependencies - -## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.5...sn_cli-v0.85.6) - 2023-11-09 - -### Added -- increase retry count for chunk put -- chunk put retry taking repayment into account - -### Other -- const instead of magic num in code for wait time -- please ci - -## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.4...sn_cli-v0.85.5) - 2023-11-08 - -### Other -- update dependencies - -## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.3...sn_cli-v0.85.4) - 2023-11-08 - -### Fixed -- *(bench)* update benchmark to account for deduplicated files - -## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.2...sn_cli-v0.85.3) - 2023-11-08 - -### Other -- update dependencies - -## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.1...sn_cli-v0.85.2) - 2023-11-07 - -### Other -- update dependencies - -## [0.85.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.85.0...sn_cli-v0.85.1) - 2023-11-07 - -### Other -- update dependencies - -## [0.85.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.51...sn_cli-v0.85.0) - 2023-11-07 - -### Added -- *(cli)* store paid and unpaid chunks separately -- *(cli)* use ChunkManager during the upload process -- *(cli)* implement 
ChunkManager to re-use already chunked files - -### Fixed -- *(cli)* keep track of files that have been completely uploaded -- *(cli)* get bytes from OsStr by first converting it into lossy string -- *(client)* [**breaking**] make `Files::chunk_file` into an associated function -- *(upload)* don't ignore file if filename cannot be converted from OsString to String - -### Other -- rename test function and spell correction -- *(cli)* add more tests to chunk manager for unpaid paid dir refactor -- *(cli)* add some docs to ChunkManager -- *(cli)* add tests for `ChunkManager` -- *(cli)* move chunk management to its own module - -## [0.84.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.50...sn_cli-v0.84.51) - 2023-11-07 - -### Other -- update dependencies - -## [0.84.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.49...sn_cli-v0.84.50) - 2023-11-07 - -### Other -- update dependencies - -## [0.84.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.48...sn_cli-v0.84.49) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.47...sn_cli-v0.84.48) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.46...sn_cli-v0.84.47) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.45...sn_cli-v0.84.46) - 2023-11-06 - -### Other -- update dependencies - -## [0.84.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.44...sn_cli-v0.84.45) - 2023-11-06 - -### Added -- *(deps)* upgrade libp2p to 0.53 - -## [0.84.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.43...sn_cli-v0.84.44) - 2023-11-03 - -### Other -- *(cli)* make file upload output cut n paste friendly - -## [0.84.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.42...sn_cli-v0.84.43) - 2023-11-03 - -### Other -- 
update dependencies - -## [0.84.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.41...sn_cli-v0.84.42) - 2023-11-02 - -### Other -- update dependencies - -## [0.84.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.40...sn_cli-v0.84.41) - 2023-11-02 - -### Other -- update dependencies - -## [0.84.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.39...sn_cli-v0.84.40) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.38...sn_cli-v0.84.39) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.37...sn_cli-v0.84.38) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.36...sn_cli-v0.84.37) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.35...sn_cli-v0.84.36) - 2023-11-01 - -### Other -- update dependencies - -## [0.84.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.34...sn_cli-v0.84.35) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.33...sn_cli-v0.84.34) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.32...sn_cli-v0.84.33) - 2023-10-31 - -### Other -- update dependencies - -## [0.84.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.31...sn_cli-v0.84.32) - 2023-10-30 - -### Other -- update dependencies - -## [0.84.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.30...sn_cli-v0.84.31) - 2023-10-30 - -### Added -- *(cli)* error out if empty wallet -- *(cli)* error out if we do not have enough balance - -## [0.84.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.29...sn_cli-v0.84.30) - 2023-10-30 - -### 
Other -- update dependencies - -## [0.84.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.28...sn_cli-v0.84.29) - 2023-10-30 - -### Other -- *(node)* use Bytes for Gossip related data types -- *(release)* sn_client-v0.95.11/sn_protocol-v0.8.7/sn_transfers-v0.14.8/sn_networking-v0.9.10 - -## [0.84.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.27...sn_cli-v0.84.28) - 2023-10-27 - -### Other -- update dependencies - -## [0.84.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.26...sn_cli-v0.84.27) - 2023-10-27 - -### Other -- update dependencies - -## [0.84.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.25...sn_cli-v0.84.26) - 2023-10-27 - -### Added -- *(cli)* verify as we upload when 1 batch - -## [0.84.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.24...sn_cli-v0.84.25) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.23...sn_cli-v0.84.24) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.22...sn_cli-v0.84.23) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.21...sn_cli-v0.84.22) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.20...sn_cli-v0.84.21) - 2023-10-26 - -### Other -- update dependencies - -## [0.84.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.19...sn_cli-v0.84.20) - 2023-10-25 - -### Added -- *(cli)* chunk files in parallel - -### Fixed -- *(cli)* remove Arc from ProgressBar as it is Arc internally - -### Other -- *(cli)* add logs to indicate the time spent on chunking the files - -## [0.84.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.18...sn_cli-v0.84.19) - 2023-10-24 - -### Added -- *(cli)* wallet deposit cmd with no 
arg was not reading cash notes from disk -- *(cli)* new wallet create cmd allowing users to create a wallet from a given secret key - -## [0.84.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.17...sn_cli-v0.84.18) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.16...sn_cli-v0.84.17) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.15...sn_cli-v0.84.16) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.14...sn_cli-v0.84.15) - 2023-10-24 - -### Added -- *(log)* use LogBuilder to initialize logging - -### Other -- *(client)* log and wait tweaks - -## [0.84.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.13...sn_cli-v0.84.14) - 2023-10-24 - -### Other -- update dependencies - -## [0.84.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.12...sn_cli-v0.84.13) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.11...sn_cli-v0.84.12) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.10...sn_cli-v0.84.11) - 2023-10-23 - -### Fixed -- *(cli)* don't bail if a payment was not found during verify/repayment - -## [0.84.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.9...sn_cli-v0.84.10) - 2023-10-23 - -### Other -- more custom debug and debug skips - -## [0.84.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.8...sn_cli-v0.84.9) - 2023-10-23 - -### Other -- update dependencies - -## [0.84.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.7...sn_cli-v0.84.8) - 2023-10-22 - -### Other -- update dependencies - -## [0.84.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.6...sn_cli-v0.84.7) - 
2023-10-21 - -### Other -- update dependencies - -## [0.84.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.5...sn_cli-v0.84.6) - 2023-10-20 - -### Other -- update dependencies - -## [0.84.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.4...sn_cli-v0.84.5) - 2023-10-20 - -### Other -- update dependencies - -## [0.84.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.3...sn_cli-v0.84.4) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.2...sn_cli-v0.84.3) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.1...sn_cli-v0.84.2) - 2023-10-19 - -### Other -- update dependencies - -## [0.84.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.84.0...sn_cli-v0.84.1) - 2023-10-18 - -### Other -- update dependencies - -## [0.84.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.52...sn_cli-v0.84.0) - 2023-10-18 - -### Added -- *(client)* verify register uploads and retry and repay if failed - -### Other -- *(client)* always validate storage payments -- repay for data in node rewards tests - -## [0.83.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.51...sn_cli-v0.83.52) - 2023-10-18 - -### Other -- update dependencies - -## [0.83.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.50...sn_cli-v0.83.51) - 2023-10-17 - -### Other -- update dependencies - -## [0.83.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.49...sn_cli-v0.83.50) - 2023-10-16 - -### Other -- update dependencies - -## [0.83.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.48...sn_cli-v0.83.49) - 2023-10-16 - -### Other -- update dependencies - -## [0.83.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.47...sn_cli-v0.83.48) - 2023-10-13 - -### Other -- update dependencies - -## 
[0.83.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.46...sn_cli-v0.83.47) - 2023-10-13 - -### Other -- update dependencies - -## [0.83.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.45...sn_cli-v0.83.46) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.44...sn_cli-v0.83.45) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.43...sn_cli-v0.83.44) - 2023-10-12 - -### Other -- update dependencies - -## [0.83.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.42...sn_cli-v0.83.43) - 2023-10-11 - -### Fixed -- expose RecordMismatch errors and cleanup wallet if we hit that - -### Other -- *(docs)* cleanup comments and docs - -## [0.83.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.41...sn_cli-v0.83.42) - 2023-10-11 - -### Other -- update dependencies - -## [0.83.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.40...sn_cli-v0.83.41) - 2023-10-11 - -### Fixed -- make client handle payment error - -## [0.83.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.39...sn_cli-v0.83.40) - 2023-10-11 - -### Added -- showing expected holders to CLI when required - -## [0.83.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.38...sn_cli-v0.83.39) - 2023-10-11 - -### Other -- update dependencies - -## [0.83.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.37...sn_cli-v0.83.38) - 2023-10-10 - -### Added -- *(transfer)* special event for transfer notifs over gossipsub - -## [0.83.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.36...sn_cli-v0.83.37) - 2023-10-10 - -### Other -- update dependencies - -## [0.83.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.35...sn_cli-v0.83.36) - 2023-10-10 - -### Other -- update dependencies - -## 
[0.83.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.34...sn_cli-v0.83.35) - 2023-10-10 - -### Other -- update dependencies - -## [0.83.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.33...sn_cli-v0.83.34) - 2023-10-09 - -### Other -- update dependencies - -## [0.83.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.32...sn_cli-v0.83.33) - 2023-10-09 - -### Added -- ensure temp SE chunks got cleaned after uploading - -## [0.83.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.31...sn_cli-v0.83.32) - 2023-10-08 - -### Other -- update dependencies - -## [0.83.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.30...sn_cli-v0.83.31) - 2023-10-06 - -### Added -- feat!(sn_transfers): unify store api for wallet - -### Other -- remove deposit vs received cashnote distinction - -## [0.83.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.29...sn_cli-v0.83.30) - 2023-10-06 - -### Other -- *(cli)* reuse the client::send function to send amount from wallet - -## [0.83.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.28...sn_cli-v0.83.29) - 2023-10-06 - -### Other -- update dependencies - -## [0.83.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.27...sn_cli-v0.83.28) - 2023-10-06 - -### Other -- update dependencies - -## [0.83.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.26...sn_cli-v0.83.27) - 2023-10-05 - -### Added -- *(metrics)* enable node monitoring through dockerized grafana instance - -## [0.83.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.25...sn_cli-v0.83.26) - 2023-10-05 - -### Added -- feat!(cli): remove concurrency argument - -### Fixed -- *(client)* remove concurrency limitations - -## [0.83.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.24...sn_cli-v0.83.25) - 2023-10-05 - -### Fixed -- *(sn_transfers)* be sure we store CashNotes before writing the wallet file - -## 
[0.83.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.23...sn_cli-v0.83.24) - 2023-10-05 - -### Fixed -- use specific verify func for chunk stored verification - -## [0.83.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.22...sn_cli-v0.83.23) - 2023-10-05 - -### Added -- use progress bars on `files upload` - -### Other -- use one files api and clarify variable names -- pay_for_chunks returns cost and new balance - -## [0.83.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.21...sn_cli-v0.83.22) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.20...sn_cli-v0.83.21) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.19...sn_cli-v0.83.20) - 2023-10-04 - -### Added -- *(client)* log the command invoked for safe - -## [0.83.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.18...sn_cli-v0.83.19) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.17...sn_cli-v0.83.18) - 2023-10-04 - -### Other -- update dependencies - -## [0.83.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.16...sn_cli-v0.83.17) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.15...sn_cli-v0.83.16) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.14...sn_cli-v0.83.15) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.13...sn_cli-v0.83.14) - 2023-10-03 - -### Other -- update dependencies - -## [0.83.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.12...sn_cli-v0.83.13) - 2023-10-03 - -### Other -- update dependencies - -## 
[0.83.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.11...sn_cli-v0.83.12) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.10...sn_cli-v0.83.11) - 2023-10-02 - -### Added -- add read transfer from file option -- faucet using transfers instead of sending raw cashnotes - -### Other -- trim transfer hex nl and spaces -- add some more error info printing - -## [0.83.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.9...sn_cli-v0.83.10) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.8...sn_cli-v0.83.9) - 2023-10-02 - -### Added -- *(client)* show feedback on long wait for costs - -## [0.83.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.7...sn_cli-v0.83.8) - 2023-10-02 - -### Other -- update dependencies - -## [0.83.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.6...sn_cli-v0.83.7) - 2023-09-29 - -### Other -- update dependencies - -## [0.83.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.5...sn_cli-v0.83.6) - 2023-09-29 - -### Fixed -- *(cli)* dont bail on errors during repay/upload - -## [0.83.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.4...sn_cli-v0.83.5) - 2023-09-29 - -### Fixed -- *(client)* just skip empty files - -## [0.83.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.3...sn_cli-v0.83.4) - 2023-09-28 - -### Added -- client to client transfers - -## [0.83.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.2...sn_cli-v0.83.3) - 2023-09-27 - -### Added -- *(networking)* remove optional_semaphore being passed down from apps - -## [0.83.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.1...sn_cli-v0.83.2) - 2023-09-27 - -### Other -- update dependencies - -## [0.83.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.83.0...sn_cli-v0.83.1) - 
2023-09-27 - -### Added -- *(logging)* set default log levels to be more verbose -- *(logging)* set default logging to data-dir - -### Other -- *(client)* add timestamp to client log path - -## [0.83.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.8...sn_cli-v0.83.0) - 2023-09-27 - -### Added -- deep clean sn_transfers, reduce exposition, remove dead code - -## [0.82.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.7...sn_cli-v0.82.8) - 2023-09-26 - -### Other -- update dependencies - -## [0.82.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.6...sn_cli-v0.82.7) - 2023-09-26 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics - -## [0.82.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.5...sn_cli-v0.82.6) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.4...sn_cli-v0.82.5) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.3...sn_cli-v0.82.4) - 2023-09-25 - -### Added -- *(cli)* wrap repayment error for clarity - -## [0.82.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.2...sn_cli-v0.82.3) - 2023-09-25 - -### Added -- *(peers)* use a common way to bootstrap into the network for all the bins -- *(cli)* fetch network contacts for the provided network name -- *(cli)* fetch bootstrap peers from network contacts - -### Other -- more logs around parsing network-contacts -- *(cli)* feature gate network contacts and fetch from URL - -## [0.82.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.1...sn_cli-v0.82.2) - 2023-09-25 - -### Other -- update dependencies - -## [0.82.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.82.0...sn_cli-v0.82.1) - 2023-09-22 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC services to 
pub/sub to gossipsub topics - -## [0.82.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.64...sn_cli-v0.82.0) - 2023-09-22 - -### Added -- *(cli)* deps update and arbitrary change for cli - -## [0.81.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.63...sn_cli-v0.81.64) - 2023-09-21 - -### Added -- provide a `files ls` command - -### Other -- *(release)* sn_client-v0.89.22 -- store uploaded files list as text -- clarify `files download` usage -- output address of uploaded file - -## [0.81.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.62...sn_cli-v0.81.63) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.61...sn_cli-v0.81.62) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.60...sn_cli-v0.81.61) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.59...sn_cli-v0.81.60) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.58...sn_cli-v0.81.59) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.57...sn_cli-v0.81.58) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.56...sn_cli-v0.81.57) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.55...sn_cli-v0.81.56) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.54...sn_cli-v0.81.55) - 2023-09-20 - -### Fixed -- make clearer cli send asks for whole token amounts, not nanos - -## [0.81.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.53...sn_cli-v0.81.54) - 
2023-09-20 - -### Other -- update dependencies - -## [0.81.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.52...sn_cli-v0.81.53) - 2023-09-20 - -### Other -- update dependencies - -## [0.81.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.51...sn_cli-v0.81.52) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.50...sn_cli-v0.81.51) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.49...sn_cli-v0.81.50) - 2023-09-19 - -### Other -- error handling when failed fetch store cost - -## [0.81.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.48...sn_cli-v0.81.49) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.47...sn_cli-v0.81.48) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.46...sn_cli-v0.81.47) - 2023-09-19 - -### Other -- update dependencies - -## [0.81.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.45...sn_cli-v0.81.46) - 2023-09-18 - -### Fixed -- avoid verification too close to put; remove un-necessary wait for put - -## [0.81.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.44...sn_cli-v0.81.45) - 2023-09-18 - -### Other -- some cleanups within the upload procedure - -## [0.81.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.43...sn_cli-v0.81.44) - 2023-09-18 - -### Other -- update dependencies - -## [0.81.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.42...sn_cli-v0.81.43) - 2023-09-18 - -### Fixed -- *(cli)* repay and upload after verifying all the chunks - -### Other -- *(cli)* use iter::chunks() API to batch and pay for our chunks - -## [0.81.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.41...sn_cli-v0.81.42) - 
2023-09-15 - -### Added -- *(client)* pay for chunks in batches - -### Other -- *(cli)* move 'chunk_path' to files.rs -- *(client)* refactor chunk upload code to allow greater concurrency - -## [0.81.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.40...sn_cli-v0.81.41) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.39...sn_cli-v0.81.40) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.38...sn_cli-v0.81.39) - 2023-09-15 - -### Other -- update dependencies - -## [0.81.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.37...sn_cli-v0.81.38) - 2023-09-14 - -### Other -- update dependencies - -## [0.81.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.36...sn_cli-v0.81.37) - 2023-09-14 - -### Added -- expose batch_size to cli -- split upload procedure into batches - -## [0.81.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.35...sn_cli-v0.81.36) - 2023-09-14 - -### Other -- *(metrics)* rename feature flag and small fixes - -## [0.81.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.34...sn_cli-v0.81.35) - 2023-09-13 - -### Added -- *(register)* paying nodes for Register storage - -## [0.81.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.33...sn_cli-v0.81.34) - 2023-09-12 - -### Added -- utilize stream decryptor - -## [0.81.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.32...sn_cli-v0.81.33) - 2023-09-12 - -### Other -- update dependencies - -## [0.81.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.31...sn_cli-v0.81.32) - 2023-09-12 - -### Other -- *(metrics)* rename network metrics and remove from default features list - -## [0.81.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.30...sn_cli-v0.81.31) - 2023-09-12 - -### Added -- add tx and parent spends 
verification -- chunk payments using UTXOs instead of DBCs - -### Other -- use updated sn_dbc - -## [0.81.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.29...sn_cli-v0.81.30) - 2023-09-11 - -### Other -- update dependencies - -## [0.81.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.28...sn_cli-v0.81.29) - 2023-09-11 - -### Other -- utilize stream encryptor - -## [0.81.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.27...sn_cli-v0.81.28) - 2023-09-11 - -### Other -- update dependencies - -## [0.81.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.26...sn_cli-v0.81.27) - 2023-09-08 - -### Added -- *(client)* repay for chunks if they cannot be validated - -### Fixed -- *(client)* dont bail on failed upload before verify/repay - -### Other -- *(client)* refactor to have permits at network layer -- *(refactor)* remove wallet_client args from upload flow -- *(refactor)* remove upload_chunks semaphore arg - -## [0.81.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.25...sn_cli-v0.81.26) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.24...sn_cli-v0.81.25) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.23...sn_cli-v0.81.24) - 2023-09-07 - -### Other -- update dependencies - -## [0.81.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.22...sn_cli-v0.81.23) - 2023-09-06 - -### Other -- update dependencies - -## [0.81.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.21...sn_cli-v0.81.22) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.20...sn_cli-v0.81.21) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.19...sn_cli-v0.81.20) - 2023-09-05 - -### 
Other -- update dependencies - -## [0.81.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.18...sn_cli-v0.81.19) - 2023-09-05 - -### Added -- *(cli)* properly init color_eyre, advise on hex parse fail - -## [0.81.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.17...sn_cli-v0.81.18) - 2023-09-05 - -### Other -- update dependencies - -## [0.81.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.16...sn_cli-v0.81.17) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.15...sn_cli-v0.81.16) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.14...sn_cli-v0.81.15) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.13...sn_cli-v0.81.14) - 2023-09-04 - -### Other -- update dependencies - -## [0.81.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.12...sn_cli-v0.81.13) - 2023-09-02 - -### Other -- update dependencies - -## [0.81.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.11...sn_cli-v0.81.12) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.10...sn_cli-v0.81.11) - 2023-09-01 - -### Other -- *(cli)* better formatting for elapsed time statements -- *(transfers)* store dbcs by ref to avoid more clones - -## [0.81.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.9...sn_cli-v0.81.10) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.8...sn_cli-v0.81.9) - 2023-09-01 - -### Other -- update dependencies - -## [0.81.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.7...sn_cli-v0.81.8) - 2023-08-31 - -### Added -- *(cli)* perform wallet actions without connecting to the network - -### Other -- 
remove unused async - -## [0.81.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.6...sn_cli-v0.81.7) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.5...sn_cli-v0.81.6) - 2023-08-31 - -### Added -- *(cli)* wallet cmd flag enabing to query a node's local wallet balance - -### Fixed -- *(cli)* don't try to create wallet paths when checking balance - -## [0.81.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.4...sn_cli-v0.81.5) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.3...sn_cli-v0.81.4) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.2...sn_cli-v0.81.3) - 2023-08-31 - -### Fixed -- correct bench download calculation - -## [0.81.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.1...sn_cli-v0.81.2) - 2023-08-31 - -### Other -- update dependencies - -## [0.81.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.81.0...sn_cli-v0.81.1) - 2023-08-31 - -### Added -- *(cli)* expose 'concurrency' flag -- *(cli)* increase put parallelisation - -### Other -- *(client)* improve download concurrency. 
- -## [0.81.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.64...sn_cli-v0.81.0) - 2023-08-30 - -### Added -- refactor to allow greater upload parallelisation -- one transfer per data set, mapped dbcs to content addrs -- [**breaking**] pay each chunk holder direct -- feat!(protocol): get price and pay for each chunk individually -- feat!(protocol): remove chunk merkletree to simplify payment - -### Fixed -- *(tokio)* remove tokio fs - -### Other -- *(deps)* bump tokio to 1.32.0 -- *(client)* refactor client wallet to reduce dbc clones -- *(client)* pass around content payments map mut ref -- *(client)* reduce transferoutputs cloning - -## [0.80.64](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.63...sn_cli-v0.80.64) - 2023-08-30 - -### Other -- update dependencies - -## [0.80.63](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.62...sn_cli-v0.80.63) - 2023-08-30 - -### Other -- update dependencies - -## [0.80.62](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.61...sn_cli-v0.80.62) - 2023-08-29 - -### Other -- update dependencies - -## [0.80.61](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.60...sn_cli-v0.80.61) - 2023-08-25 - -### Other -- update dependencies - -## [0.80.60](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.59...sn_cli-v0.80.60) - 2023-08-24 - -### Other -- *(cli)* verify bench uploads once more - -## [0.80.59](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.58...sn_cli-v0.80.59) - 2023-08-24 - -### Other -- rust 1.72.0 fixes - -## [0.80.58](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.57...sn_cli-v0.80.58) - 2023-08-24 - -### Other -- update dependencies - -## [0.80.57](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.56...sn_cli-v0.80.57) - 2023-08-22 - -### Other -- update dependencies - -## [0.80.56](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.55...sn_cli-v0.80.56) - 2023-08-22 - -### Fixed -- 
fixes to allow upload file works properly - -## [0.80.55](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.54...sn_cli-v0.80.55) - 2023-08-21 - -### Other -- update dependencies - -## [0.80.54](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.53...sn_cli-v0.80.54) - 2023-08-21 - -### Other -- update dependencies - -## [0.80.53](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.52...sn_cli-v0.80.53) - 2023-08-18 - -### Other -- update dependencies - -## [0.80.52](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.51...sn_cli-v0.80.52) - 2023-08-18 - -### Other -- update dependencies - -## [0.80.51](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.50...sn_cli-v0.80.51) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.50](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.49...sn_cli-v0.80.50) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.48...sn_cli-v0.80.49) - 2023-08-17 - -### Other -- update dependencies - -## [0.80.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.47...sn_cli-v0.80.48) - 2023-08-17 - -### Fixed -- avoid download bench result polluted - -### Other -- more client logs - -## [0.80.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.46...sn_cli-v0.80.47) - 2023-08-16 - -### Other -- update dependencies - -## [0.80.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.45...sn_cli-v0.80.46) - 2023-08-16 - -### Other -- update dependencies - -## [0.80.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.44...sn_cli-v0.80.45) - 2023-08-16 - -### Other -- optimize benchmark flow - -## [0.80.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.43...sn_cli-v0.80.44) - 2023-08-15 - -### Other -- update dependencies - -## [0.80.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.42...sn_cli-v0.80.43) - 2023-08-14 - 
-### Other -- update dependencies - -## [0.80.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.41...sn_cli-v0.80.42) - 2023-08-14 - -### Other -- update dependencies - -## [0.80.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.40...sn_cli-v0.80.41) - 2023-08-11 - -### Other -- *(cli)* print cost info - -## [0.80.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.39...sn_cli-v0.80.40) - 2023-08-11 - -### Other -- update dependencies - -## [0.80.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.38...sn_cli-v0.80.39) - 2023-08-10 - -### Other -- update dependencies - -## [0.80.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.37...sn_cli-v0.80.38) - 2023-08-10 - -### Other -- update dependencies - -## [0.80.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.36...sn_cli-v0.80.37) - 2023-08-09 - -### Other -- update dependencies - -## [0.80.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.35...sn_cli-v0.80.36) - 2023-08-08 - -### Fixed -- *(cli)* remove manual faucet claim from benchmarking. 
-- *(node)* prevent panic in storage calcs - -### Other -- *(cli)* get more money for benching -- log bench errors - -## [0.80.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.34...sn_cli-v0.80.35) - 2023-08-07 - -### Other -- update dependencies - -## [0.80.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.33...sn_cli-v0.80.34) - 2023-08-07 - -### Other -- *(node)* dont verify during benchmarks - -## [0.80.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.32...sn_cli-v0.80.33) - 2023-08-07 - -### Added -- rework register addresses to include pk - -### Other -- cleanup comments and names - -## [0.80.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.31...sn_cli-v0.80.32) - 2023-08-07 - -### Other -- update dependencies - -## [0.80.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.30...sn_cli-v0.80.31) - 2023-08-04 - -### Other -- update dependencies - -## [0.80.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.29...sn_cli-v0.80.30) - 2023-08-04 - -### Other -- update dependencies - -## [0.80.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.28...sn_cli-v0.80.29) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.27...sn_cli-v0.80.28) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.26...sn_cli-v0.80.27) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.25...sn_cli-v0.80.26) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.24...sn_cli-v0.80.25) - 2023-08-03 - -### Other -- update dependencies - -## [0.80.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.23...sn_cli-v0.80.24) - 2023-08-02 - -### Other -- update dependencies - -## 
[0.80.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.22...sn_cli-v0.80.23) - 2023-08-02 - -### Other -- update dependencies - -## [0.80.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.21...sn_cli-v0.80.22) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.20...sn_cli-v0.80.21) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.19...sn_cli-v0.80.20) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.18...sn_cli-v0.80.19) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.17...sn_cli-v0.80.18) - 2023-08-01 - -### Added -- *(cli)* add no-verify flag to cli - -### Other -- *(cli)* update logs and ci for payments - -## [0.80.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.16...sn_cli-v0.80.17) - 2023-08-01 - -### Other -- update dependencies - -## [0.80.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.15...sn_cli-v0.80.16) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.14...sn_cli-v0.80.15) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.13...sn_cli-v0.80.14) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.12...sn_cli-v0.80.13) - 2023-07-31 - -### Other -- update dependencies - -## [0.80.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.11...sn_cli-v0.80.12) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.10...sn_cli-v0.80.11) - 2023-07-28 - -### Other -- update 
dependencies - -## [0.80.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.9...sn_cli-v0.80.10) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.8...sn_cli-v0.80.9) - 2023-07-28 - -### Other -- update dependencies - -## [0.80.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.7...sn_cli-v0.80.8) - 2023-07-27 - -### Other -- update dependencies - -## [0.80.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.6...sn_cli-v0.80.7) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.5...sn_cli-v0.80.6) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.4...sn_cli-v0.80.5) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.3...sn_cli-v0.80.4) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.2...sn_cli-v0.80.3) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.1...sn_cli-v0.80.2) - 2023-07-26 - -### Other -- update dependencies - -## [0.80.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.80.0...sn_cli-v0.80.1) - 2023-07-25 - -### Other -- update dependencies - -## [0.80.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.32...sn_cli-v0.80.0) - 2023-07-21 - -### Added -- *(cli)* allow to pass the hex-encoded DBC as arg -- *(protocol)* [**breaking**] make Chunks storage payment required - -## [0.79.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.31...sn_cli-v0.79.32) - 2023-07-20 - -### Other -- update dependencies - -## [0.79.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.30...sn_cli-v0.79.31) - 2023-07-20 - -### Other -- 
update dependencies - -## [0.79.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.29...sn_cli-v0.79.30) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.28...sn_cli-v0.79.29) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.27...sn_cli-v0.79.28) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.26...sn_cli-v0.79.27) - 2023-07-19 - -### Other -- update dependencies - -## [0.79.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.25...sn_cli-v0.79.26) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.24...sn_cli-v0.79.25) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.23...sn_cli-v0.79.24) - 2023-07-18 - -### Fixed -- client - -## [0.79.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.22...sn_cli-v0.79.23) - 2023-07-18 - -### Other -- update dependencies - -## [0.79.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.21...sn_cli-v0.79.22) - 2023-07-17 - -### Fixed -- *(cli)* add more context when failing to decode a wallet - -## [0.79.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.20...sn_cli-v0.79.21) - 2023-07-17 - -### Other -- update dependencies - -## [0.79.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.19...sn_cli-v0.79.20) - 2023-07-17 - -### Added -- *(networking)* upgrade to libp2p 0.52.0 - -## [0.79.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.18...sn_cli-v0.79.19) - 2023-07-17 - -### Added -- *(client)* keep storage payment proofs in local wallet - -## [0.79.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.17...sn_cli-v0.79.18) - 2023-07-13 
- -### Other -- update dependencies - -## [0.79.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.16...sn_cli-v0.79.17) - 2023-07-13 - -### Other -- update dependencies - -## [0.79.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.15...sn_cli-v0.79.16) - 2023-07-12 - -### Other -- client to upload paid chunks in batches -- chunk files only once when making payment for their storage - -## [0.79.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.14...sn_cli-v0.79.15) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.13...sn_cli-v0.79.14) - 2023-07-11 - -### Fixed -- *(client)* publish register on creation - -## [0.79.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.12...sn_cli-v0.79.13) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.11...sn_cli-v0.79.12) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.10...sn_cli-v0.79.11) - 2023-07-11 - -### Other -- update dependencies - -## [0.79.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.9...sn_cli-v0.79.10) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.8...sn_cli-v0.79.9) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.7...sn_cli-v0.79.8) - 2023-07-10 - -### Added -- faucet server and cli DBC read - -### Fixed -- use Deposit --stdin instead of Read in cli -- wallet store - -## [0.79.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.6...sn_cli-v0.79.7) - 2023-07-10 - -### Other -- update dependencies - -## [0.79.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.5...sn_cli-v0.79.6) - 2023-07-07 - -### Other -- update dependencies 
- -## [0.79.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.4...sn_cli-v0.79.5) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.3...sn_cli-v0.79.4) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.2...sn_cli-v0.79.3) - 2023-07-07 - -### Other -- update dependencies - -## [0.79.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.1...sn_cli-v0.79.2) - 2023-07-06 - -### Other -- update dependencies - -## [0.79.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.79.0...sn_cli-v0.79.1) - 2023-07-06 - -### Other -- update dependencies - -## [0.79.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.26...sn_cli-v0.79.0) - 2023-07-06 - -### Added -- introduce `--log-format` arguments -- provide `--log-output-dest` arg for `safe` -- provide `--log-output-dest` arg for `safenode` - -### Other -- use data-dir rather than root-dir -- incorporate various feedback items - -## [0.78.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.25...sn_cli-v0.78.26) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.24...sn_cli-v0.78.25) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.23...sn_cli-v0.78.24) - 2023-07-05 - -### Other -- update dependencies - -## [0.78.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.22...sn_cli-v0.78.23) - 2023-07-04 - -### Other -- update dependencies - -## [0.78.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.21...sn_cli-v0.78.22) - 2023-07-03 - -### Other -- reduce SAMPLE_SIZE for the data_with_churn test - -## [0.78.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.20...sn_cli-v0.78.21) - 2023-06-29 - -### Other -- update dependencies - 
-## [0.78.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.19...sn_cli-v0.78.20) - 2023-06-29 - -### Other -- update dependencies - -## [0.78.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.18...sn_cli-v0.78.19) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.17...sn_cli-v0.78.18) - 2023-06-28 - -### Added -- register refactor, kad reg without cmds - -## [0.78.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.16...sn_cli-v0.78.17) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.15...sn_cli-v0.78.16) - 2023-06-28 - -### Other -- update dependencies - -## [0.78.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.14...sn_cli-v0.78.15) - 2023-06-27 - -### Other -- update dependencies - -## [0.78.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.13...sn_cli-v0.78.14) - 2023-06-27 - -### Other -- update dependencies - -## [0.78.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.12...sn_cli-v0.78.13) - 2023-06-27 - -### Other -- benchmark client download - -## [0.78.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.11...sn_cli-v0.78.12) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.10...sn_cli-v0.78.11) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.9...sn_cli-v0.78.10) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.8...sn_cli-v0.78.9) - 2023-06-26 - -### Other -- update dependencies - -## [0.78.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.7...sn_cli-v0.78.8) - 2023-06-26 - -### Other -- update dependencies - -## 
[0.78.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.6...sn_cli-v0.78.7) - 2023-06-24 - -### Other -- update dependencies - -## [0.78.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.5...sn_cli-v0.78.6) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.4...sn_cli-v0.78.5) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.3...sn_cli-v0.78.4) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.2...sn_cli-v0.78.3) - 2023-06-23 - -### Other -- update dependencies - -## [0.78.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.1...sn_cli-v0.78.2) - 2023-06-22 - -### Other -- update dependencies - -## [0.78.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.78.0...sn_cli-v0.78.1) - 2023-06-22 - -### Other -- *(client)* initial refactor around uploads - -## [0.78.0](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.49...sn_cli-v0.78.0) - 2023-06-22 - -### Added -- use standarised directories for files/wallet commands - -## [0.77.49](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.48...sn_cli-v0.77.49) - 2023-06-21 - -### Other -- update dependencies - -## [0.77.48](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.47...sn_cli-v0.77.48) - 2023-06-21 - -### Other -- update dependencies - -## [0.77.47](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.46...sn_cli-v0.77.47) - 2023-06-21 - -### Other -- *(node)* obtain parent_tx from SignedSpend - -## [0.77.46](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.45...sn_cli-v0.77.46) - 2023-06-21 - -### Added -- provide option for log output in json - -## [0.77.45](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.44...sn_cli-v0.77.45) - 2023-06-20 - -### Other -- update 
dependencies - -## [0.77.44](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.43...sn_cli-v0.77.44) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.43](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.42...sn_cli-v0.77.43) - 2023-06-20 - -### Other -- include the Tx instead of output DBCs as part of storage payment proofs -- use a set to collect Chunks addrs for build payment proof - -## [0.77.42](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.41...sn_cli-v0.77.42) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.41](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.40...sn_cli-v0.77.41) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.40](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.39...sn_cli-v0.77.40) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.39](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.38...sn_cli-v0.77.39) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.38](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.37...sn_cli-v0.77.38) - 2023-06-20 - -### Other -- update dependencies - -## [0.77.37](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.36...sn_cli-v0.77.37) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.36](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.35...sn_cli-v0.77.36) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.35](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.34...sn_cli-v0.77.35) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.34](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.33...sn_cli-v0.77.34) - 2023-06-19 - -### Other -- update dependencies - -## [0.77.33](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.32...sn_cli-v0.77.33) - 2023-06-19 - -### Other -- update dependencies - -## 
[0.77.32](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.31...sn_cli-v0.77.32) - 2023-06-19 - -### Fixed -- *(safe)* check if upload path contains a file - -## [0.77.31](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.30...sn_cli-v0.77.31) - 2023-06-16 - -### Fixed -- CLI is missing local-discovery feature - -## [0.77.30](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.29...sn_cli-v0.77.30) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.29](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.28...sn_cli-v0.77.29) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.28](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.27...sn_cli-v0.77.28) - 2023-06-16 - -### Other -- improve memory benchmarks, remove broken download bench - -## [0.77.27](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.26...sn_cli-v0.77.27) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.26](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.25...sn_cli-v0.77.26) - 2023-06-16 - -### Fixed -- *(bin)* negate local-discovery check - -## [0.77.25](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.24...sn_cli-v0.77.25) - 2023-06-16 - -### Other -- update dependencies - -## [0.77.24](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.23...sn_cli-v0.77.24) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.23](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.22...sn_cli-v0.77.23) - 2023-06-15 - -### Fixed -- parent spend issue - -## [0.77.22](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.21...sn_cli-v0.77.22) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.21](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.20...sn_cli-v0.77.21) - 2023-06-15 - -### Other -- update dependencies - -## [0.77.20](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.19...sn_cli-v0.77.20) - 2023-06-15 - -### 
Other -- update dependencies - -## [0.77.19](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.18...sn_cli-v0.77.19) - 2023-06-15 - -### Other -- use throughput for benchmarking - -## [0.77.18](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.17...sn_cli-v0.77.18) - 2023-06-15 - -### Other -- add initial benchmarks for prs and chart generation - -## [0.77.17](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.16...sn_cli-v0.77.17) - 2023-06-14 - -### Added -- include output DBC within payment proof for Chunks storage - -## [0.77.16](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.15...sn_cli-v0.77.16) - 2023-06-14 - -### Other -- update dependencies - -## [0.77.15](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.14...sn_cli-v0.77.15) - 2023-06-14 - -### Other -- use clap env and parse multiaddr - -## [0.77.14](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.13...sn_cli-v0.77.14) - 2023-06-14 - -### Added -- *(client)* expose req/resp timeout to client cli - -### Other -- *(client)* parse duration in clap derivation - -## [0.77.13](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.12...sn_cli-v0.77.13) - 2023-06-13 - -### Other -- update dependencies - -## [0.77.12](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.11...sn_cli-v0.77.12) - 2023-06-13 - -### Other -- update dependencies - -## [0.77.11](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.10...sn_cli-v0.77.11) - 2023-06-12 - -### Other -- update dependencies - -## [0.77.10](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.9...sn_cli-v0.77.10) - 2023-06-12 - -### Other -- update dependencies - -## [0.77.9](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.8...sn_cli-v0.77.9) - 2023-06-09 - -### Other -- improve documentation for cli commands - -## [0.77.8](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.7...sn_cli-v0.77.8) - 2023-06-09 - -### Other -- 
manually change crate version - -## [0.77.7](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.6...sn_cli-v0.77.7) - 2023-06-09 - -### Other -- update dependencies - -## [0.77.6](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.5...sn_cli-v0.77.6) - 2023-06-09 - -### Other -- emit git info with vergen - -## [0.77.5](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.4...sn_cli-v0.77.5) - 2023-06-09 - -### Other -- update dependencies - -## [0.77.4](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.3...sn_cli-v0.77.4) - 2023-06-09 - -### Other -- provide clarity on command arguments - -## [0.77.3](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.2...sn_cli-v0.77.3) - 2023-06-08 - -### Other -- update dependencies - -## [0.77.2](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.1...sn_cli-v0.77.2) - 2023-06-08 - -### Other -- improve documentation for cli arguments - -## [0.77.1](https://github.com/maidsafe/safe_network/compare/sn_cli-v0.77.0...sn_cli-v0.77.1) - 2023-06-07 - -### Added -- making the CLI --peer arg global so it can be passed in any order -- bail out if empty list of addreses is provided for payment proof generation -- *(client)* add progress indicator for initial network connections -- attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate payment the payment proof tree -- Chunk storage payment and building payment proofs - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- improve CLI --peer arg doc -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): 
sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- *(logs)* enable metrics feature by default -- small log wording updates -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml deleted file mode 100644 index 0b130d77e4..0000000000 --- a/sn_cli/Cargo.toml +++ /dev/null @@ -1,86 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "Safe Network CLI" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_cli" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.95.3" - -[[bin]] -path = "src/bin/main.rs" -name = "safe" - -[[bench]] -name = "files" -harness = false - -[features] -default = ["metrics"] -distribution = ["base64", "bitcoin"] -local = ["sn_client/local", "sn_peers_acquisition/local"] -metrics = ["sn_logging/process-metrics"] -network-contacts = ["sn_peers_acquisition/network-contacts"] -nightly = [] -open-metrics = ["sn_client/open-metrics"] - -[dependencies] -aes = "0.7.5" -base64 = { version = "0.22.0", optional = true } -bitcoin = { version = "0.31.0", optional = true } -block-modes = "0.8.1" -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -custom_debug = "~0.6.1" -chrono = "~0.4.19" -clap = { version = "4.2.1", features = ["derive"] } -color-eyre = "~0.6" -dialoguer = "~0.11.0" -dirs-next = "~2.0.0" -futures = "~0.3.13" -hex = "~0.4.3" -indicatif = { version = "0.17.5", features = ["tokio"] } -libp2p = { version = "0.54.1", features = ["identify", "kad"] } -rand = "0.8.5" -rayon = "1.8.0" -reqwest = { version = "0.12.2", default-features = 
false, features = [ - "rustls-tls-manual-roots", -] } -rmp-serde = "1.1.1" -rpassword = "7.3.1" -serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -tempfile = "3.6.0" -tiny-keccak = "~2.0.2" -tokio = { version = "1.32.0", features = [ - "io-util", - "macros", - "parking_lot", - "rt", - "sync", - "time", - "fs", -] } -tracing = { version = "~0.1.26" } -url = "2.4.0" -walkdir = "~2.5.0" -xor_name = "5.0.0" - -[dev-dependencies] -eyre = "0.6.8" -criterion = "0.5.1" -tempfile = "3.6.0" -rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.110.4", features = [ - "test-utils", -] } - -[lints] -workspace = true diff --git a/sn_cli/README.md b/sn_cli/README.md deleted file mode 100644 index f1a2f29edf..0000000000 --- a/sn_cli/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# sn_cli - -This directory contains the `safe` client binary. It is used to interact with the Safe Network and provides a range of commands for managing data, keys, wallets, and more. - -The `safe` binary includes the following subcommands: - -- `wallet`: Commands for wallet management. This includes creating wallets, checking balances, and making transactions. -- `files`: Commands for file management. This includes uploading, downloading, and deleting files. -- `register`: Commands for register management. This includes creating, reading, and writing to registers. diff --git a/sn_cli/benches/files.rs b/sn_cli/benches/files.rs deleted file mode 100644 index cece183f5c..0000000000 --- a/sn_cli/benches/files.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use criterion::{criterion_group, criterion_main, Criterion, Throughput}; -use rand::{thread_rng, Rng}; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; -use std::{ - fs::File, - io::Write, - path::{Path, PathBuf}, - process::{exit, Command}, - time::Duration, -}; -use tempfile::tempdir; - -const SAMPLE_SIZE: usize = 20; - -// This procedure includes the client startup, which will be measured by criterion as well. -// As normal user won't care much about initial client startup, -// but be more alerted on communication speed during transmission. -// It will be better to execute bench test with `local`, -// to make the measurement results reflect speed improvement or regression more accurately. 
-fn safe_files_upload(dir: &str) { - let output = Command::new("./target/release/safe") - .arg("files") - .arg("upload") - .arg(dir) - .arg("--retry-strategy") // no retries - .arg("quick") - .output() - .expect("Failed to execute command"); - - if !output.status.success() { - let err = output.stderr; - let err_string = String::from_utf8(err).expect("Failed to parse error string"); - panic!("Upload command executed with failing error code: {err_string:?}"); - } -} - -fn safe_files_download() { - let output = Command::new("./target/release/safe") - .arg("files") - .arg("download") - .output() - .expect("Failed to execute command"); - - if !output.status.success() { - let err = output.stderr; - let err_string = String::from_utf8(err).expect("Failed to parse error string"); - panic!("Download command executed with failing error code: {err_string:?}"); - } -} - -fn generate_file(path: &PathBuf, file_size_mb: usize) { - let mut file = File::create(path).expect("Failed to create file"); - let mut rng = thread_rng(); - - // can create [u8; 32] max at time. Thus each mb has 1024*32 such small chunks - let n_small_chunks = file_size_mb * 1024 * 32; - for _ in 0..n_small_chunks { - let random_data: [u8; 32] = rng.gen(); - file.write_all(&random_data) - .expect("Failed to write to file"); - } - let size = file.metadata().expect("Failed to get metadata").len() as f64 / (1024 * 1024) as f64; - assert_eq!(file_size_mb as f64, size); -} - -fn fund_cli_wallet() { - let _ = Command::new("./target/release/safe") - .arg("wallet") - .arg("get-faucet") - .arg("127.0.0.1:8000") - .output() - .expect("Failed to execute 'safe wallet get-faucet' command"); -} - -fn criterion_benchmark(c: &mut Criterion) { - // Check if the binary exists - if !Path::new("./target/release/safe").exists() { - eprintln!("Error: Binary ./target/release/safe does not exist. Please make sure to compile your project first"); - exit(1); - } - - let sizes: [u64; 2] = [1, 10]; // File sizes in MB. 
Add more sizes as needed - - for size in sizes.iter() { - let temp_dir = tempdir().expect("Failed to create temp dir"); - let temp_dir_path = temp_dir.into_path(); - let temp_dir_path_str = temp_dir_path.to_str().expect("Invalid unicode encountered"); - - // create 23 random files. This is to keep the benchmark results consistent with prior runs. The change to make - // use of ChunkManager means that we don't upload the same file twice and the `uploaded_files` file is now read - // as a set and we don't download the same file twice. Hence create 23 files as counted from the logs - // pre ChunkManager change. - (0..23).into_par_iter().for_each(|idx| { - let path = temp_dir_path.join(format!("random_file_{size}_mb_{idx}")); - generate_file(&path, *size as usize); - }); - fund_cli_wallet(); - - // Wait little bit for the fund to be settled. - std::thread::sleep(Duration::from_secs(10)); - - let mut group = c.benchmark_group(format!("Upload Benchmark {size}MB")); - group.sampling_mode(criterion::SamplingMode::Flat); - // One sample may compose of multiple iterations, and this is decided by `measurement_time`. - // Set this to a lower value to ensure each sample only contains one iteration. - // To ensure the download throughput calculation is correct. - group.measurement_time(Duration::from_secs(5)); - group.warm_up_time(Duration::from_secs(5)); - group.sample_size(SAMPLE_SIZE); - - // Set the throughput to be reported in terms of bytes - group.throughput(Throughput::Bytes(size * 1024 * 1024)); - let bench_id = format!("safe files upload {size}mb"); - group.bench_function(bench_id, |b| { - b.iter(|| safe_files_upload(temp_dir_path_str)) - }); - group.finish(); - } - - let mut group = c.benchmark_group("Download Benchmark".to_string()); - group.sampling_mode(criterion::SamplingMode::Flat); - group.measurement_time(Duration::from_secs(10)); - group.warm_up_time(Duration::from_secs(5)); - - // The download will download all uploaded files during bench. 
- // If the previous bench executed with the default 100 sample size, - // there will then be around 1.1GB in total, and may take around 40s for each iteratioin. - // Hence we have to reduce the number of iterations from the default 100 to 10, - // To avoid the benchmark test taking over one hour to complete. - // - // During `measurement_time` and `warm_up_time`, there will be one upload run for each. - // Which means two additional `uploaded_files` created and for downloading. - let total_size: u64 = sizes - .iter() - .map(|size| (SAMPLE_SIZE as u64 + 2) * size) - .sum(); - group.sample_size(SAMPLE_SIZE / 2); - - // Set the throughput to be reported in terms of bytes - group.throughput(Throughput::Bytes(total_size * 1024 * 1024)); - let bench_id = "safe files download".to_string(); - group.bench_function(bench_id, |b| b.iter(safe_files_download)); - group.finish(); -} - -criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); diff --git a/sn_cli/src/acc_packet.rs b/sn_cli/src/acc_packet.rs deleted file mode 100644 index a9430e3449..0000000000 --- a/sn_cli/src/acc_packet.rs +++ /dev/null @@ -1,1603 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod change_tracking; - -use change_tracking::*; - -use super::{ - files::{download_file, FilesUploader}, - ChunkManager, -}; - -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, - protocol::storage::{Chunk, RegisterAddress, RetryStrategy}, - registers::EntryHash, - transfers::{DerivationIndex, MainSecretKey}, - Client, FilesApi, FolderEntry, FoldersApi, Metadata, UploadCfg, WalletClient, -}; - -use bls::PublicKey; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use std::{ - collections::{ - btree_map::{Entry, OccupiedEntry}, - BTreeMap, - }, - ffi::OsString, - fs::{create_dir_all, remove_dir_all, remove_file, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tokio::task::JoinSet; -use tracing::trace; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// Derivation index used to obtain the account packet root folder xorname -// TODO: use eip2333 path for deriving keys -const ACC_PACKET_ADDR_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x0; 32]); - -/// Derivation index used to obtain the owner key of the account packet root folder. -/// The derived key pair is used to: -/// - Sign all data operations sent to the network. -/// - Set it as the owner of all Folders (Registers) created on the network. -/// - Encrypt all the Folders entries metadata chunks. -// TODO: use eip2333 path for deriving keys -const ACC_PACKET_OWNER_DERIVATION_INDEX: DerivationIndex = DerivationIndex([0x1; 32]); - -/// An `AccountPacket` object allows users to store and manage files, wallets, etc., with the ability -/// and tools necessary to keep an instance tracking a local storage path, as well as keeping it in sync -/// with its remote version stored on the network. -/// A `Client` and a the location for a funded local hot-wallet are required by this object in order to be able to connect -/// to the network, paying for data storage, and upload/retrieve information to/from the network. 
-/// -/// TODO: currently only files and folders are supported, wallets, keys, etc., to be added later. -/// -/// TODO: make use of eip2333 paths for deriving keys. Currently keys used for encrypting and signing -/// operations are derived from the root key provided using index derivation. -/// -/// The `AccountPacket` keeps a reference to the network address of the root Folder holding the user's -/// files/folder hierarchy. All tracking information is kept under the `.safe` directory on disk, whose -/// content is not uploaded to the network, but only kept locally in order to realise which files/dirs -/// the user has made changes on compared to their last version retrieved from the network. -/// -/// A subdirectory called `metadata` is kept under `.safe` directory with the following files: -/// - A file named `root_folder.addr` which contains the network address where the root Folder is stored, -/// which is the one holding the entire hierarchy of user's files/dirs to be kept in sync with local changes -/// made by the user. -/// - For each of the user's files/dirs, a serialised `MetadataTrackingInfo` instance is stored on using the -/// file/dir metadata chunk xorname as filename. The information stored in these files are used to realise -/// if changes were locally made by the user in comparison with the last version of such files/dirs retrieved -/// from the network. -/// Example of files generated within an account-packet to keep track of changes makde to user's files/dirs: -/// -/// ./my-acc-packet -/// ├── my_dir_1 -/// ├── my_file.txt -/// ├── my_dir_2 -/// │ ├── other_dir -/// │ └── my_other_file.txt -/// └── .safe -/// ├── chunk_artifacts -/// │ ├── ... -/// │ ... 
-/// ├── metadata -/// │ ├── 082cc90c900fa08d36067246a1e6136a828f1aae4926268c4349c200d56e34b9 -/// │ ├── 102c5536a10682bc3cdd4a1915fe2ad5e839cb94d0d3f124d0c18aee1d49ce50 -/// │ ├── 31824937c47a979df64af591f2e43f76190e65af835c4b338cbe7a7ba3f7d3cb -/// │ ├── 36778e471083140bc111677e2a86e49f4c0c20bc14ff2ad610e22615b72260b8 -/// │ ├── 3edd953cc320449e09b69b7b1b909a53874ee477f602f1a807dfd8057378367e -/// │ └── root_folder.addr -/// └── uploaded_files -/// ├── ... -/// ... -/// -/// There are other files which are stored under `.safe/chunk_artifacts` and `.safe/uploaded_files` directories -/// which are managed by the `ChunkManager` in order to locally cache chunked files, and a list of files -/// already uploaded to the network, to prevent from chunking and/or uploading the same files again. For more -/// details about these files, please refer to the `ChunkManager` module. -pub struct AccountPacket { - client: Client, - wallet_dir: PathBuf, - files_dir: PathBuf, - meta_dir: PathBuf, - tracking_info_dir: PathBuf, - curr_tracking_info: BTreeMap, - root_folder_addr: RegisterAddress, - root_folder_created: bool, -} - -impl AccountPacket { - /// Initialise directory as a fresh new packet. - /// All keys used for encrypting the files/folders metadata chunks and signing - /// operations are derived from the root key provided using index derivation. - /// The root Folder address and owner are also derived from the root SK. - /// A password can be optionally provided to encrypt the root SK before storing it on disk. - pub fn init( - client: Client, - wallet_dir: &Path, - path: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, - ) -> Result { - let (_, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?; - - // If there is already some tracking info we bail out as this is meant ot be a fresh new packet. 
- if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - bail!( - "The local path {path:?} is already being tracked with Folder address: {}", - addr.to_hex() - ); - } - - let (client, root_folder_addr) = derive_keys_and_address(client, root_sk); - store_root_folder_tracking_info(&meta_dir, root_folder_addr, false)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - Self::from_path(client, wallet_dir, path, password) - } - - /// Create AccountPacket instance from a directory which has been already initialised. - pub fn from_path( - client: Client, - wallet_dir: &Path, - path: &Path, - password: Option<&[u8]>, - ) -> Result { - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(path)?; - let root_sk = read_root_sk(&tracking_info_dir, password)?; - let (client, root_folder_addr) = derive_keys_and_address(client, &root_sk); - - // this will fail if the directory was not previously initialised with 'init'. - let curr_tracking_info = read_tracking_info_from_disk(&meta_dir)?; - let (read_folder_addr, root_folder_created) = read_root_folder_addr(&meta_dir) - .map_err(|_| eyre!("Root Folder address not found, make sure the directory {path:?} is initialised."))?; - if read_folder_addr != root_folder_addr { - bail!( - "The path is already tracking another Folder with address: {}", - read_folder_addr.to_hex() - ); - } - - Ok(Self { - client, - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info, - root_folder_addr, - root_folder_created, - }) - } - - /// Return the address of the root Folder - pub fn root_folder_addr(&self) -> RegisterAddress { - self.root_folder_addr - } - - /// Retrieve and store entire Folders hierarchy from the network, generating tracking info. 
- pub async fn retrieve_folders( - client: &Client, - wallet_dir: &Path, - root_sk: &MainSecretKey, - password: Option<&[u8]>, - download_path: &Path, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - create_dir_all(download_path)?; - let (files_dir, tracking_info_dir, meta_dir) = build_tracking_info_paths(download_path)?; - - let (client, root_folder_addr) = derive_keys_and_address(client.clone(), root_sk); - - if let Ok((addr, _)) = read_root_folder_addr(&meta_dir) { - // bail out if there is already a root folder address different from the passed in - if addr == root_folder_addr { - bail!("The download path is already tracking that Folder, use 'sync' instead."); - } else { - bail!( - "The download path is already tracking another Folder with address: {}", - addr.to_hex() - ); - } - } else { - store_root_folder_tracking_info(&meta_dir, root_folder_addr, true)?; - store_root_sk(&tracking_info_dir, root_sk, password)?; - } - - let mut acc_packet = Self { - client: client.clone(), - wallet_dir: wallet_dir.to_path_buf(), - files_dir, - meta_dir, - tracking_info_dir, - curr_tracking_info: BTreeMap::default(), - root_folder_addr, - root_folder_created: true, - }; - - let folder_name: OsString = download_path.file_name().unwrap_or_default().into(); - let folders_api = - FoldersApi::retrieve(client.clone(), wallet_dir, root_folder_addr).await?; - let folders_to_download = vec![(folder_name, folders_api, download_path.to_path_buf())]; - - let _ = acc_packet - .download_folders_and_files(folders_to_download, batch_size, retry_strategy) - .await?; - - acc_packet.curr_tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - - Ok(acc_packet) - } - - /// Generate a report with differences found in local files/folders in comparison with their versions stored on the network. 
- pub fn status(&self) -> Result<()> { - println!("Looking for local changes made to files/folders compared to version on network at: {} ...", self.root_folder_addr().to_hex()); - let changes = self.scan_files_and_folders_for_changes(false)?; - - if changes.mutations.is_empty() { - println!("No local changes made to files/folders."); - } else { - println!("Local changes made to files/folders:"); - changes.mutations.iter().for_each(|m| println!("{m}")); - - let num_of_changes = changes.mutations.len(); - println!("\nChanges found to local files/folders: {num_of_changes}"); - } - Ok(()) - } - - /// Sync local changes made to files and folder with their version on the network, - /// both pushing and pulling changes to/form the network. - pub async fn sync(&mut self, upload_cfg: UploadCfg, make_data_public: bool) -> Result<()> { - let ChangesToApply { folders, mutations } = - self.scan_files_and_folders_for_changes(make_data_public)?; - - if mutations.is_empty() { - println!("No local changes made to files/folders to be pushed to network."); - } else { - println!("Local changes made to files/folders to be synced with network:"); - mutations.iter().for_each(|m| println!("{m}")); - } - - println!("Paying for folders hierarchy and uploading..."); - let synced_folders = self - .pay_and_sync_folders(folders, upload_cfg, make_data_public) - .await?; - - // mark root folder as created if it wasn't already - if !self.root_folder_created { - self.root_folder_created = true; - store_root_folder_tracking_info( - &self.meta_dir, - self.root_folder_addr, - self.root_folder_created, - )?; - } - - // update tracking information based on mutations detected locally - for mutation in mutations { - match mutation { - Mutation::NewFile(tracking_info) | Mutation::NewFolder(tracking_info) => { - self.store_tracking_info(tracking_info)?; - } - Mutation::FileRemoved((_, meta_xorname)) - | Mutation::FolderRemoved((_, meta_xorname)) => { - self.remove_tracking_info(meta_xorname); - } - 
Mutation::FileContentChanged((meta_xorname, tracking_info)) => { - self.store_tracking_info(tracking_info)?; - self.remove_tracking_info(meta_xorname); - } - } - } - - // download files/folders which are new in the synced folders - let folders_to_download: Vec<_> = synced_folders - .iter() - .map(|(path, (folders_api, _))| { - let folder_name: OsString = path.file_name().unwrap_or_default().into(); - (folder_name, folders_api.clone(), path.clone()) - }) - .collect(); - let mut updated_folders = self - .download_folders_and_files( - folders_to_download, - upload_cfg.batch_size, - upload_cfg.retry_strategy, - ) - .await?; - - // Now let's check if any file/folder was removed remotely so we remove them locally from disk. - // We do it in two phases, first we get rid of all dirs that were removed, then we go through - // the files, this is to make sure we remove files which belong to nested folders being removed. - let mut curr_tracking_info = read_tracking_info_from_disk(&self.meta_dir)?; - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::Folder(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - curr_tracking_info.retain(|_, tracking_info| { - if let FolderEntry::File(_) = tracking_info.metadata.content { - !self.remove_tracking_if_not_found_in_folders(tracking_info, &mut updated_folders) - } else { - true - } - }); - - self.curr_tracking_info = curr_tracking_info; - - Ok(()) - } - - // Private helpers - - // Generate the path relative to the user's root folder - fn get_relative_path(&self, path: &Path) -> Result { - let relative_path = path - .to_path_buf() - .canonicalize()? - .strip_prefix(&self.files_dir)? 
- .to_path_buf(); - Ok(relative_path) - } - - // Store tracking info in a file to keep track of any changes made to the source file/folder - fn store_tracking_info( - &self, - MetadataTrackingInfo { - file_path, - meta_xorname, - metadata, - entry_hash, - }: MetadataTrackingInfo, - ) -> Result<()> { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - let mut meta_file = File::create(metadata_file_path)?; - - let tracking_info = MetadataTrackingInfo { - // we store the relative path so the root folder can be moved to - // different locations/paths if desired by the user. - file_path: self.get_relative_path(&file_path)?, - meta_xorname, - metadata, - entry_hash, - }; - - meta_file.write_all(&rmp_serde::to_vec(&tracking_info)?)?; - - Ok(()) - } - - // Remove tracking information file for given xorname - fn remove_tracking_info(&self, meta_xorname: XorName) { - let metadata_file_path = self.meta_dir.join(hex::encode(meta_xorname)); - if let Err(err) = remove_file(&metadata_file_path) { - println!("Failed to remove tracking info file {metadata_file_path:?}: {err}"); - } - } - - // If the file/folder referenced by the tracking info provided is not part of the passed Folders - // hierarchy, remove it from local disk along with its tracking information. - // Returns whether the file/folder was removed. 
- fn remove_tracking_if_not_found_in_folders( - &self, - tracking_info: &MetadataTrackingInfo, - folders: &mut Folders, - ) -> bool { - let mut removed = false; - let abs_path = self.files_dir.join(&tracking_info.file_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_dir_all(&abs_path) { - trace!("Failed to remove directory {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - folders.remove(&abs_path); - removed = true; - } - } - } - FolderEntry::File(_) => { - match find_by_name_in_parent_folder( - &tracking_info.metadata.name, - &abs_path, - folders, - ) { - Some(meta_xorname) => { - if meta_xorname != tracking_info.meta_xorname { - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - None => { - if let Err(err) = remove_file(&abs_path) { - // this is expected if parent folder was just removed as part of this syncing flow. - trace!("Failed to remove file {abs_path:?}: {err:?}"); - } - self.remove_tracking_info(tracking_info.meta_xorname); - removed = true; - } - } - } - } - - removed - } - - // Scan existing files and folders on disk, generating a report of all the detected - // changes based on the tracking info kept locally. - // If make_data_public is false the metadata chunks are encrypted. - fn scan_files_and_folders_for_changes(&self, make_data_public: bool) -> Result { - // we don't use the local cache in order to realise of any changes made to files content. 
- let mut chunk_manager = ChunkManager::new(&self.tracking_info_dir); - chunk_manager.chunk_with_iter(self.iter_only_files(), false, false)?; - - let encryption_pk = if make_data_public { - None - } else { - // we pass down the key to encrypt the metadata chunk of any new content detected. - Some(self.client.signer_pk()) - }; - - let mut changes = self.read_folders_hierarchy_from_disk(encryption_pk)?; - - // add chunked files to the corresponding Folders - let folders = &mut changes.folders; - for chunked_file in chunk_manager.iter_chunked_files() { - let file_path = &chunked_file.file_path; - if let Some(Entry::Occupied(mut parent_folder)) = file_path - .parent() - .map(|parent| folders.entry(parent.to_path_buf())) - { - // try to find the tracking info of the file/folder by its name - match self.get_tracking_info(file_path) { - Ok(Some(tracking_info)) => match &tracking_info.metadata.content { - FolderEntry::File(chunk) => { - if chunk.address() != &chunked_file.head_chunk_address { - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - - changes.mutations.push(Mutation::FileContentChanged(( - tracking_info.meta_xorname, - MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - }, - ))); - } - } - FolderEntry::Folder(_) => { - // New file found where there used to be a folder - let (entry_hash, meta_xorname, metadata) = replace_item_in_folder( - &mut parent_folder, - tracking_info.entry_hash, - chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - }, - Ok(None) => { - let (entry_hash, meta_xorname, metadata) = - parent_folder.get_mut().0.add_file( - 
chunked_file.file_name.clone(), - chunked_file.data_map.clone(), - encryption_pk, - )?; - parent_folder.get_mut().1.has_new_entries(); - - changes - .mutations - .push(Mutation::NewFile(MetadataTrackingInfo { - file_path: file_path.to_path_buf(), - meta_xorname, - metadata, - entry_hash, - })); - } - Err(err) => { - println!("Skipping file {file_path:?}: {err:?}"); - } - } - } - } - - // now let's check if any file/folder was removed from disk - for (item_path, tracking_info) in self.curr_tracking_info.iter() { - let abs_path = self.files_dir.join(item_path); - match tracking_info.metadata.content { - FolderEntry::Folder(_) => { - if !folders.contains_key(&abs_path) { - remove_from_parent(folders, &abs_path, tracking_info.entry_hash)?; - changes.mutations.push(Mutation::FolderRemoved(( - abs_path, - tracking_info.meta_xorname, - ))); - } - } - FolderEntry::File(_) => { - if chunk_manager - .iter_chunked_files() - .all(|chunked_file| chunked_file.file_path != abs_path) - { - remove_from_parent(folders, &abs_path, tracking_info.entry_hash)?; - changes.mutations.push(Mutation::FileRemoved(( - abs_path, - tracking_info.meta_xorname, - ))); - } - } - } - } - - Ok(changes) - } - - // Build Folders hierarchy from the set files dir. The metadata chunk of every new folder - // will be encrpyted if an encrpytion key has been provided. - fn read_folders_hierarchy_from_disk( - &self, - encryption_pk: Option, - ) -> Result { - let mut changes = ChangesToApply::default(); - for (dir_path, depth, parent, dir_name) in self.iter_only_dirs().filter_map(|entry| { - entry.path().parent().map(|parent| { - ( - entry.path().to_path_buf(), - entry.depth(), - parent.to_owned(), - entry.file_name().to_owned(), - ) - }) - }) { - let (folder, folder_change) = changes - .folders - .entry(dir_path.clone()) - .or_insert(self.find_folder_in_tracking_info(&dir_path)?) 
- .clone(); - let curr_folder_addr = *folder.address(); - - if depth > 0 { - let (parent_folder, parent_folder_change) = changes - .folders - .entry(parent.clone()) - .or_insert(self.find_folder_in_tracking_info(&parent)?); - - if folder_change.is_new_folder() { - let (entry_hash, meta_xorname, metadata) = - parent_folder.add_folder(dir_name, curr_folder_addr, encryption_pk)?; - parent_folder_change.has_new_entries(); - - changes - .mutations - .push(Mutation::NewFolder(MetadataTrackingInfo { - file_path: dir_path, - meta_xorname, - metadata, - entry_hash, - })); - } - } - } - - Ok(changes) - } - - // Read local tracking info for given file/folder item - fn get_tracking_info(&self, path: &Path) -> Result> { - let path = self.get_relative_path(path)?; - Ok(self.curr_tracking_info.get(&path)) - } - - // Instantiate a FolderApi based on local tracking info for given folder item - fn find_folder_in_tracking_info(&self, path: &Path) -> Result<(FoldersApi, FolderChange)> { - let mut folder_change = FolderChange::NewFolder; - let address = if path == self.files_dir { - if self.root_folder_created { - folder_change = FolderChange::NoChange; - } - Some(self.root_folder_addr) - } else { - self.get_tracking_info(path)?.and_then(|tracking_info| { - match tracking_info.metadata.content { - FolderEntry::Folder(addr) => { - folder_change = FolderChange::NoChange; - Some(addr) - } - FolderEntry::File(_) => None, - } - }) - }; - - let folders_api = FoldersApi::new(self.client.clone(), &self.wallet_dir, address)?; - Ok((folders_api, folder_change)) - } - - // Creates an iterator over the user's dirs names, excluding the '.safe' tracking dir - fn iter_only_dirs(&self) -> impl Iterator { - WalkDir::new(&self.files_dir) - .into_iter() - .filter_entry(|e| e.file_type().is_dir() && e.file_name() != SAFE_TRACKING_CHANGES_DIR) - .flatten() - } - - // Creates an iterator over the user's file, excluding the tracking files under '.safe' dir - fn iter_only_files(&self) -> impl Iterator { - 
WalkDir::new(&self.files_dir) - .into_iter() - .filter_entry(|e| e.file_type().is_file() || e.file_name() != SAFE_TRACKING_CHANGES_DIR) - .flatten() - .filter(|e| e.file_type().is_file()) - } - - // Pay and upload all the files and folder. - async fn pay_and_sync_folders( - &self, - folders: Folders, - upload_cfg: UploadCfg, - make_data_public: bool, - ) -> Result { - let files_uploader = FilesUploader::new(self.client.clone(), self.wallet_dir.clone()) - .set_upload_cfg(upload_cfg) - .set_make_data_public(make_data_public) - .insert_entries(self.iter_only_files()); - let _summary = files_uploader.start_upload().await?; - - // Let's make the storage payment for Folders - let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(self.client.clone(), wallet); - let mut net_addresses = vec![]; - let mut new_folders = 0; - // let's collect list of addresses we need to pay for - folders.iter().for_each(|(_, (folder, folder_change))| { - if folder_change.is_new_folder() { - net_addresses.push(folder.as_net_addr()); - new_folders += 1; - } - net_addresses.extend(folder.meta_addrs_to_pay()); - }); - - let payment_result = wallet_client - .pay_for_storage(net_addresses.into_iter()) - .await?; - match payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - { - Some(cost) => { - let balance = wallet_client.balance(); - println!("Made payment of {cost} for {new_folders} Folders. New balance: {balance}",) - } - None => bail!("Failed to calculate total payment cost"), - } - - // Sync Folders concurrently now that payments have been made. - let mut tasks = JoinSet::new(); - for (path, (mut folder, folder_change)) in folders { - let op = if folder_change.is_new_folder() { - "Creation" - } else { - "Syncing" - }; - - tasks.spawn(async move { - match folder.sync(upload_cfg).await { - Ok(()) => { - println!( - "{op} of Folder (for {path:?}) succeeded. 
Address: {}", - folder.address().to_hex() - ); - } - Err(err) => { - println!("{op} of Folder (for {path:?}) failed: {err}") - } - } - (path, folder, folder_change) - }); - } - - let mut synced_folders = Folders::new(); - while let Some(res) = tasks.join_next().await { - match res { - Ok((path, folder, c)) => { - synced_folders.insert(path, (folder, c)); - } - Err(err) => { - println!("Failed to sync/create a Folder with/on the network: {err:?}"); - } - } - } - - Ok(synced_folders) - } - - // Download a Folders and their files from the network and generate tracking info - async fn download_folders_and_files( - &self, - mut folders_to_download: Vec<(OsString, FoldersApi, PathBuf)>, - batch_size: usize, - retry_strategy: RetryStrategy, - ) -> Result { - let mut files_to_download = vec![]; - let mut updated_folders = Folders::new(); - while let Some((name, mut folders_api, target_path)) = folders_to_download.pop() { - if updated_folders.contains_key(&target_path) { - // we've already downloaded this Folder - continue; - } - - println!( - "Downloading Folder {name:?} from {}", - folders_api.address().to_hex() - ); - self.download_folder_from_network( - &target_path, - &mut folders_api, - &mut files_to_download, - &mut folders_to_download, - ) - .await?; - updated_folders.insert(target_path, (folders_api, FolderChange::NoChange)); - } - - let files_api: FilesApi = FilesApi::new(self.client.clone(), self.files_dir.clone()); - for (file_name, data_map_chunk, path) in files_to_download { - download_file( - files_api.clone(), - *data_map_chunk.name(), - (file_name, Some(data_map_chunk)), - &path, - false, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(updated_folders) - } - - // Download a Folder from the network and generate tracking info - async fn download_folder_from_network( - &self, - target_path: &Path, - folders_api: &mut FoldersApi, - files_to_download: &mut Vec<(OsString, Chunk, PathBuf)>, - folders_to_download: &mut Vec<(OsString, FoldersApi, PathBuf)>, 
- ) -> Result<()> { - for (entry_hash, (meta_xorname, metadata)) in folders_api.entries().await?.into_iter() { - let name = metadata.name.clone(); - let item_path = target_path.join(name.clone()); - if let Ok(Some(tracking_info)) = self.get_tracking_info(&item_path) { - if tracking_info.meta_xorname == meta_xorname { - // thus we already have this same file/folder locally - continue; - } - } - - match &metadata.content { - FolderEntry::File(data_map_chunk) => { - files_to_download.push(( - name.clone().into(), - data_map_chunk.clone(), - target_path.to_path_buf(), - )); - let _ = File::create(&item_path)?; - } - FolderEntry::Folder(subfolder_addr) => { - let folders_api = FoldersApi::retrieve( - self.client.clone(), - &self.wallet_dir, - *subfolder_addr, - ) - .await?; - - folders_to_download.push((name.clone().into(), folders_api, item_path.clone())); - create_dir_all(&item_path)?; - } - }; - - self.store_tracking_info(MetadataTrackingInfo { - file_path: item_path, - meta_xorname, - metadata, - entry_hash, - })?; - } - - Ok(()) - } -} - -// Given an absolute path, find the Folder containing such item, and remove it from its entries. -fn remove_from_parent(folders: &mut Folders, path: &Path, entry_hash: EntryHash) -> Result<()> { - if let Some((parent_folder, folder_change)) = path.parent().and_then(|p| folders.get_mut(p)) { - folder_change.has_new_entries(); - parent_folder.remove_item(entry_hash)?; - } - Ok(()) -} - -// Replace a file/folder item from a given Folder (passed in as a container's OccupiedEntry'). -// The metadata chunk of the new item (folder/file) will be encrpyted if a key has been provided. 
-fn replace_item_in_folder( - folder: &mut OccupiedEntry<'_, PathBuf, (FoldersApi, FolderChange)>, - entry_hash: EntryHash, - file_name: OsString, - data_map: Chunk, - encryption_pk: Option, -) -> Result<(EntryHash, XorName, Metadata)> { - let (ref mut folders_api, ref mut folder_change) = folder.get_mut(); - folder_change.has_new_entries(); - let res = folders_api.replace_file( - entry_hash, - file_name.clone(), - data_map.clone(), - encryption_pk, - )?; - Ok(res) -} - -// Search for a file/folder item in its parent Folder by its name, returning its metadata chunk xorname. -fn find_by_name_in_parent_folder(name: &str, path: &Path, folders: &Folders) -> Option { - path.parent() - .and_then(|parent| folders.get(parent)) - .and_then(|(folder, _)| folder.find_by_name(name)) - .map(|(meta_xorname, _)| *meta_xorname) -} - -// Using the provided root SK, derive client signer SK and the root Folder address from it. -// It returns the Client updated with the derived signing key set, along with the derived Register address. -// TODO: use eip2333 path for deriving keys and address. -fn derive_keys_and_address( - mut client: Client, - root_sk: &MainSecretKey, -) -> (Client, RegisterAddress) { - // Set the client signer SK as a derived key from the root key. This will - // be used for signing operations and also for encrypting metadata chunks. - let signer_sk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key(); - client.set_signer_key(signer_sk); - - // Derive a key from the root key to generate the root Folder xorname, and use - // the client signer's corresponding PK as the owner of it. - let derived_pk = root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key(); - let root_folder_addr = RegisterAddress::new( - XorName::from_content(&derived_pk.to_bytes()), - client.signer_pk(), - ); - - (client, root_folder_addr) -} - -#[cfg(test)] -mod tests { - // All tests require a network running so Clients can be instantiated. 
- - use crate::acc_packet::{ - derive_keys_and_address, RECOVERY_SEED_FILENAME, SAFE_TRACKING_CHANGES_DIR, - }; - - use super::{ - read_root_folder_addr, read_tracking_info_from_disk, AccountPacket, Metadata, - MetadataTrackingInfo, Mutation, ACC_PACKET_ADDR_DERIVATION_INDEX, - ACC_PACKET_OWNER_DERIVATION_INDEX, - }; - use rand::{thread_rng, Rng}; - use sn_client::{ - protocol::storage::{Chunk, RetryStrategy}, - registers::{EntryHash, RegisterAddress}, - test_utils::{get_funded_wallet, get_new_client, random_file_chunk}, - transfers::MainSecretKey, - FolderEntry, UploadCfg, BATCH_SIZE, - }; - - use bls::SecretKey; - use bytes::Bytes; - use eyre::{bail, eyre, Result}; - use std::{ - collections::{BTreeMap, BTreeSet}, - fs::{create_dir_all, remove_dir_all, remove_file, File, OpenOptions}, - io::{Read, Write}, - path::{Path, PathBuf}, - }; - use xor_name::XorName; - - const SYNC_OPTS: (UploadCfg, bool) = { - let cfg = UploadCfg { - verify_store: true, - batch_size: BATCH_SIZE, - retry_strategy: RetryStrategy::Quick, - show_holders: false, - max_repayments_for_failed_data: 1, - collect_registers: false, - }; - let make_data_public = false; - (cfg, make_data_public) - }; - - #[tokio::test] - async fn test_acc_packet_private_helpers() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let files_path = tmp_dir.path().join("myfiles"); - create_dir_all(&files_path)?; - - let owner_pk = root_sk - .derive_key(&ACC_PACKET_OWNER_DERIVATION_INDEX) - .secret_key() - .public_key(); - let xorname = XorName::from_content( - &root_sk - .derive_key(&ACC_PACKET_ADDR_DERIVATION_INDEX) - .secret_key() - .public_key() - .to_bytes(), - ); - let expected_folder_addr = RegisterAddress::new(xorname, owner_pk); - - let acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - assert_eq!( - 
derive_keys_and_address(client, &root_sk).1, - expected_folder_addr - ); - assert_eq!(acc_packet.root_folder_addr(), expected_folder_addr); - - let mut test_files = create_test_files_on_disk(&files_path)?; - let mut rng = rand::thread_rng(); - let dummy_metadata = Metadata { - name: "dummy".to_string(), - content: FolderEntry::File(Chunk::new(Bytes::new())), - }; - for (relative_path, _) in test_files.iter() { - let abs_path = files_path.join(relative_path); - - // test helper which calculates relative paths based on root files dir of acc packet - assert!( - matches!(acc_packet.get_relative_path(&abs_path), Ok(p) if &p == relative_path), - "AccountPacket::get_relative_path helper returned invalid path" - ); - - // let's test helper to store tracking info - // use just dummy/invalid metadata and meta-xorname since we won't verify it - let meta_xorname = XorName::random(&mut rng); - acc_packet.store_tracking_info(MetadataTrackingInfo { - file_path: abs_path, - meta_xorname, - metadata: dummy_metadata.clone(), - entry_hash: EntryHash::default(), - })?; - assert!(acc_packet.meta_dir.join(hex::encode(meta_xorname)).exists()); - } - - // let's test helpers to read and remove tracking info - let tracking_info = read_tracking_info_from_disk(&acc_packet.meta_dir)?; - assert_eq!(tracking_info.len(), test_files.len()); - for (abs_path, info) in tracking_info.iter() { - assert!(test_files.remove(abs_path).is_some()); - acc_packet.remove_tracking_info(info.meta_xorname); - assert!(!acc_packet - .meta_dir - .join(hex::encode(info.meta_xorname)) - .exists()); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_from_empty_dir() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacketempty"); - 
create_dir_all(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - // let's sync up with the network from the original empty account packet - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpacketempty-clone"); - let cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's verify both the original and cloned packets are empty - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, BTreeMap::new())?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_upload_download() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket"); - let expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - - let downloaded_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - check_files_and_dirs_match(&acc_packet, &downloaded_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &downloaded_acc_packet, expected_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_scan_files_and_folders_changes() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = 
MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-to-scan"); - let mut test_files = create_test_files_on_disk(&files_path)?; - let files_path = files_path.canonicalize()?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify changes detected - assert_eq!(changes.mutations.len(), 4); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir1")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir2")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make some mutations/changes - mutate_test_files_on_disk(&files_path, &mut test_files)?; - - let changes = acc_packet.scan_files_and_folders_for_changes(false)?; - // verify new changes detected - assert_eq!(changes.mutations.len(), 8); - assert!(changes.mutations.iter().all(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == files_path.join("file0.txt")) - || matches!(mutation, Mutation::FileRemoved((p, _)) if p == &files_path.join("dir1").join("file1.txt")) - || matches!(mutation, Mutation::FolderRemoved((p,_)) if p == &files_path.join("dir2")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir3").join("dir3_1")) - || matches!(mutation, Mutation::NewFile(i) if i.file_path == 
files_path.join("dir3").join("dir3_1").join("file3.txt")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4")) - || matches!(mutation, Mutation::NewFolder(i) if i.file_path == files_path.join("dir4").join("dir4_1")) - }), "at least one of the mutations detected was unexpected/incorrect"); - - Ok(()) - } - - #[ignore = "This test sends out invalid 0 transactions and needs to be fixed"] - #[tokio::test] - async fn test_acc_packet_sync_mutations() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpackettosync"); - let mut expected_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - let clone_files_path = tmp_dir.path().join("myaccpackettosync-clone"); - let mut cloned_acc_packet = AccountPacket::retrieve_folders( - &client, - wallet_dir, - &root_sk, - None, - &clone_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - .await?; - - // let's make mutations to the clone: - mutate_test_files_on_disk(&clone_files_path, &mut expected_files)?; - - // and finally, sync the clone up with the network - cloned_acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's sync up with the network from the original account packet to merge - // changes made earlier from the cloned version - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's verify both the original and cloned packets contain the same content - check_files_and_dirs_match(&acc_packet, &cloned_acc_packet, expected_files.clone())?; - check_tracking_info_match(&acc_packet, &cloned_acc_packet, expected_files)?; - - Ok(()) - } - - // Acc-packets can be moved 
to different locations on local disk without affecting their tracking info. - // We disable this test for Windows since in CI the use of std::fs::rename gives a permissions issue. - #[cfg(any(target_os = "linux", target_os = "linux"))] - #[tokio::test] - async fn test_acc_packet_moved_folder() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let src_files_path = tmp_dir.path().join("myaccpacket-to-move"); - let mut test_files = create_test_files_on_disk(&src_files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // let's make just one mutation before moving the dir to another disk location - let new_chunk = random_file_chunk(); - let file2modify = Path::new("dir1").join("file1.txt"); - OpenOptions::new() - .write(true) - .open(src_files_path.join(&file2modify))? 
- .write_all(new_chunk.value())?; - test_files.insert(file2modify, Some(new_chunk)); - - // let's now move it to another disk location - let moved_files_path = tmp_dir.path().join("myaccpacket-moved"); - create_dir_all(&moved_files_path)?; - std::fs::rename(src_files_path, &moved_files_path)?; - let moved_files_path = moved_files_path.canonicalize()?; - - let moved_acc_packet = - AccountPacket::from_path(client.clone(), wallet_dir, &moved_files_path, None)?; - - // verify only one change is detected still after moved to another location on disk - let changes = moved_acc_packet.scan_files_and_folders_for_changes(false)?; - assert_eq!(changes.mutations.len(), 1); - assert_eq!(changes.mutations.first().map(|mutation| { - matches!(mutation, Mutation::FileContentChanged((_,i)) if i.file_path == moved_files_path.join("dir1").join("file1.txt")) - }), Some(true)); - - check_tracking_info_match(&moved_acc_packet, &moved_acc_packet, test_files)?; - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_derived_address() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let files_path = tmp_dir.path().join("myaccpacket-unencrypted-metadata"); - let _ = create_test_files_on_disk(&files_path)?; - - let mut acc_packet = - AccountPacket::init(client.clone(), wallet_dir, &files_path, &root_sk, None)?; - acc_packet.sync(SYNC_OPTS.0, SYNC_OPTS.1).await?; - - // try to download Folder with a different root SK should fail since it - // will derive a different addresse than the one used for creating it - let download_files_path = tmp_dir.path().join("myaccpacket-downloaded"); - let other_root_sk = MainSecretKey::random(); - - if AccountPacket::retrieve_folders( - &client, - wallet_dir, - &other_root_sk, - None, - &download_files_path, - BATCH_SIZE, - RetryStrategy::Quick, - ) - 
.await - .is_ok() - { - bail!("acc-packet retrieval succeeded unexpectedly"); - } - - Ok(()) - } - - #[tokio::test] - async fn test_acc_packet_recovery_seed_encryption() -> Result<()> { - let client = get_new_client(SecretKey::random()).await?; - let root_sk = MainSecretKey::random(); - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - - // let's first test with unencrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_unencrypted_seed"); - create_dir_all(&src_files_path)?; - let _ = AccountPacket::init(client.clone(), wallet_dir, &src_files_path, &root_sk, None)?; - let _ = AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None)?; - - let bytes = std::fs::read( - src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert_eq!(bytes, root_sk.to_bytes()); - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(b"123456789"), - ) - .is_ok() - { - bail!("acc-packet loading with a password succeeded unexpectedly"); - } - - // let's now test with encrypted recovery seed - let src_files_path = tmp_dir.path().join("myaccpacket_encrypted_seed"); - create_dir_all(&src_files_path)?; - let mut rng = thread_rng(); - let password: [u8; 32] = rng.gen(); - let incorrect_password: [u8; 32] = rng.gen(); - - let _ = AccountPacket::init( - client.clone(), - wallet_dir, - &src_files_path, - &root_sk, - Some(&password), - )?; - - if AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, None).is_ok() { - bail!("acc-packet loading without a password succeeded unexpectedly"); - } - - if AccountPacket::from_path( - client.clone(), - wallet_dir, - &src_files_path, - Some(&incorrect_password), - ) - .is_ok() - { - bail!("acc-packet loading with incorrect password succeeded unexpectedly"); - } - - let _ = - AccountPacket::from_path(client.clone(), wallet_dir, &src_files_path, Some(&password))?; - - let bytes = std::fs::read( - 
src_files_path - .join(SAFE_TRACKING_CHANGES_DIR) - .join(RECOVERY_SEED_FILENAME), - )?; - assert!(!bytes.is_empty()); - assert_ne!(bytes, root_sk.to_bytes()); - - Ok(()) - } - - // Helpers functions to generate and verify test data - - // Create a hard-coded set of test files and dirs on disk - fn create_test_files_on_disk(base_path: &Path) -> Result>> { - // let's create a hierarchy with dirs and files with random content - let mut files = BTreeMap::new(); - files.insert( - Path::new("file0.txt").to_path_buf(), - Some(random_file_chunk()), - ); - files.insert( - Path::new("dir1").join("file1.txt"), - Some(random_file_chunk()), - ); - files.insert(Path::new("dir2").to_path_buf(), None); - - for (path, chunk) in files.iter() { - let full_path = base_path.join(path); - if let Some(chunk) = chunk { - // it's a file, thus we create it and store its chunk bytes - create_dir_all(full_path.parent().expect("invalid path for test file"))?; - let mut file = File::create(full_path)?; - file.write_all(chunk.value())?; - } else { - // it's a dir, and it shall be empty - create_dir_all(full_path)?; - } - } - Ok(files) - } - - // Apply a hard-coded set of mutations to test files and dirs on disk - fn mutate_test_files_on_disk( - path: &Path, - test_files: &mut BTreeMap>, - ) -> Result<()> { - // - modify the content of a file - let new_chunk = random_file_chunk(); - let file2modify = Path::new("file0.txt"); - OpenOptions::new() - .write(true) - .open(path.join(file2modify))? 
- .write_all(new_chunk.value())?; - test_files.insert(file2modify.to_path_buf(), Some(new_chunk)); - // - remove one of the files - let file2remove = Path::new("dir1").join("file1.txt"); - remove_file(path.join(&file2remove))?; - test_files.remove(&file2remove); - // we need to keep the empty dir within the list of expected files though - test_files.insert(Path::new("dir1").to_path_buf(), None); - // - remove one of the dirs - let dir2remove = Path::new("dir2"); - remove_dir_all(path.join(dir2remove))?; - test_files.remove(dir2remove); - // - create new file within subdirs - create_dir_all(path.join("dir3").join("dir3_1"))?; - let file2create = Path::new("dir3").join("dir3_1").join("file3.txt"); - let mut file = File::create(path.join(&file2create))?; - let new_chunk = random_file_chunk(); - file.write_all(new_chunk.value())?; - test_files.insert(file2create, Some(new_chunk)); - // - create new subdirs - let dir2create = Path::new("dir4").join("dir4_1"); - create_dir_all(path.join(&dir2create))?; - test_files.insert(dir2create.to_path_buf(), None); - - Ok(()) - } - - // Helper to check if a dir is empty - fn is_empty_dir(path: &Path) -> bool { - path.read_dir() - .map(|mut i| i.next().is_none()) - .unwrap_or(false) - } - - // Collect list of files and empty dirs, to be used for comparing in tests - fn list_of_files_and_empty_dirs(acc_packet: &AccountPacket) -> BTreeSet { - acc_packet - .iter_only_files() - .chain(acc_packet.iter_only_dirs()) - .flat_map(|file_entry| { - let path = file_entry.path(); - if path.is_dir() && !is_empty_dir(path) { - bail!("we skip non empty dirs"); - } - - acc_packet.get_relative_path(path) - }) - .collect() - } - - // Check both acc packets kept the same set of tracking information locally - fn check_tracking_info_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - let root_addr = src_packet.root_folder_addr(); - assert_eq!( - 
read_root_folder_addr(&src_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in source directory tracking info." - ); - assert_eq!( - read_root_folder_addr(&target_packet.meta_dir)?, - (root_addr, true), - "Root folder address doesn't match in target directory tracking info." - ); - - let src_tracking_info = read_tracking_info_from_disk(&src_packet.meta_dir)?; - let mut target_tracking_info = read_tracking_info_from_disk(&target_packet.meta_dir)?; - - for (path, src_tracking_info) in src_tracking_info { - match target_tracking_info.remove(&path) { - None => { - bail!("Tracking info found in source is missing in target directory for file/dir: {path:?}") - } - Some(info) => { - if info != src_tracking_info { - bail!("Different tracking info kept in source and target for file/dir: {path:?}"); - } - } - } - - let abs_path = src_packet.files_dir.join(&path); - if abs_path.is_dir() { - assert_eq!(src_tracking_info.file_path, path, - "Incorrect path in tracking info found in source and target directories for dir: {path:?}"); - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Incorrect tracking info found in source and target directories for dir: {path:?}"); - // if it's an empty dir we shall find it in the list of expected files - if is_empty_dir(&abs_path) { - let _ = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for dir: {path:?}" - ) - })?; - } - } else { - let chunk = expected_files.remove(&path).ok_or_else(|| { - eyre!( - "Unexpected tracking info found on source and target directories for file: {path:?}" - ) - })?; - - if chunk.is_some() { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::File(_)), - "Tracking info found in source and target directories don't match the file: {path:?}"); - } else { - assert!(matches!(src_tracking_info.metadata.content, FolderEntry::Folder(_)), - "Tracking info found in source and 
target directories don't match the dir: {path:?}"); - } - } - } - - if !target_tracking_info.is_empty() { - bail!("Tracking info found in target directory but missing in source directory: {target_tracking_info:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s are lacking their tracking info in source or target directories: {expected_files:?}"); - } - - Ok(()) - } - - // Check both dirs have the same set of files and folders and no more - fn check_files_and_dirs_match( - src_packet: &AccountPacket, - target_packet: &AccountPacket, - mut expected_files: BTreeMap>, - ) -> Result<()> { - // let's collect all paths in target acc packet, i.e. files and empty dirs paths - let mut target_packet_files: BTreeSet = - list_of_files_and_empty_dirs(target_packet); - - // let's now compare those paths in target acc packet with those in source acc packet - for relative_path in list_of_files_and_empty_dirs(src_packet) { - if !target_packet_files.remove(&relative_path) { - bail!("File/dir found in source is missing in target directory: {relative_path:?}"); - } - - let src_path = src_packet.files_dir.join(&relative_path); - let target_path = target_packet.files_dir.join(&relative_path); - - let chunk = expected_files.remove(&relative_path).ok_or_else(|| { - eyre!("Unexpected file/dir found on source and target directories: {src_path:?}") - })?; - - if let Some(chunk) = chunk { - // it's a file, let's compare their content - let mut src_file = File::open(&src_path) - .map_err(|err| eyre!("couldn't open source file {src_path:?}: {err:?}"))?; - let mut target_file = File::open(&target_path) - .map_err(|err| eyre!("couldn't open target file {target_path:?}: {err:?}"))?; - - let mut src_content = Vec::new(); - src_file - .read_to_end(&mut src_content) - .expect("couldn't read source file"); - let mut target_content = Vec::new(); - target_file - .read_to_end(&mut target_content) - .expect("couldn't read target file"); - - assert_eq!( - src_content, - 
chunk.value().slice(..), - "source file content doesn't match with expected" - ); - assert_eq!( - target_content, - chunk.value().slice(..), - "target file content doesn't match with expected" - ); - } else { - // it's a dir, let's check they exist as dirs - assert!(src_path.is_dir(), "source path is not a dir {src_path:?}"); - assert!( - target_path.is_dir(), - "target path is not a dir {target_path:?}" - ); - } - } - - if !target_packet_files.is_empty() { - bail!("File/dir/s found in target directory but missing in source directory: {target_packet_files:?}"); - } - if !expected_files.is_empty() { - bail!("Some expected file/dir/s were not found in source or target directories: {expected_files:?}"); - } - - Ok(()) - } -} diff --git a/sn_cli/src/acc_packet/change_tracking.rs b/sn_cli/src/acc_packet/change_tracking.rs deleted file mode 100644 index a2eba85270..0000000000 --- a/sn_cli/src/acc_packet/change_tracking.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use sn_client::{ - protocol::storage::RegisterAddress, registers::EntryHash, transfers::MainSecretKey, FoldersApi, - Metadata, -}; - -use aes::Aes256; -use block_modes::{block_padding::Pkcs7, BlockMode, Cbc}; -use bls::{SecretKey, SK_SIZE}; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use std::{ - collections::BTreeMap, - fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tiny_keccak::{Hasher, Sha3}; -use walkdir::WalkDir; -use xor_name::XorName; - -// AES used to encrypt/decrypt the cached recovery seed. -type Aes256Cbc = Cbc; - -// AES Initialisation Vector length used. -const IV_LENGTH: usize = 16; - -// Length of buffers used for AES encryption/decryption. -const AES_BUFFER_LENGTH: usize = 48; - -// Name of hidden folder where tracking information and metadata is locally stored. -pub(super) const SAFE_TRACKING_CHANGES_DIR: &str = ".safe"; - -// Subfolder where files metadata will be cached -pub(super) const METADATA_CACHE_DIR: &str = "metadata"; - -// Name of the file where metadata about root folder is locally cached. -pub(super) const ROOT_FOLDER_METADATA_FILENAME: &str = "root_folder.addr"; - -// Name of the file where the recovery secret/seed is locally cached. -pub(crate) const RECOVERY_SEED_FILENAME: &str = "recovery_seed"; - -// Container to keep track in memory what changes are detected in local Folders hierarchy and files. -pub(super) type Folders = BTreeMap; - -// Type of local changes detected to a Folder -#[derive(Clone, Debug, PartialEq)] -pub(super) enum FolderChange { - NoChange, - NewFolder, - NewEntries, -} - -impl FolderChange { - /// Returns true if it's currently set to NewFolder. - pub fn is_new_folder(&self) -> bool { - self == &Self::NewFolder - } - - /// If it's currently set to NoChange then switch it to NewEntries. - /// Otherwise we don't need to change it as the entire Folder will need to be uploaded. 
- pub fn has_new_entries(&mut self) { - if self == &Self::NoChange { - *self = Self::NewEntries; - } - } -} - -// Changes detected locally which eventually can be applied and upload to network. -#[derive(Default)] -pub(super) struct ChangesToApply { - pub folders: Folders, - pub mutations: Vec, -} - -// Type of mutation detected locally. -#[derive(Debug)] -pub(super) enum Mutation { - NewFile(MetadataTrackingInfo), - FileRemoved((PathBuf, XorName)), - FileContentChanged((XorName, MetadataTrackingInfo)), - NewFolder(MetadataTrackingInfo), - FolderRemoved((PathBuf, XorName)), -} - -impl fmt::Display for Mutation { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::NewFile(tracking_info) => { - write!(f, "New file: {:?}", tracking_info.file_path) - } - Self::FileRemoved((path, _)) => write!(f, "File removed: {path:?}"), - Self::FileContentChanged((_, tracking_info)) => { - write!(f, "File content changed: {:?}", tracking_info.file_path) - } - Self::NewFolder(tracking_info) => { - write!(f, "New folder: {:?}", tracking_info.file_path) - } - Self::FolderRemoved((path, _)) => write!(f, "Folder removed: {path:?}"), - } - } -} - -// Information stored locally to keep track of local changes to files/folders. -// TODO: to make file changes discovery more efficient, and prevent chunking for -// such purposes, add more info like file size and last modified timestamp. 
-#[derive(Debug, Serialize, Deserialize, PartialEq)] -pub(super) struct MetadataTrackingInfo { - pub file_path: PathBuf, - pub meta_xorname: XorName, - pub metadata: Metadata, - pub entry_hash: EntryHash, -} - -// Build absolute paths for the different dirs to be used for locally tracking changes -pub(super) fn build_tracking_info_paths(path: &Path) -> Result<(PathBuf, PathBuf, PathBuf)> { - let files_dir = path.to_path_buf().canonicalize()?; - let tracking_info_dir = files_dir.join(SAFE_TRACKING_CHANGES_DIR); - let meta_dir = tracking_info_dir.join(METADATA_CACHE_DIR); - create_dir_all(&meta_dir) - .map_err(|err| eyre!("The path provided needs to be a directory: {err}"))?; - - Ok((files_dir, tracking_info_dir, meta_dir)) -} - -pub(super) fn read_tracking_info_from_disk( - meta_dir: &Path, -) -> Result> { - let mut curr_tracking_info = BTreeMap::new(); - for entry in WalkDir::new(meta_dir) - .into_iter() - .flatten() - .filter(|e| e.file_type().is_file() && e.file_name() != ROOT_FOLDER_METADATA_FILENAME) - { - let path = entry.path(); - let bytes = std::fs::read(path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err}"))?; - let tracking_info: MetadataTrackingInfo = rmp_serde::from_slice(&bytes) - .map_err(|err| eyre!("Error while deserializing tracking info from {path:?}: {err}"))?; - - curr_tracking_info.insert(tracking_info.file_path.clone(), tracking_info); - } - - Ok(curr_tracking_info) -} - -// Store tracking info about the root folder in a file to keep track of any changes made -pub(super) fn store_root_folder_tracking_info( - meta_dir: &Path, - root_folder_addr: RegisterAddress, - created: bool, -) -> Result<()> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let mut meta_file = File::create(path)?; - meta_file.write_all(&rmp_serde::to_vec(&(root_folder_addr, created))?)?; - - Ok(()) -} - -// Store the given root seed/SK on disk, (optionally) encrypted with a password -pub(super) fn store_root_sk( - dir: &Path, 
- root_sk: &MainSecretKey, - password: Option<&[u8]>, -) -> Result<()> { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut secret_file = File::create(path)?; - let seed_bytes = root_sk.to_bytes(); - - if let Some(pwd) = password { - // encrypt the SK with the (hashed) password - let key = encryption_key_from_hashed_password(pwd); - - let pos = seed_bytes.len(); - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..pos].copy_from_slice(&seed_bytes); - - // IV is randomly chosen and prefixed it to cipher - let mut rng = rand::thread_rng(); - let random_iv: [u8; IV_LENGTH] = rng.gen(); - let mut iv_with_cipher = vec![]; - iv_with_cipher.extend(random_iv); - - let cipher = Aes256Cbc::new_from_slices(&key, &random_iv)?; - let ciphertext = cipher.encrypt(&mut buffer, pos)?; - iv_with_cipher.extend(ciphertext); - - secret_file.write_all(&iv_with_cipher)?; - } else { - secret_file.write_all(&seed_bytes)?; - } - - Ok(()) -} - -// Read the root seed/SK from disk, (optionally) decrypting it with a password -pub(super) fn read_root_sk(dir: &Path, password: Option<&[u8]>) -> Result { - let path = dir.join(RECOVERY_SEED_FILENAME); - let mut bytes = std::fs::read(&path).map_err(|err| { - eyre!("Error while reading the recovery seed/secret from {path:?}: {err:?}") - })?; - - if let Some(pwd) = password { - // decrypt the SK with the (hashed) password - if bytes.len() < IV_LENGTH + AES_BUFFER_LENGTH { - bail!( - "Not enough bytes found on disk ({}) to decrypt the recovery seed", - bytes.len() - ); - } - - // the IV is prefixed - let mut iv = [0u8; IV_LENGTH]; - iv[..IV_LENGTH].copy_from_slice(&bytes[..IV_LENGTH]); - - let mut buffer = [0u8; AES_BUFFER_LENGTH]; - buffer[..48].copy_from_slice(&bytes[IV_LENGTH..]); - - let key = encryption_key_from_hashed_password(pwd); - let cipher = Aes256Cbc::new_from_slices(&key, &iv)?; - bytes = cipher - .decrypt_vec(&buffer) - .map_err(|_| eyre!("Failed to decrypt the recovery seed with the provided password"))?; - } - - if bytes.len() 
!= SK_SIZE { - bail!( - "The length of bytes read from disk ({}) doesn't match a recovery seed's length ({SK_SIZE})", bytes.len() - ); - } - let mut seed_bytes = [0u8; SK_SIZE]; - seed_bytes[..SK_SIZE].copy_from_slice(&bytes); - let sk = MainSecretKey::new(SecretKey::from_bytes(seed_bytes)?); - - Ok(sk) -} - -fn encryption_key_from_hashed_password(password: &[u8]) -> [u8; 32] { - let mut key = [0; 32]; - let mut hasher = Sha3::v256(); - hasher.update(password); - hasher.finalize(&mut key); - key -} - -// Read the tracking info about the root folder -pub(super) fn read_root_folder_addr(meta_dir: &Path) -> Result<(RegisterAddress, bool)> { - let path = meta_dir.join(ROOT_FOLDER_METADATA_FILENAME); - let bytes = std::fs::read(&path) - .map_err(|err| eyre!("Error while reading the tracking info from {path:?}: {err:?}"))?; - - Ok(rmp_serde::from_slice(&bytes)?) -} diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs deleted file mode 100644 index 2fa931f217..0000000000 --- a/sn_cli/src/bin/main.rs +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[macro_use] -extern crate tracing; - -mod subcommands; - -use subcommands::{ - files::files_cmds, - folders::folders_cmds, - register::register_cmds, - wallet::{ - hot_wallet::{wallet_cmds, wallet_cmds_without_client, WalletCmds}, - wo_wallet::{wo_wallet_cmds, wo_wallet_cmds_without_client, WatchOnlyWalletCmds}, - }, - Opt, SubCmd, -}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::Result; -use indicatif::ProgressBar; -use sn_client::transfers::bls_secret_from_hex; -use sn_client::{Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}; -#[cfg(feature = "metrics")] -use sn_logging::{metrics::init_metrics, Level, LogBuilder, LogFormat}; -use sn_protocol::version::IDENTIFY_PROTOCOL_STR; -use std::{io, path::PathBuf, time::Duration}; -use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; - -const CLIENT_KEY: &str = "clientkey"; - -#[tokio::main] -async fn main() -> Result<()> { - color_eyre::install()?; - let opt = Opt::parse(); - - if opt.version { - println!( - "{}", - sn_build_info::version_string( - "Autonomi CLI", - env!("CARGO_PKG_VERSION"), - Some(&IDENTIFY_PROTOCOL_STR) - ) - ); - return Ok(()); - } - - if opt.crate_version { - println!("{}", env!("CARGO_PKG_VERSION")); - return Ok(()); - } - - if opt.protocol_version { - println!("{}", *IDENTIFY_PROTOCOL_STR); - return Ok(()); - } - - #[cfg(not(feature = "nightly"))] - if opt.package_version { - println!("{}", sn_build_info::package_version()); - return Ok(()); - } - - let logging_targets = vec![ - // TODO: Reset to nice and clean defaults once we have a better idea of what we want - ("sn_networking".to_string(), Level::INFO), - ("safe".to_string(), Level::TRACE), - ("sn_build_info".to_string(), Level::TRACE), - ("autonomi".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_logging".to_string(), Level::TRACE), - ("sn_peers_acquisition".to_string(), Level::TRACE), - ("sn_protocol".to_string(), Level::TRACE), - ("sn_registers".to_string(), 
Level::TRACE), - ("sn_transfers".to_string(), Level::TRACE), - ]; - let mut log_builder = LogBuilder::new(logging_targets); - log_builder.output_dest(opt.log_output_dest); - log_builder.format(opt.log_format.unwrap_or(LogFormat::Default)); - let _log_handles = log_builder.initialize()?; - - #[cfg(feature = "metrics")] - tokio::spawn(init_metrics(std::process::id())); - - // Log the full command that was run - info!("\"{}\"", std::env::args().collect::>().join(" ")); - - debug!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - println!( - "safe client built with git version: {}", - sn_build_info::git_info() - ); - - let client_data_dir_path = get_client_data_dir_path()?; - // Perform actions that do not require us connecting to the network and return early - if let Some(SubCmd::Wallet(cmds)) = &opt.cmd { - if let WalletCmds::Address { .. } - | WalletCmds::Balance { .. } - | WalletCmds::Create { .. } - | WalletCmds::Sign { .. } - | WalletCmds::Status { .. } - | WalletCmds::Encrypt { .. } = cmds - { - wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - if let Some(SubCmd::WatchOnlyWallet(cmds)) = &opt.cmd { - if let WatchOnlyWalletCmds::Addresses - | WatchOnlyWalletCmds::Balance { .. } - | WatchOnlyWalletCmds::Deposit { .. } - | WatchOnlyWalletCmds::Create { .. } - | WatchOnlyWalletCmds::Transaction { .. } = cmds - { - wo_wallet_cmds_without_client(cmds, &client_data_dir_path).await?; - return Ok(()); - } - } - - println!("Instantiating a SAFE client..."); - let secret_key = get_client_secret_key(&client_data_dir_path)?; - - let bootstrap_peers = opt.peers.get_peers().await?; - - println!( - "Connecting to the network with {} peers", - bootstrap_peers.len(), - ); - - let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local` flag is provided - None - } else { - Some(bootstrap_peers) - }; - - // get the broadcaster as we want to have our own progress bar. 
- let broadcaster = ClientEventsBroadcaster::default(); - let (progress_bar, progress_bar_handler) = - spawn_connection_progress_bar(broadcaster.subscribe()); - - let result = Client::new( - secret_key, - bootstrap_peers, - opt.connection_timeout, - Some(broadcaster), - ) - .await; - let client = match result { - Ok(client) => client, - Err(err) => { - // clean up progress bar - progress_bar.finish_with_message("Could not connect to the network"); - return Err(err.into()); - } - }; - progress_bar_handler.await?; - - let should_verify_store = !opt.no_verify; - - // PowerShell seems having issue to showing the unwrapped error - // Hence capture the result and print it out explicity. - let result = match opt.cmd { - Some(SubCmd::Wallet(cmds)) => { - wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::WatchOnlyWallet(cmds)) => { - wo_wallet_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Files(cmds)) => { - files_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Folders(cmds)) => { - folders_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - Some(SubCmd::Register(cmds)) => { - register_cmds(cmds, &client, &client_data_dir_path, should_verify_store).await - } - None => { - println!("Use --help to see available commands"); - return Ok(()); - } - }; - println!("Completed with {result:?}"); - - Ok(()) -} - -/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the -/// client successfully connects to the network or if it errors out. 
-fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) { - // Network connection progress bar - let progress_bar = ProgressBar::new_spinner(); - let progress_bar_clone = progress_bar.clone(); - progress_bar.enable_steady_tick(Duration::from_millis(120)); - progress_bar.set_message("Connecting to The SAFE Network..."); - let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); - progress_bar.set_style(new_style); - - progress_bar.set_message("Connecting to The SAFE Network..."); - - let handle = tokio::spawn(async move { - let mut peers_connected = 0; - loop { - match rx.recv().await { - Ok(ClientEvent::ConnectedToNetwork) => { - progress_bar.finish_with_message("Connected to the Network"); - break; - } - Ok(ClientEvent::PeerAdded { - max_peers_to_connect, - }) => { - peers_connected += 1; - progress_bar.set_message(format!( - "{peers_connected}/{max_peers_to_connect} initial peers found.", - )); - } - Err(RecvError::Lagged(_)) => { - // Even if the receiver is lagged, we would still get the ConnectedToNetwork during each new - // connection. Thus it would be okay to skip this error. - } - Err(RecvError::Closed) => { - progress_bar.finish_with_message("Could not connect to the network"); - break; - } - _ => {} - } - } - }); - (progress_bar_clone, handle) -} - -fn get_client_secret_key(root_dir: &PathBuf) -> Result { - // create the root directory if it doesn't exist - std::fs::create_dir_all(root_dir)?; - let key_path = root_dir.join(CLIENT_KEY); - let secret_key = if key_path.is_file() { - info!("Client key found. Loading from file..."); - let secret_hex_bytes = std::fs::read(key_path)?; - bls_secret_from_hex(secret_hex_bytes)? - } else { - info!("No key found. 
Generating a new client key..."); - let secret_key = SecretKey::random(); - std::fs::write(key_path, hex::encode(secret_key.to_bytes()))?; - secret_key - }; - Ok(secret_key) -} - -fn get_client_data_dir_path() -> Result { - let mut home_dirs = dirs_next::data_dir().expect("Data directory is obtainable"); - home_dirs.push("safe"); - home_dirs.push("client"); - std::fs::create_dir_all(home_dirs.as_path())?; - Ok(home_dirs) -} - -fn get_stdin_response(prompt: &str) -> String { - println!("{prompt}"); - let mut buffer = String::new(); - let stdin = io::stdin(); - if stdin.read_line(&mut buffer).is_err() { - // consider if error should process::exit(1) here - return "".to_string(); - }; - // Remove leading and trailing whitespace - buffer.trim().to_owned() -} - -fn get_stdin_password_response(prompt: &str) -> String { - rpassword::prompt_password(prompt) - .map(|v| v.trim().to_owned()) - .unwrap_or("".to_string()) -} - -#[cfg(test)] -mod tests { - use crate::subcommands::wallet::hot_wallet::{wallet_cmds_without_client, WalletCmds}; - use crate::subcommands::wallet::WalletApiHelper; - use bls::SecretKey; - use color_eyre::Result; - use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; - use sn_client::transfers::HotWallet; - use std::path::Path; - - fn create_wallet(root_dir: &Path, derivation_passphrase: Option) -> Result { - let mnemonic = load_or_create_mnemonic(root_dir)?; - let secret_key = secret_key_from_mnemonic(mnemonic, derivation_passphrase)?; - let wallet = HotWallet::create_from_key(root_dir, secret_key, None)?; - Ok(wallet) - } - - #[tokio::test] - async fn test_wallet_address_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - // Create wallet - let _wallet = create_wallet(&root_dir, None).expect("Could not create wallet"); - - let cmds = WalletCmds::Address; - - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - 
assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_address_command_should_fail_with_no_existing_wallet() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let client_data_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Address; - - // Runs command without a wallet being present, thus should fail - let result = wallet_cmds_without_client(&cmds, &client_data_dir).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_wallet_create_command() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: None, - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_wallet_create_command_with_hex_key() { - let tmp_dir = tempfile::tempdir().expect("Could not create temp dir"); - let root_dir = tmp_dir.path().to_path_buf(); - - let secret_key = SecretKey::random(); - let secret_key_hex = secret_key.to_hex(); - - let cmds = WalletCmds::Create { - no_replace: false, - no_password: true, - key: Some(secret_key_hex), - derivation_passphrase: None, - password: None, - }; - - // Run command and hopefully create a wallet - let result = wallet_cmds_without_client(&cmds, &root_dir).await; - assert!(result.is_ok()); - - // Check if valid wallet exists - let result = WalletApiHelper::load_from(&root_dir); - assert!(result.is_ok()); - - if let WalletApiHelper::HotWallet(wallet) = result.expect("No valid wallet found") { - // Compare public addresses (secret keys are the same if the public addresses are) - assert_eq!(wallet.address().to_hex(), 
secret_key.public_key().to_hex()); - } else { - panic!("Did not expect a watch only wallet"); - } - } -} diff --git a/sn_cli/src/bin/subcommands/files.rs b/sn_cli/src/bin/subcommands/files.rs deleted file mode 100644 index 2bc3a26fed..0000000000 --- a/sn_cli/src/bin/subcommands/files.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Help, Result, -}; -use sn_cli::{ - download_file, download_files, ChunkManager, Estimator, FilesUploader, UploadedFile, - UPLOADED_FILES, -}; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - UploadCfg, -}; -use sn_client::{Client, FilesApi, BATCH_SIZE}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, -}; -use walkdir::WalkDir; -use xor_name::XorName; - -#[derive(Parser, Debug)] -pub enum FilesCmds { - Estimate { - /// The location of the file(s) to upload. Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - path: PathBuf, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - }, - Upload { - /// The location of the file(s) to upload. - /// - /// Can be a file or a directory. - #[clap(name = "path", value_name = "PATH")] - file_path: PathBuf, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. 
- #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the file be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - Download { - /// The name to apply to the downloaded file. - /// - /// If the name argument is used, the address argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "name")] - file_name: Option, - /// The hex address of a file. - /// - /// If the address argument is used, the name argument must also be supplied. - /// - /// If neither are, all the files uploaded by the current user will be downloaded again. - #[clap(name = "address")] - file_addr: Option, - /// Flagging whether to show the holders of the uploaded chunks. - /// Default to be not showing. - #[clap(long, name = "show_holders", default_value = "false")] - show_holders: bool, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. 
Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn files_cmds( - cmds: FilesCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FilesCmds::Estimate { - path, - make_data_public, - } => { - let files_api = FilesApi::build(client.clone(), root_dir.to_path_buf())?; - let chunk_manager = ChunkManager::new(root_dir); - Estimator::new(chunk_manager, files_api) - .estimate_cost(path, make_data_public, root_dir) - .await? - } - FilesCmds::Upload { - file_path, - batch_size, - retry_strategy, - make_data_public, - } => { - let files_count = count_files_in_path_recursively(&file_path); - - if files_count == 0 { - if file_path.is_dir() { - bail!( - "The directory specified for upload is empty. \ - Please verify the provided path." - ); - } else { - bail!("The provided file path is invalid. Please verify the path."); - } - } - let upload_cfg = UploadCfg { - batch_size, - verify_store, - retry_strategy, - ..Default::default() - }; - let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf()) - .set_make_data_public(make_data_public) - .set_upload_cfg(upload_cfg) - .insert_path(&file_path); - - let _summary = files_uploader.start_upload().await?; - } - FilesCmds::Download { - file_name, - file_addr, - show_holders, - batch_size, - retry_strategy, - } => { - if (file_name.is_some() && file_addr.is_none()) - || (file_addr.is_some() && file_name.is_none()) - { - return Err( - eyre!("Both the name and address must be supplied if either are used") - .suggestion( - "Please run the command again in the form 'files download
'", - ), - ); - } - - let mut download_dir = root_dir.to_path_buf(); - let mut download_file_name = file_name.clone(); - if let Some(file_name) = file_name { - // file_name may direct the downloaded data to: - // - // the current directory (just a filename) - // eg safe files download myfile.txt ADDRESS - // - // a directory relative to the current directory (relative filename) - // eg safe files download my/relative/path/myfile.txt ADDRESS - // - // a directory relative to root of the filesystem (absolute filename) - // eg safe files download /home/me/mydir/myfile.txt ADDRESS - let file_name_path = Path::new(&file_name); - if file_name_path.is_dir() { - return Err(eyre!("Cannot download file to path: {:?}", file_name)); - } - let file_name_dir = file_name_path.parent(); - if file_name_dir.is_none() { - // just a filename, use the current_dir - download_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - } else if file_name_path.is_relative() { - // relative to the current directory. 
Make the relative path - // into an absolute path by joining it to current_dir - if let Some(relative_dir) = file_name_dir { - let current_dir = std::env::current_dir().unwrap_or(root_dir.to_path_buf()); - download_dir = current_dir.join(relative_dir); - if !download_dir.exists() { - return Err(eyre!("Directory does not exist: {:?}", download_dir)); - } - if let Some(path_file_name) = file_name_path.file_name() { - download_file_name = Some(OsString::from(path_file_name)); - } - } - } else { - // absolute dir - download_dir = file_name_dir.unwrap_or(root_dir).to_path_buf(); - } - } - let files_api: FilesApi = FilesApi::new(client.clone(), download_dir.clone()); - - match (download_file_name, file_addr) { - (Some(download_file_name), Some(address_provided)) => { - let bytes = - hex::decode(&address_provided).expect("Input address is not a hex string"); - let xor_name_provided = XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - // try to read the data_map if it exists locally. - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let expected_data_map_location = uploaded_files_path.join(address_provided); - let local_data_map = { - if expected_data_map_location.exists() { - let uploaded_file_metadata = - UploadedFile::read(&expected_data_map_location)?; - - uploaded_file_metadata.data_map.map(|bytes| Chunk { - address: ChunkAddress::new(xor_name_provided), - value: bytes, - }) - } else { - None - } - }; - - download_file( - files_api, - xor_name_provided, - (download_file_name, local_data_map), - &download_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await - } - _ => { - println!("Attempting to download all files uploaded by the current user..."); - download_files( - &files_api, - root_dir, - show_holders, - batch_size, - retry_strategy, - ) - .await? 
- } - } - } - } - Ok(()) -} - -fn count_files_in_path_recursively(file_path: &PathBuf) -> u32 { - let entries_iterator = WalkDir::new(file_path).into_iter().flatten(); - let mut count = 0; - - entries_iterator.for_each(|entry| { - if entry.file_type().is_file() { - count += 1; - } - }); - count -} diff --git a/sn_cli/src/bin/subcommands/folders.rs b/sn_cli/src/bin/subcommands/folders.rs deleted file mode 100644 index 705b746459..0000000000 --- a/sn_cli/src/bin/subcommands/folders.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use sn_cli::AccountPacket; - -use sn_client::{ - protocol::storage::RetryStrategy, transfers::MainSecretKey, Client, UploadCfg, BATCH_SIZE, -}; - -use bls::{SecretKey, SK_SIZE}; -use clap::Parser; -use color_eyre::{eyre::bail, Result}; -use dialoguer::Password; -use std::{ - env::current_dir, - path::{Path, PathBuf}, -}; - -#[derive(Parser, Debug)] -pub enum FoldersCmds { - Init { - /// The directory to initialise as a root folder, which can then be stored on the network (and kept in sync with). - /// By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - }, - Download { - /// The full local path where to download the folder. 
By default the current path is assumed, - /// and the main Folder's network address will be used as the folder name. - #[clap(name = "target folder path")] - path: Option, - /// The hex-encoded recovery secret key for deriving addresses, encryption and signing keys, to be used by this account packet. - #[clap(name = "recovery key")] - root_sk: Option, - /// The batch_size for parallel downloading - #[clap(long, default_value_t = BATCH_SIZE , short='b')] - batch_size: usize, - /// Set the strategy to use on downloads failure. - /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Quick, short = 'r', help = "Sets the retry strategy on download failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, - /// Report any changes made to local version of files/folders (this doesn't compare it with their versions stored on the network). - Status { - /// Path to check changes made on. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - }, - /// Sync up local files/folders changes with their versions stored on the network. - Sync { - /// Path to sync with its remote version on the network. By default the current path is assumed. - #[clap(name = "path", value_name = "PATH")] - path: Option, - /// The batch_size to split chunks into parallel handling batches - /// during payment and upload processing. - #[clap(long, default_value_t = BATCH_SIZE, short='b')] - batch_size: usize, - /// Should the files be made accessible to all. (This is irreversible) - #[clap(long, name = "make_public", default_value = "false", short = 'p')] - make_data_public: bool, - /// Set the strategy to use on chunk upload failure. Does not modify the spend failure retry attempts yet. 
- /// - /// Choose a retry strategy based on effort level, from 'quick' (least effort), through 'balanced', - /// to 'persistent' (most effort). - #[clap(long, default_value_t = RetryStrategy::Balanced, short = 'r', help = "Sets the retry strategy on upload failure. Options: 'quick' for minimal effort, 'balanced' for moderate effort, or 'persistent' for maximum effort.")] - retry_strategy: RetryStrategy, - }, -} - -pub(crate) async fn folders_cmds( - cmds: FoldersCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - FoldersCmds::Init { path, root_sk } => { - let path = get_path(path, None)?; - // initialise path as a fresh new Folder with a network address derived from the root SK - let root_sk = get_recovery_secret_sk(root_sk, true)?; - let acc_packet = AccountPacket::init(client.clone(), root_dir, &path, &root_sk, None)?; - println!("Directory at {path:?} initialised as a root Folder, ready to track and sync changes with the network at address: {}", acc_packet.root_folder_addr().to_hex()) - } - FoldersCmds::Download { - path, - root_sk, - batch_size, - retry_strategy, - } => { - let root_sk = get_recovery_secret_sk(root_sk, false)?; - let root_sk_hex = root_sk.main_pubkey().to_hex(); - let download_folder_name = format!( - "folder_{}_{}", - &root_sk_hex[..6], - &root_sk_hex[root_sk_hex.len() - 6..] 
- ); - let download_folder_path = get_path(path, Some(&download_folder_name))?; - println!("Downloading onto {download_folder_path:?}, with batch-size {batch_size}"); - debug!("Downloading onto {download_folder_path:?}"); - - let _ = AccountPacket::retrieve_folders( - client, - root_dir, - &root_sk, - None, - &download_folder_path, - batch_size, - retry_strategy, - ) - .await?; - } - FoldersCmds::Status { path } => { - let path = get_path(path, None)?; - let acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - acc_packet.status()?; - } - FoldersCmds::Sync { - path, - batch_size, - make_data_public, - retry_strategy, - } => { - let path = get_path(path, None)?; - let mut acc_packet = AccountPacket::from_path(client.clone(), root_dir, &path, None)?; - - let options = UploadCfg { - verify_store, - batch_size, - retry_strategy, - ..Default::default() - }; - acc_packet.sync(options, make_data_public).await?; - } - } - Ok(()) -} - -// Unwrap provided path, or return the current path if none was provided. -// It can optionally be provided a string to adjoin when the current dir is returned. -fn get_path(path: Option, to_join: Option<&str>) -> Result { - let path = if let Some(path) = path { - path - } else { - let current_dir = current_dir()?; - to_join.map_or_else(|| current_dir.clone(), |str| current_dir.join(str)) - }; - Ok(path) -} - -// Either get a hex-encoded SK entered by the user, or generate a new one -// TODO: get/generate a mnemonic instead -fn get_recovery_secret_sk( - root_sk: Option, - gen_new_recovery_secret: bool, -) -> Result { - let result = if let Some(str) = root_sk { - SecretKey::from_hex(&str) - } else { - let prompt_msg = if gen_new_recovery_secret { - println!( - "\n\nA recovery secret is required to derive signing/encryption keys, and network addresses, \ - used by an Account Packet." 
- ); - println!( - "The recovery secret used to initialise an Account Packet, can be used to retrieve and restore \ - a new replica/clone from the network, onto any local path and even onto another device.\n" - ); - - "Please enter your recovery secret for this new Account Packet,\nif you don't have one, \ - press [Enter] to generate one" - } else { - "Please enter your recovery secret" - }; - - let err_msg = format!("Hex-encoded recovery secret must be {} long", 2 * SK_SIZE); - let sk_hex = Password::new() - .with_prompt(prompt_msg) - .allow_empty_password(gen_new_recovery_secret) - .validate_with(|input: &String| -> Result<(), &str> { - let len = input.chars().count(); - if len == 0 || len == 2 * SK_SIZE { - Ok(()) - } else { - Err(&err_msg) - } - }) - .interact()?; - - println!(); - if sk_hex.is_empty() { - println!("Generating your recovery secret..."); - let sk = SecretKey::random(); - println!("\n*** Recovery secret generated ***\n{}", sk.to_hex()); - println!(); - println!( - "Please *MAKE SURE YOU DON'T LOOSE YOU RECOVERY SECRET*, and always sync up local changes \ - made to your Account Packet with the remote replica on the network to not loose them either.\n" - ); - - Ok(sk) - } else { - SecretKey::from_hex(&sk_hex) - } - }; - - match result { - Ok(sk) => Ok(MainSecretKey::new(sk)), - Err(err) => bail!("Failed to decode the recovery secret: {err:?}"), - } -} diff --git a/sn_cli/src/bin/subcommands/mod.rs b/sn_cli/src/bin/subcommands/mod.rs deleted file mode 100644 index 575e90b3d3..0000000000 --- a/sn_cli/src/bin/subcommands/mod.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -pub(crate) mod files; -pub(crate) mod folders; -pub(crate) mod register; -pub(crate) mod wallet; - -use clap::Parser; -use clap::Subcommand; -use color_eyre::Result; -use sn_logging::{LogFormat, LogOutputDest}; -use sn_peers_acquisition::PeersArgs; -use std::time::Duration; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser)] -#[command(disable_version_flag = true)] -pub(crate) struct Opt { - /// Specify the logging output destination. - /// - /// Valid values are "stdout", "data-dir", or a custom path. - /// - /// `data-dir` is the default value. - /// - /// The data directory location is platform specific: - /// - Linux: $HOME/.local/share/safe/client/logs - /// - macOS: $HOME/Library/Application Support/safe/client/logs - /// - Windows: C:\Users\\AppData\Roaming\safe\client\logs - #[clap(long, value_parser = LogOutputDest::parse_from_str, verbatim_doc_comment, default_value = "data-dir")] - pub log_output_dest: LogOutputDest, - - /// Specify the logging format. - /// - /// Valid values are "default" or "json". - /// - /// If the argument is not used, the default format will be applied. - #[clap(long, value_parser = LogFormat::parse_from_str, verbatim_doc_comment)] - pub log_format: Option, - - #[command(flatten)] - pub(crate) peers: PeersArgs, - - /// Available sub commands. - #[clap(subcommand)] - pub cmd: Option, - - /// The maximum duration to wait for a connection to the network before timing out. - #[clap(long = "timeout", global = true, value_parser = |t: &str| -> Result { Ok(t.parse().map(Duration::from_secs)?) })] - pub connection_timeout: Option, - - /// Prevent verification of data storage on the network. 
- /// - /// This may increase operation speed, but offers no guarantees that operations were successful. - #[clap(global = true, long = "no-verify", short = 'x')] - pub no_verify: bool, - - /// Print the crate version. - #[clap(long)] - pub crate_version: bool, - - /// Print the network protocol version. - #[clap(long)] - pub protocol_version: bool, - - /// Print the package version. - #[clap(long)] - #[cfg(not(feature = "nightly"))] - pub package_version: bool, - - /// Print version information. - #[clap(long)] - pub version: bool, -} - -#[derive(Subcommand, Debug)] -pub(super) enum SubCmd { - #[clap(name = "wallet", subcommand)] - /// Commands for a hot-wallet management. - /// A hot-wallet holds the secret key, thus it can be used for signing transfers/transactions. - Wallet(wallet::hot_wallet::WalletCmds), - #[clap(name = "wowallet", subcommand)] - /// Commands for watch-only wallet management - /// A watch-only wallet holds only the public key, thus it cannot be used for signing - /// transfers/transactions, but only to query balances and broadcast offline signed transactions. - WatchOnlyWallet(wallet::wo_wallet::WatchOnlyWalletCmds), - #[clap(name = "files", subcommand)] - /// Commands for file management - Files(files::FilesCmds), - #[clap(name = "folders", subcommand)] - /// Commands for folders management - Folders(folders::FoldersCmds), - #[clap(name = "register", subcommand)] - /// Commands for register management - Register(register::RegisterCmds), -} diff --git a/sn_cli/src/bin/subcommands/register.rs b/sn_cli/src/bin/subcommands/register.rs deleted file mode 100644 index 675e1ae6c5..0000000000 --- a/sn_cli/src/bin/subcommands/register.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::PublicKey; -use clap::Subcommand; -use color_eyre::{eyre::WrapErr, Result, Section}; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::protocol::storage::RegisterAddress; -use sn_client::registers::Permissions; - -use sn_client::{Client, Error as ClientError, WalletClient}; -use std::path::Path; -use xor_name::XorName; - -#[derive(Subcommand, Debug)] -pub enum RegisterCmds { - /// Create a new register with a name. - Create { - /// The name of the register to create. This could be the app's name. - /// This is used along with your public key to derive the address of the register - #[clap(name = "name", short = 'n')] - name: String, - - /// Create the register with public write access. - /// By default only the owner can write to the register. - #[clap(name = "public", short = 'p')] - public: bool, - }, - Edit { - /// The address of the register to edit. - #[clap(name = "address")] - address: String, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register name instead of the address - #[clap(name = "name", short = 'n')] - use_name: bool, - /// The entry to add to the register. - #[clap(name = "entry")] - entry: String, - }, - Get { - /// The register addresses to get. 
- #[clap(name = "addresses")] - addresses: Vec, - /// If you are the owner, the name of the register can be used as a shorthand to the address, - /// as we can derive the address from the public key + name - /// Use this flag if you are providing the register names instead of the addresses - #[clap(name = "name", short = 'n')] - use_name: bool, - }, -} - -pub(crate) async fn register_cmds( - cmds: RegisterCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - RegisterCmds::Create { name, public } => { - create_register(name, public, client, root_dir, verify_store).await? - } - RegisterCmds::Edit { - address, - use_name, - entry, - } => edit_register(address, use_name, entry, client, verify_store).await?, - RegisterCmds::Get { - addresses, - use_name, - } => get_registers(addresses, use_name, client).await?, - } - Ok(()) -} - -async fn create_register( - name: String, - public: bool, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - trace!("Starting to pay for Register storage"); - - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None) - .wrap_err("Unable to read wallet file in {path:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. 
Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - let meta = XorName::from_content(name.as_bytes()); - let perms = match public { - true => Permissions::new_anyone_can_write(), - false => Permissions::default(), - }; - let (register, storage_cost, royalties_fees) = client - .create_and_pay_for_register(meta, &mut wallet_client, verify_store, perms) - .await?; - - if storage_cost.is_zero() { - println!("Register '{name}' already exists!",); - } else { - println!( - "Successfully created register '{name}' for {storage_cost:?} (royalties fees: {royalties_fees:?})!", - ); - } - - println!("REGISTER_ADDRESS={}", register.address().to_hex()); - - Ok(()) -} - -async fn edit_register( - address_str: String, - use_name: bool, - entry: String, - client: &Client, - verify_store: bool, -) -> Result<()> { - let (address, printing_name) = parse_addr(&address_str, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register from {address}"); - - match client.get_register(address).await { - Ok(mut register) => { - println!("Successfully retrieved Register {printing_name}",); - println!("Editing Register {printing_name} with: {entry}"); - match register.write_online(entry.as_bytes(), verify_store).await { - Ok(()) => {} - Err(ref err @ ClientError::ContentBranchDetected(ref branches)) => { - println!( - "We need to merge {} branches in Register entries: {err}", - branches.len() - ); - register - .write_merging_branches_online(entry.as_bytes(), verify_store) - .await?; - } - Err(err) => return Err(err.into()), - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! 
{error}" - ); - return Err(error.into()); - } - } - - Ok(()) -} - -async fn get_registers(addresses: Vec, use_name: bool, client: &Client) -> Result<()> { - for addr in addresses { - let (address, printing_name) = parse_addr(&addr, use_name, client.signer_pk())?; - - println!("Trying to retrieve Register {printing_name}"); - - match client.get_register(address).await { - Ok(register) => { - println!("Successfully retrieved Register {printing_name}"); - let entries = register.read(); - println!("Register entries:"); - for (hash, bytes) in entries { - let data_str = match String::from_utf8(bytes.clone()) { - Ok(data_str) => data_str, - Err(_) => format!("{bytes:?}"), - }; - println!("{hash:?}: {data_str}"); - } - } - Err(error) => { - println!( - "Did not retrieve Register {printing_name} from all nodes in the close group! {error}" - ); - return Err(error.into()); - } - } - } - - Ok(()) -} - -/// Parse str and return the address and the register info for printing -fn parse_addr( - address_str: &str, - use_name: bool, - pk: PublicKey, -) -> Result<(RegisterAddress, String)> { - if use_name { - debug!("Parsing address as name"); - let user_metadata = XorName::from_content(address_str.as_bytes()); - let addr = RegisterAddress::new(user_metadata, pk); - Ok((addr, format!("'{address_str}' at {addr}"))) - } else { - debug!("Parsing address as hex"); - let addr = RegisterAddress::from_hex(address_str) - .wrap_err("Could not parse hex string") - .suggestion( - "If getting a register by name, use the `-n` flag eg:\n - safe register get -n ", - )?; - Ok((addr, format!("at {address_str}"))) - } -} diff --git a/sn_cli/src/bin/subcommands/wallet.rs b/sn_cli/src/bin/subcommands/wallet.rs deleted file mode 100644 index 0392c81874..0000000000 --- a/sn_cli/src/bin/subcommands/wallet.rs +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod audit; -pub(crate) mod helpers; -pub(crate) mod hot_wallet; -pub(crate) mod wo_wallet; - -use sn_client::transfers::{CashNote, HotWallet, MainPubkey, NanoTokens, WatchOnlyWallet}; -use sn_protocol::storage::SpendAddress; - -use crate::get_stdin_password_response; -use color_eyre::Result; -use std::{collections::BTreeSet, io::Read, path::Path}; - -// TODO: convert this into a Trait part of the wallet APIs. -pub(crate) enum WalletApiHelper { - WatchOnlyWallet(WatchOnlyWallet), - HotWallet(HotWallet), -} - -impl WalletApiHelper { - pub fn watch_only_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - Ok(Self::WatchOnlyWallet(wallet)) - } - - pub fn load_from(root_dir: &Path) -> Result { - let wallet = if HotWallet::is_encrypted(root_dir) { - println!("Wallet is encrypted. It needs a password to unlock."); - let password = get_stdin_password_response("Enter password: "); - let mut wallet = HotWallet::load_encrypted_from_path(root_dir, password.to_owned())?; - // Authenticate so that a user doesn't have to immediately provide the password again - wallet.authenticate_with_password(password)?; - wallet - } else { - HotWallet::load_from(root_dir)? 
- }; - - Ok(Self::HotWallet(wallet)) - } - - pub fn encrypt(root_dir: &Path, password: &str) -> Result<()> { - HotWallet::encrypt(root_dir, password)?; - Ok(()) - } - - pub fn balance(&self) -> NanoTokens { - match self { - Self::WatchOnlyWallet(w) => w.balance(), - Self::HotWallet(w) => w.balance(), - } - } - - pub fn status(&mut self) -> Result<()> { - self.authenticate()?; - - match self { - Self::WatchOnlyWallet(_) => Ok(()), - Self::HotWallet(w) => { - println!("Unconfirmed spends are:"); - for spend in w.unconfirmed_spend_requests().iter() { - let address = SpendAddress::from_unique_pubkey(&spend.spend.unique_pubkey); - println!( - "Unconfirmed spend {address:?} - {:?}, hex_str: {:?}", - spend.spend.unique_pubkey, - address.to_hex() - ); - println!( - "reason {:?}, amount {}, inputs: {}, outputs: {}", - spend.spend.reason, - spend.spend.amount(), - spend.spend.ancestors.len(), - spend.spend.descendants.len() - ); - println!("Inputs in hex str:"); - for input in spend.spend.ancestors.iter() { - let address = SpendAddress::from_unique_pubkey(input); - println!("Input spend {}", address.to_hex()); - } - println!("Outputs in hex str:"); - for (output, amount) in spend.spend.descendants.iter() { - let address = SpendAddress::from_unique_pubkey(output); - println!("Output {} with {amount}", address.to_hex()); - } - } - println!("Available cash notes are:"); - if let Ok(available_cnrs) = w.available_cash_notes() { - for cnr in available_cnrs.0.iter() { - println!("{cnr:?}"); - } - } - - Ok(()) - } - } - } - - pub fn read_cash_note_from_stdin(&mut self) -> Result<()> { - println!("Please paste your CashNote below:"); - let mut input = String::new(); - std::io::stdin().read_to_string(&mut input)?; - self.deposit_from_cash_note_hex(&input) - } - - pub fn deposit_from_cash_note_hex(&mut self, input: &str) -> Result<()> { - let cash_note = CashNote::from_hex(input.trim())?; - - let old_balance = self.balance(); - let cash_notes = vec![cash_note.clone()]; - - let 
spent_unique_pubkeys: BTreeSet<_> = cash_note - .parent_spends - .iter() - .map(|spend| spend.unique_pubkey()) - .collect(); - - match self { - Self::WatchOnlyWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - Self::HotWallet(w) => { - w.mark_notes_as_spent(spent_unique_pubkeys); - w.deposit_and_store_to_disk(&cash_notes)? - } - } - let new_balance = self.balance(); - println!("Successfully stored cash_note to wallet dir. \nOld balance: {old_balance}\nNew balance: {new_balance}"); - - Ok(()) - } - - pub fn deposit(&mut self, read_from_stdin: bool, cash_note: Option<&str>) -> Result<()> { - if read_from_stdin { - return self.read_cash_note_from_stdin(); - } - - if let Some(cash_note_hex) = cash_note { - return self.deposit_from_cash_note_hex(cash_note_hex); - } - - let previous_balance = self.balance(); - - self.try_load_cash_notes()?; - - let deposited = NanoTokens::from(self.balance().as_nano() - previous_balance.as_nano()); - if deposited.is_zero() { - println!("Nothing deposited."); - } else if let Err(err) = self.deposit_and_store_to_disk(&vec![]) { - println!("Failed to store deposited ({deposited}) amount: {err:?}"); - } else { - println!("Deposited {deposited}."); - } - - Ok(()) - } - - fn deposit_and_store_to_disk(&mut self, cash_notes: &Vec) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - Self::HotWallet(w) => w.deposit_and_store_to_disk(cash_notes)?, - } - Ok(()) - } - - fn try_load_cash_notes(&mut self) -> Result<()> { - match self { - Self::WatchOnlyWallet(w) => w.try_load_cash_notes()?, - Self::HotWallet(w) => w.try_load_cash_notes()?, - } - Ok(()) - } - - /// Authenticate with password for encrypted wallet. 
- fn authenticate(&mut self) -> Result<()> { - match self { - WalletApiHelper::WatchOnlyWallet(_) => Ok(()), - WalletApiHelper::HotWallet(w) => { - if w.authenticate().is_err() { - let password = get_stdin_password_response("Wallet password: "); - w.authenticate_with_password(password)?; - Ok(()) - } else { - Ok(()) - } - } - } - } -} - -fn watch_only_wallet_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result { - let pk_hex = main_pk.to_hex(); - let folder_name = format!("pk_{}_{}", &pk_hex[..6], &pk_hex[pk_hex.len() - 6..]); - let wallet_dir = root_dir.join(folder_name); - println!( - "Loading watch-only local wallet from: {}", - wallet_dir.display() - ); - let wallet = WatchOnlyWallet::load_from(&wallet_dir, main_pk)?; - Ok(wallet) -} diff --git a/sn_cli/src/bin/subcommands/wallet/audit.rs b/sn_cli/src/bin/subcommands/wallet/audit.rs deleted file mode 100644 index c0e3833d50..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/audit.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use std::path::Path; -use std::str::FromStr; - -use bls::SecretKey; -use color_eyre::eyre::bail; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::{CashNoteRedemption, SpendAddress, Transfer, GENESIS_SPEND_UNIQUE_KEY}; -use sn_client::{Client, SpendDag}; - -const SPEND_DAG_FILENAME: &str = "spend_dag"; -const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; - -async fn step_by_step_spend_dag_gathering(client: &Client, mut dag: SpendDag) -> Result { - let start_time = std::time::Instant::now(); - println!("Gathering the Spend DAG, note that this might take a very long time..."); - let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE); - tokio::spawn(async move { - let mut spend_count = 0; - let mut exponential = 64; - while let Some(_spend) = rx.recv().await { - spend_count += 1; - if spend_count % exponential == 0 { - println!("Collected {spend_count} spends..."); - exponential *= 2; - } - } - }); - - client - .spend_dag_continue_from_utxos(&mut dag, Some(tx), false) - .await; - println!("Done gathering the Spend DAG in {:?}", start_time.elapsed()); - - // verify the DAG - if let Err(e) = dag.record_faults(&dag.source()) { - println!("DAG verification failed: {e}"); - } else { - let faults_len = dag.faults().len(); - println!("DAG verification successful, identified {faults_len} faults.",); - if faults_len > 0 { - println!("Logging identified faults: {:#?}", dag.faults()); - } - } - Ok(dag) -} - -/// Gather the Spend DAG from the Network and store it on disk -/// If a DAG is found on disk, it will continue from it -/// If fast_mode is true, gathers in a silent and fast way -/// else enjoy a step by step slow narrated gathering -async fn gather_spend_dag(client: &Client, root_dir: &Path, fast_mode: bool) -> Result { - let dag_path = root_dir.join(SPEND_DAG_FILENAME); - let inital_dag = match SpendDag::load_from_file(&dag_path) { - Ok(mut dag) => { - println!("Found a 
local spend dag on disk, continuing from it..."); - if fast_mode { - client - .spend_dag_continue_from_utxos(&mut dag, None, false) - .await; - } - dag - } - Err(err) => { - println!("Starting from Genesis as found no local spend dag on disk..."); - info!("Starting from Genesis as failed to load spend dag from disk: {err}"); - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - if fast_mode { - client - .spend_dag_build_from(genesis_addr, None, true) - .await? - } else { - client.new_dag_with_genesis_only().await? - } - } - }; - - let dag = match fast_mode { - true => inital_dag, - false => step_by_step_spend_dag_gathering(client, inital_dag).await?, - }; - - println!("Saving DAG to disk at: {dag_path:?}"); - dag.dump_to_file(dag_path)?; - - Ok(dag) -} - -pub async fn audit( - client: &Client, - to_dot: bool, - royalties: bool, - root_dir: &Path, - foundation_sk: Option, -) -> Result<()> { - let fast_mode = to_dot || royalties || foundation_sk.is_some(); - let dag = gather_spend_dag(client, root_dir, fast_mode).await?; - - if to_dot { - println!("========================== spends DAG digraph =========================="); - println!("{}", dag.dump_dot_format()); - } - if let Some(sk) = foundation_sk { - println!( - "========================== payment forward statistics ==========================" - ); - println!("{}", dag.dump_payment_forward_statistics(&sk)); - } - if royalties { - let royalties = dag.all_royalties()?; - redeem_royalties(royalties, client, root_dir).await?; - } - - println!("Audit completed successfully."); - Ok(()) -} - -/// Redeem royalties from the Network and deposit them into the wallet -/// Only works if the wallet has the private key for the royalties -async fn redeem_royalties( - royalties: Vec, - client: &Client, - root_dir: &Path, -) -> Result<()> { - if royalties.is_empty() { - println!("No royalties found to redeem."); - return Ok(()); - } else { - println!("Found {} royalties.", royalties.len()); - } - - 
let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - // batch royalties per 100 - let mut batch = Vec::new(); - for (i, royalty) in royalties.iter().enumerate() { - batch.push(royalty.clone()); - if i % 100 == 0 { - println!( - "Attempting to redeem {} royalties from the Network...", - batch.len() - ); - let transfer = Transfer::NetworkRoyalties(batch.clone()); - batch.clear(); - println!("Current balance: {}", wallet.balance()); - let cashnotes = client.receive(&transfer, &wallet).await?; - wallet.deposit_and_store_to_disk(&cashnotes)?; - println!("Successfully redeemed royalties from the Network."); - println!("Current balance: {}", wallet.balance()); - } - } - Ok(()) -} - -/// Verify a spend's existance on the Network. -/// If genesis is true, verify all the way to Genesis, note that this might take A VERY LONG TIME -pub async fn verify_spend_at( - spend_address: String, - genesis: bool, - client: &Client, - root_dir: &Path, -) -> Result<()> { - // get spend - println!("Verifying spend's existance at: {spend_address}"); - let addr = SpendAddress::from_str(&spend_address)?; - let spend = match client.get_spend_from_network(addr).await { - Ok(s) => { - println!("Confirmed spend's existance on the Network at {addr:?}"); - s - } - Err(err) => { - bail!("Could not confirm spend's validity, be careful: {err}") - } - }; - - // stop here if we don't go all the way to Genesis - if !genesis { - return Ok(()); - } - println!("Verifying spend all the way to Genesis, note that this might take a while..."); - - // extend DAG until spend - let dag_path = root_dir.join(SPEND_DAG_FILENAME); - let mut dag = match SpendDag::load_from_file(&dag_path) { - Ok(d) => { - println!("Found a local spend dag on disk, continuing from it, this might make things faster..."); - d - } - Err(err) => { - info!("Starting verification from an empty DAG as failed to load spend dag from disk: {err}"); - let genesis_addr = 
SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - SpendDag::new(genesis_addr) - } - }; - info!("Extending DAG with {spend_address} {addr:?}"); - client.spend_dag_extend_until(&mut dag, addr, spend).await?; - info!("Saving DAG locally at: {dag_path:?}"); - dag.dump_to_file(dag_path)?; - - // verify spend is not faulty - let faults = dag.get_spend_faults(&addr); - if faults.is_empty() { - println!( - "Successfully confirmed spend at {spend_address} is valid, and comes from Genesis!" - ); - } else { - println!("Spend at {spend_address} has {} faults", faults.len()); - println!("{faults:#?}"); - } - - Ok(()) -} diff --git a/sn_cli/src/bin/subcommands/wallet/helpers.rs b/sn_cli/src/bin/subcommands/wallet/helpers.rs deleted file mode 100644 index e3ef2d6687..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/helpers.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[cfg(feature = "distribution")] -use base64::Engine; -use color_eyre::Result; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::transfers::Transfer; -use sn_client::Client; -use std::path::Path; -use url::Url; - -#[cfg(feature = "distribution")] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - address: Option, - signature: Option, -) -> Result<()> { - if address.is_some() ^ signature.is_some() { - println!("Address and signature must both be specified."); - return Ok(()); - } - if address.is_none() && signature.is_none() { - get_faucet_fixed_amount(root_dir, client, url).await?; - } else if let Some(addr) = address { - if let Some(sig) = signature { - get_faucet_distribution(root_dir, client, url, addr, sig).await?; - } - } - Ok(()) -} - -#[cfg(not(feature = "distribution"))] -pub async fn get_faucet( - root_dir: &Path, - client: &Client, - url: String, - _address: Option, - _signature: Option, -) -> Result<()> { - get_faucet_fixed_amount(root_dir, client, url).await -} - -pub async fn get_faucet_fixed_amount(root_dir: &Path, client: &Client, url: String) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let address_hex = wallet.address().to_hex(); - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - let req_url = Url::parse(&format!("{url}/{address_hex}"))?; - println!("Requesting token for wallet address: {address_hex}"); - - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let body = response.text().await?; - if is_ok { - receive(body, false, client, root_dir).await?; - println!("Successfully got tokens from faucet."); - } else { - println!("Failed to get tokens from faucet, server responded with: {body:?}"); - } - Ok(()) -} - -#[cfg(feature = "distribution")] -pub async fn get_faucet_distribution( - root_dir: &Path, - client: &Client, - url: String, - 
address: String, - signature: String, -) -> Result<()> { - // submit the details to the faucet to get the distribution - let url = if !url.contains("://") { - format!("{}://{}", "http", url) - } else { - url - }; - // receive to the current local wallet - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)? - .address() - .to_hex(); - println!("Requesting distribution for maid address {address} to local wallet {wallet}"); - // base64 uses + and / as the delimiters which doesn't go well in the query - // string, so the signature is encoded using url safe characters. - let sig_bytes = base64::engine::general_purpose::STANDARD.decode(signature)?; - let sig_url = base64::engine::general_purpose::URL_SAFE.encode(sig_bytes); - let req_url = Url::parse(&format!( - "{url}/distribution?address={address}&wallet={wallet}&signature={sig_url}" - ))?; - let response = reqwest::get(req_url).await?; - let is_ok = response.status().is_success(); - let transfer_hex = response.text().await?; - if !is_ok { - println!( - "Failed to get distribution from faucet, server responded with:\n{transfer_hex:?}" - ); - return Ok(()); - } - println!("Receiving transfer for maid address {address}:\n{transfer_hex}"); - receive(transfer_hex, false, client, root_dir).await?; - Ok(()) -} - -pub async fn receive( - transfer: String, - is_file: bool, - client: &Client, - root_dir: &Path, -) -> Result<()> { - let transfer = if is_file { - std::fs::read_to_string(transfer)?.trim().to_string() - } else { - transfer - }; - - let transfer = match Transfer::from_hex(&transfer) { - Ok(transfer) => transfer, - Err(err) => { - println!("Failed to parse transfer: {err:?}"); - println!("Transfer: \"{transfer}\""); - return Err(err.into()); - } - }; - println!("Successfully parsed transfer. 
"); - - println!("Verifying transfer with the Network..."); - let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - let cashnotes = match client.receive(&transfer, &wallet).await { - Ok(cashnotes) => cashnotes, - Err(err) => { - println!("Failed to verify and redeem transfer: {err:?}"); - return Err(err.into()); - } - }; - println!("Successfully verified transfer."); - - let old_balance = wallet.balance(); - wallet.deposit_and_store_to_disk(&cashnotes)?; - let new_balance = wallet.balance(); - - println!("Successfully stored cash_note to wallet dir."); - println!("Old balance: {old_balance}"); - println!("New balance: {new_balance}"); - - Ok(()) -} diff --git a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs b/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs deleted file mode 100644 index 6b209a9625..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{ - audit::{audit, verify_spend_at}, - helpers::{get_faucet, receive}, - WalletApiHelper, -}; -use crate::{get_stdin_password_response, get_stdin_response}; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{eyre::eyre, Result}; -use dialoguer::Confirm; -use sn_cli::utils::is_valid_key_hex; -use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic}; -use sn_client::transfers::{ - HotWallet, MainPubkey, MainSecretKey, NanoTokens, Transfer, TransferError, UnsignedTransaction, - WalletError, -}; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error as ClientError, -}; -use std::{path::Path, str::FromStr}; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser, Debug)] -pub enum WalletCmds { - /// Print the wallet address. - Address, - /// Print the wallet balance. - Balance { - /// Instead of checking CLI local wallet balance, the PeerId of a node can be used - /// to check the balance of its rewards local wallet. Multiple ids can be provided - /// in order to read the balance of multiple nodes at once. - #[clap(long)] - peer_id: Vec, - }, - /// Create a hot wallet. - Create { - /// Optional flag to not replace existing wallet. - #[clap(long, action)] - no_replace: bool, - /// Optional flag to not add a password. - #[clap(long, action)] - no_password: bool, - /// Optional hex-encoded main secret key. - #[clap(long, short, name = "key")] - key: Option, - /// Optional derivation passphrase to protect the mnemonic, - /// it's not the source of the entropy for the mnemonic generation. - /// The mnemonic+passphrase will be the seed. See detail at - /// `` - #[clap(long, short, name = "derivation")] - derivation_passphrase: Option, - /// Optional password to encrypt the wallet with. - #[clap(long, short)] - password: Option, - }, - /// Get tokens from a faucet. 
- GetFaucet { - /// The http url of the faucet to get tokens from. - #[clap(name = "url")] - url: String, - /// The maidsafecoin address to claim. Leave blank to receive a fixed - /// amount of tokens. - maid_address: Option, - /// A signature of the safe wallet address, made by the maidsafecoin - /// address. - signature: Option, - }, - /// Send a transfer. - /// - /// This command will create a new transfer and encrypt it for the recipient. - /// This encrypted transfer can then be shared with the recipient, who can then - /// use the 'receive' command to claim the funds. - Send { - /// The number of SafeNetworkTokens to send. - #[clap(name = "amount")] - amount: String, - /// Hex-encoded public address of the recipient. - #[clap(name = "to")] - to: String, - }, - /// Signs a transaction to be then broadcasted to the network. - Sign { - /// Hex-encoded unsigned transaction. It requires a hot-wallet was created for CLI. - #[clap(name = "tx")] - tx: String, - /// Avoid prompts by assuming `yes` as the answer. - #[clap(long, name = "force", default_value = "false")] - force: bool, - }, - /// Receive a transfer created by the 'send' or 'broadcast' command. - Receive { - /// Read the encrypted transfer from a file. - #[clap(long, default_value = "false")] - file: bool, - /// Encrypted transfer. - #[clap(name = "transfer")] - transfer: String, - }, - /// Verify a spend on the Network. 
- Verify { - /// The Network address or hex encoded UniquePubkey of the Spend to verify - #[clap(name = "spend")] - spend_address: String, - /// Verify all the way to Genesis - /// - /// Used for auditing, note that this might take a very long time - /// Analogous to verifying an UTXO through the entire blockchain in Bitcoin - #[clap(long, default_value = "false")] - genesis: bool, - }, - /// Audit the Currency - /// Note that this might take a very long time - /// Analogous to verifying the entire blockchain in Bitcoin - /// - /// When run without any flags, runs in verbose mode, - /// a slower but more informative mode where DAG collection progress is diplayed - Audit { - /// EXPERIMENTAL Dump Audit DAG in dot format on stdout - #[clap(long, default_value = "false")] - dot: bool, - /// EXPERIMENTAL redeem all royalties - #[clap(long, default_value = "false")] - royalties: bool, - /// Hex string of the Foundation SK. - /// Providing this key allow displaying rewards statistics gathered from the DAG. - #[clap(long, name = "sk_str")] - sk_str: Option, - }, - Status, - /// Encrypt wallet with a password. - Encrypt, -} - -pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Path) -> Result<()> { - match cmds { - WalletCmds::Address => { - let wallet = WalletApiHelper::load_from(root_dir)?; - match wallet { - WalletApiHelper::WatchOnlyWallet(w) => println!("{:?}", w.address()), - WalletApiHelper::HotWallet(w) => println!("{:?}", w.address()), - } - Ok(()) - } - WalletCmds::Balance { peer_id } => { - if peer_id.is_empty() { - let wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - } else { - let default_node_dir_path = dirs_next::data_dir() - .ok_or_else(|| eyre!("Failed to obtain data directory path"))? 
- .join("safe") - .join("node"); - - for id in peer_id { - let path = default_node_dir_path.join(id); - let rewards = WalletApiHelper::load_from(&path)?.balance(); - println!("Node's rewards wallet balance (PeerId: {id}): {rewards}"); - } - } - Ok(()) - } - WalletCmds::Create { - no_replace, - no_password, - key, - derivation_passphrase, - password, - } => { - let mut wallet_already_exists = false; - if key.is_some() && derivation_passphrase.is_some() { - return Err(eyre!( - "Only one of `--key` or `--derivation` may be specified" - )); - } - if *no_password && password.is_some() { - return Err(eyre!( - "Only one of `--no-password` or `--password` may be specified" - )); - } - if let Some(key) = key { - // Check if key is valid - // Doing this early to avoid stashing an existing wallet while the provided key is invalid - if !is_valid_key_hex(key) { - return Err(eyre!("Please provide a valid secret key in hex format. It must be 64 characters long.")); - } - } - // Check for existing wallet - if HotWallet::is_encrypted(root_dir) { - wallet_already_exists = true; - println!("Existing encrypted wallet found."); - } else if let Ok(existing_wallet) = WalletApiHelper::load_from(root_dir) { - wallet_already_exists = true; - let balance = existing_wallet.balance(); - println!("Existing wallet found with balance of {balance}"); - } - // If a wallet already exists, ask the user if they want to replace it - if wallet_already_exists { - let response = if *no_replace { - "n".to_string() - } else { - get_stdin_response("Replace existing wallet with new wallet? 
[y/N]") - }; - if response != "y" { - // Do nothing, return ok and prevent any further operations - println!("Exiting without creating new wallet"); - return Ok(()); - } - // remove existing wallet - let new_location = HotWallet::stash(root_dir)?; - println!("Old wallet stored at {}", new_location.display()); - } - let main_sk = if let Some(key) = key { - let sk = SecretKey::from_hex(key) - .map_err(|err| eyre!("Failed to parse hex-encoded SK: {err:?}"))?; - MainSecretKey::new(sk) - } else { - // If no key is specified, use the mnemonic - let mnemonic = load_or_create_mnemonic(root_dir)?; - secret_key_from_mnemonic(mnemonic, derivation_passphrase.to_owned())? - }; - // Ask user if they want to encrypt the wallet with a password - let password = if *no_password { - None - } else if let Some(password) = password { - Some(password.to_owned()) - } else { - request_password(false) - }; - // Create the new wallet with the new key - let main_pubkey = main_sk.main_pubkey(); - let local_wallet = HotWallet::create_from_key(root_dir, main_sk, password)?; - let balance = local_wallet.balance(); - println!( - "Hot Wallet created (balance {balance}) for main public key: {main_pubkey:?}." - ); - Ok(()) - } - WalletCmds::Sign { tx, force } => sign_transaction(tx, root_dir, *force), - WalletCmds::Status => { - let mut wallet = WalletApiHelper::load_from(root_dir)?; - println!("{}", wallet.balance()); - wallet.status()?; - Ok(()) - } - WalletCmds::Encrypt => { - println!("Encrypt your wallet with a password. 
WARNING: If you forget your password, you will lose access to your wallet!"); - // Ask user for a new password to encrypt the wallet with - if let Some(password) = request_password(true) { - WalletApiHelper::encrypt(root_dir, &password)?; - } - println!("Wallet successfully encrypted."); - Ok(()) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wallet_cmds( - cmds: WalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WalletCmds::Send { amount, to } => send(amount, to, client, root_dir, verify_store).await, - WalletCmds::Receive { file, transfer } => receive(transfer, file, client, root_dir).await, - WalletCmds::GetFaucet { - url, - maid_address, - signature, - } => get_faucet(root_dir, client, url.clone(), maid_address, signature).await, - WalletCmds::Audit { - dot, - royalties, - sk_str, - } => { - let sk_key = if let Some(s) = sk_str { - match SecretKey::from_hex(&s) { - Ok(sk_key) => Some(sk_key), - Err(err) => { - return Err(eyre!( - "Cann't parse Foundation SK from input string: {s} {err:?}" - )) - } - } - } else { - None - }; - audit(client, dot, royalties, root_dir, sk_key).await - } - WalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -async fn send( - amount: String, - to: String, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - let from = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let amount = match NanoTokens::from_str(&amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. 
Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let cash_note = match sn_client::send(from, amount, to, client, verify_store).await { - Ok(cash_note) => { - let wallet = HotWallet::load_from(root_dir)?; - println!("Sent {amount:?} to {to:?}"); - println!("New wallet balance is {}.", wallet.balance()); - cash_note - } - Err(err) => { - match err { - ClientError::AmountIsZero => { - println!("Zero amount passed in. Nothing sent."); - } - ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - ))) => { - println!("Could not send due to low balance.\nBalance: {available:?}\nRequired: {required:?}"); - } - _ => { - println!("Failed to send {amount:?} to {to:?} due to {err:?}."); - } - } - return Err(err.into()); - } - }; - - let transfer = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?; - println!("The encrypted transfer has been successfully created."); - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the 'receive' command to claim the funds."); - - Ok(()) -} - -fn sign_transaction(tx: &str, root_dir: &Path, force: bool) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let unsigned_tx = UnsignedTransaction::from_hex(tx)?; - - println!("The unsigned transaction has been successfully decoded:"); - for (i, (unique_pk, amount)) in unsigned_tx.spent_unique_keys().iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", unique_pk.to_hex()); - println!("\tAmount: {amount}"); - - for (descendant, amount) in unsigned_tx.output_unique_keys().iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!("\n** Please make sure the above 
information is correct before signing it. **\n"); - let confirmation = Confirm::new() - .with_prompt("Do you want to sign the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction not signed."); - return Ok(()); - } - } - - println!("Signing the transaction with local hot-wallet..."); - let signed_tx = wallet.sign(unsigned_tx)?; - - println!( - "The transaction has been successfully signed:\n\n{}\n", - signed_tx.to_hex()? - ); - println!( - "Please copy the above text, and broadcast it to the network with 'wallet broadcast' cmd." - ); - - Ok(()) -} - -fn request_password(required: bool) -> Option { - 'outer: loop { - let prompt = if required { - "Enter password: " - } else { - "Enter password (leave empty for none): " - }; - - let password_response = get_stdin_password_response(prompt); - - if required && password_response.is_empty() { - println!("Password is required."); - continue 'outer; - } - - // If a password is set, request user to repeat it - if !password_response.is_empty() { - const MAX_RETRIES: u8 = 2; - let mut retries = 0u8; - - loop { - let repeat_password = get_stdin_password_response("Repeat password: "); - - if repeat_password == password_response { - break; - } else if retries >= MAX_RETRIES { - // User forgot the password, let them reset it again - println!("You might have forgotten the password. Please set a new one."); - continue 'outer; - } else { - println!("Passwords do not match."); - retries += 1; - } - } - - break Some(password_response); - } - - break None; - } -} diff --git a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs b/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs deleted file mode 100644 index c4513754ba..0000000000 --- a/sn_cli/src/bin/subcommands/wallet/wo_wallet.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{audit::verify_spend_at, watch_only_wallet_from_pk, WalletApiHelper}; - -use bls::PublicKey; -use clap::Parser; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use dialoguer::Confirm; -use sn_client::transfers::{MainPubkey, NanoTokens, SignedTransaction, Transfer, WatchOnlyWallet}; -use sn_client::Client; -use std::{path::Path, str::FromStr}; -use walkdir::WalkDir; - -// Please do not remove the blank lines in these doc comments. -// They are used for inserting line breaks when the help menu is rendered in the UI. -#[derive(Parser, Debug)] -pub enum WatchOnlyWalletCmds { - /// Print the watch-only wallets addresses. - Addresses, - /// Print the wallet balance. - Balance { - /// The hex-encoded public key of an existing watch-only wallet. - #[clap(name = "public key")] - pk: Option, - }, - /// Deposit CashNotes from the received directory to the chosen watch-only wallet. - /// Or Read a hex encoded CashNote from stdin. - /// - /// The default received directory is platform specific: - /// - Linux: $HOME/.local/share/safe/client/\/cash_notes - /// - macOS: $HOME/Library/Application Support/safe/client/\/cash_notes - /// - Windows: C:\Users\{username}\AppData\Roaming\safe\client\\\cash_notes - /// - /// If you find the default path unwieldy, you can also set the RECEIVED_CASHNOTES_PATH environment - /// variable to a path you would prefer to work with. - #[clap(verbatim_doc_comment)] - Deposit { - /// Read a hex encoded CashNote from stdin. - #[clap(long, default_value = "false")] - stdin: bool, - /// The hex encoded CashNote. 
- #[clap(long)] - cash_note: Option, - /// The hex-encoded public key of an existing watch-only wallet to deposit into it. - #[clap(name = "public key")] - pk: String, - }, - /// Create a watch-only wallet from the given (hex-encoded) key. - Create { - /// Hex-encoded main public key. - #[clap(name = "public key")] - pk: String, - }, - /// Builds an unsigned transaction to be signed offline. It requires an existing watch-only wallet. - Transaction { - /// Hex-encoded public key of the source watch-only wallet. - #[clap(name = "from")] - from: String, - /// The number of SafeNetworkTokens to transfer. - #[clap(name = "amount")] - amount: String, - /// Hex-encoded public address of the recipient. - #[clap(name = "to")] - to: String, - }, - /// This command turns an offline signed transaction into a valid sendable Transfer - /// The signed transaction's SignedSpends are broadcasted to the Network and the recipient's Transfer is returned - /// This Transfer can then be sent and redeemed by the recipient using the 'receive' command - Broadcast { - /// Hex-encoded signed transaction. - #[clap(name = "signed Tx")] - signed_tx: String, - /// Avoid prompts by assuming `yes` as the answer. - #[clap(long, name = "force", default_value = "false")] - force: bool, - }, - /// Verify a spend on the Network. 
- Verify { - /// The Network address or hex encoded UniquePubkey of the Spend to verify - #[clap(name = "spend")] - spend_address: String, - /// Verify all the way to Genesis - /// - /// Used for auditing, note that this might take a very long time - /// Analogous to verifying an UTXO through the entire blockchain in Bitcoin - #[clap(long, default_value = "false")] - genesis: bool, - }, -} - -pub(crate) async fn wo_wallet_cmds_without_client( - cmds: &WatchOnlyWalletCmds, - root_dir: &Path, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Addresses => { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Addresses of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - for (wo_wallet, _) in wallets { - println!("- {:?}", wo_wallet.address()); - } - Ok(()) - } - WatchOnlyWalletCmds::Balance { pk } => { - if let Some(pk) = pk { - let main_pk = MainPubkey::from_hex(pk)?; - let watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - println!("{}", watch_only_wallet.balance()); - } else { - let wallets = get_watch_only_wallets(root_dir)?; - println!( - "Balances of {} watch-only wallets found at {}:", - wallets.len(), - root_dir.display() - ); - let mut total = NanoTokens::zero(); - for (wo_wallet, folder_name) in wallets { - let balance = wo_wallet.balance(); - println!("{folder_name}: {balance}"); - total = total - .checked_add(balance) - .ok_or(eyre!("Failed to add to total balance"))?; - } - println!("Total: {total}"); - } - Ok(()) - } - WatchOnlyWalletCmds::Deposit { - stdin, - cash_note, - pk, - } => { - let main_pk = MainPubkey::from_hex(pk)?; - let mut wallet = WalletApiHelper::watch_only_from_pk(main_pk, root_dir)?; - wallet.deposit(*stdin, cash_note.as_deref()) - } - WatchOnlyWalletCmds::Create { pk } => { - let pk = PublicKey::from_hex(pk) - .map_err(|err| eyre!("Failed to parse hex-encoded PK: {err:?}"))?; - let main_pk = MainPubkey::new(pk); - let main_pubkey = main_pk.public_key(); - let 
watch_only_wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let balance = watch_only_wallet.balance(); - println!("Watch-only wallet created (balance {balance}) for main public key: {main_pubkey:?}."); - Ok(()) - } - WatchOnlyWalletCmds::Transaction { from, amount, to } => { - build_unsigned_transaction(from, amount, to, root_dir) - } - cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")), - } -} - -pub(crate) async fn wo_wallet_cmds( - cmds: WatchOnlyWalletCmds, - client: &Client, - root_dir: &Path, - verify_store: bool, -) -> Result<()> { - match cmds { - WatchOnlyWalletCmds::Broadcast { signed_tx, force } => { - broadcast_signed_tx(signed_tx, client, verify_store, force).await - } - WatchOnlyWalletCmds::Verify { - spend_address, - genesis, - } => verify_spend_at(spend_address, genesis, client, root_dir).await, - cmd => Err(eyre!( - "{cmd:?} has to be processed before connecting to the network" - )), - } -} - -fn get_watch_only_wallets(root_dir: &Path) -> Result> { - let mut wallets = vec![]; - for entry in WalkDir::new(root_dir.display().to_string()) - .into_iter() - .flatten() - { - if let Some(file_name) = entry.path().file_name().and_then(|name| name.to_str()) { - if file_name.starts_with("pk_") { - let wallet_dir = root_dir.join(file_name); - if let Ok(wo_wallet) = WatchOnlyWallet::load_from_path(&wallet_dir) { - wallets.push((wo_wallet, file_name.to_string())); - } - } - } - } - if wallets.is_empty() { - bail!("No watch-only wallets found at {}", root_dir.display()); - } - - Ok(wallets) -} - -fn build_unsigned_transaction(from: &str, amount: &str, to: &str, root_dir: &Path) -> Result<()> { - let main_pk = MainPubkey::from_hex(from)?; - let mut wallet = watch_only_wallet_from_pk(main_pk, root_dir)?; - let amount = match NanoTokens::from_str(amount) { - Ok(amount) => amount, - Err(err) => { - println!("The amount cannot be parsed. 
Nothing sent."); - return Err(err.into()); - } - }; - let to = match MainPubkey::from_hex(to) { - Ok(to) => to, - Err(err) => { - println!("Error while parsing the recipient's 'to' key: {err:?}"); - return Err(err.into()); - } - }; - - let unsigned_transfer = wallet.build_unsigned_transaction(vec![(amount, to)], None)?; - - println!( - "The unsigned transaction has been successfully created:\n\n{}\n", - hex::encode(rmp_serde::to_vec(&unsigned_transfer)?) - ); - println!("Please copy the above text, sign it offline with 'wallet sign' cmd, and then use the signed transaction to broadcast it with 'wallet broadcast' cmd."); - - Ok(()) -} - -async fn broadcast_signed_tx( - signed_tx: String, - client: &Client, - verify_store: bool, - force: bool, -) -> Result<()> { - let signed_tx = match SignedTransaction::from_hex(&signed_tx) { - Ok(signed_tx) => signed_tx, - Err(err) => { - bail!("Failed to decode the signed transaction: {err:?}"); - } - }; - println!("The signed transaction has been successfully decoded:"); - - for (i, signed_spend) in signed_tx.spends.iter().enumerate() { - println!("\nSpending input #{i}:"); - println!("\tKey: {}", signed_spend.unique_pubkey().to_hex()); - println!("\tAmount: {}", signed_spend.amount()); - - if let Err(err) = signed_spend.verify() { - bail!("Transaction is invalid: {err:?}"); - } - - for (descendant, amount) in signed_spend.spend.descendants.iter() { - println!("\tOutput Key: {}", descendant.to_hex()); - println!("\tAmount: {amount}"); - } - } - - if !force { - println!( - "\n** Please make sure the above information is correct before broadcasting it. 
**\n" - ); - let confirmation = Confirm::new() - .with_prompt("Do you want to broadcast the above transaction?") - .interact()?; - - if !confirmation { - println!("Transaction was not broadcasted."); - return Ok(()); - } - } - - println!("Broadcasting the transaction to the network..."); - // return the first CashNote (assuming there is only one because we only sent to one recipient) - let cash_note = match &signed_tx.output_cashnotes[..] { - [cashnote] => cashnote, - [_multiple, ..] => bail!("Multiple CashNotes were returned from the transaction when only one was expected. This is a BUG."), - [] =>bail!("No CashNotes were built from the Tx.") - }; - - // send to network - client - .send_spends(signed_tx.spends.iter(), verify_store) - .await - .map_err(|err| { - eyre!("The transfer was not successfully registered in the network: {err:?}") - })?; - - println!("Transaction broadcasted!."); - - let transfer = Transfer::transfer_from_cash_note(cash_note)?.to_hex()?; - println!("Please share this to the recipient:\n\n{transfer}\n"); - println!("The recipient can then use the wallet 'receive' command to claim the funds.\n"); - - if let Some(change_cn) = signed_tx.change_cashnote { - let change_transfer = Transfer::transfer_from_cash_note(&change_cn)?.to_hex()?; - println!("Please redeem the change from this Transaction:\n\n{change_transfer}\n"); - println!("You should use the wallet 'deposit' command to be able to use these funds.\n"); - } - - Ok(()) -} diff --git a/sn_cli/src/files.rs b/sn_cli/src/files.rs deleted file mode 100644 index 66341f4865..0000000000 --- a/sn_cli/src/files.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod chunk_manager; -mod download; -mod estimate; -mod files_uploader; -mod upload; - -pub use chunk_manager::ChunkManager; -pub use download::{download_file, download_files}; -pub use estimate::Estimator; -pub use files_uploader::{FilesUploadStatusNotifier, FilesUploadSummary, FilesUploader}; -pub use upload::{UploadedFile, UPLOADED_FILES}; - -use color_eyre::Result; -use indicatif::{ProgressBar, ProgressStyle}; -use std::time::Duration; - -pub fn get_progress_bar(length: u64) -> Result { - let progress_bar = ProgressBar::new(length); - progress_bar.set_style( - ProgressStyle::default_bar() - .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")? - .progress_chars("#>-"), - ); - progress_bar.enable_steady_tick(Duration::from_millis(100)); - Ok(progress_bar) -} diff --git a/sn_cli/src/files/chunk_manager.rs b/sn_cli/src/files/chunk_manager.rs deleted file mode 100644 index 577ff0e111..0000000000 --- a/sn_cli/src/files/chunk_manager.rs +++ /dev/null @@ -1,1045 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::get_progress_bar; -use super::upload::UploadedFile; -use bytes::Bytes; -use color_eyre::{ - eyre::{bail, eyre}, - Result, -}; -use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress}, - FilesApi, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - ffi::OsString, - fs::{self, File}, - io::Write, - path::{Path, PathBuf}, - time::Instant, -}; -use tracing::{debug, error, info, trace}; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -const CHUNK_ARTIFACTS_DIR: &str = "chunk_artifacts"; -const METADATA_FILE: &str = "metadata"; - -// The unique hex encoded hash(path) -// This allows us to uniquely identify if a file has been chunked or not. -// An alternative to use instead of filename as it might not be unique -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -struct PathXorName(String); - -impl PathXorName { - fn new(path: &Path) -> PathXorName { - // we just need an unique value per path, thus we don't have to mind between the - // [u8]/[u16] differences - let path_as_lossy_str = path.as_os_str().to_string_lossy(); - let path_xor = XorName::from_content(path_as_lossy_str.as_bytes()); - PathXorName(hex::encode(path_xor)) - } -} - -/// Info about a file that has been chunked -#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -pub struct ChunkedFile { - pub file_path: PathBuf, - pub file_name: OsString, - pub head_chunk_address: ChunkAddress, - pub chunks: BTreeSet<(XorName, PathBuf)>, - pub data_map: Chunk, -} - -/// Manages the chunking process by resuming pre-chunked files and chunking any -/// file that has not been chunked yet. 
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] -pub struct ChunkManager { - /// Whole client root dir - root_dir: PathBuf, - /// Dir for chunk artifacts - artifacts_dir: PathBuf, - files_to_chunk: Vec<(OsString, PathXorName, PathBuf)>, - chunks: BTreeMap, - completed_files: Vec<(PathBuf, OsString, ChunkAddress)>, - resumed_chunk_count: usize, - resumed_files_count: usize, -} - -impl ChunkManager { - // Provide the root_dir. The function creates a sub-directory to store the SE chunks - pub fn new(root_dir: &Path) -> Self { - let artifacts_dir = root_dir.join(CHUNK_ARTIFACTS_DIR); - Self { - root_dir: root_dir.to_path_buf(), - artifacts_dir, - files_to_chunk: Default::default(), - chunks: Default::default(), - completed_files: Default::default(), - resumed_files_count: 0, - resumed_chunk_count: 0, - } - } - - /// Chunk all the files in the provided `files_path` - /// These are stored to the CHUNK_ARTIFACTS_DIR - /// if read_cache is true, will take cache from previous runs into account - /// - /// # Arguments - /// * files_path - &[Path] - /// * read_cache - Boolean. Set to true to resume the chunks from the artifacts dir. - /// * include_data_maps - Boolean. 
If set to true, will append all the ChunkedFile.data_map chunks - pub fn chunk_path( - &mut self, - files_path: &Path, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - self.chunk_with_iter( - WalkDir::new(files_path).into_iter().flatten(), - read_cache, - include_data_maps, - ) - } - - /// Return the filename and the file's Xor address if all their chunks has been marked as - /// verified - pub(crate) fn already_put_chunks( - &mut self, - entries_iter: impl Iterator, - make_files_public: bool, - ) -> Result> { - self.chunk_with_iter(entries_iter, false, make_files_public)?; - Ok(self.get_chunks()) - } - - /// Chunk all the files in the provided iterator - /// These are stored to the CHUNK_ARTIFACTS_DIR - /// if read_cache is true, will take cache from previous runs into account - pub fn chunk_with_iter( - &mut self, - entries_iter: impl Iterator, - read_cache: bool, - include_data_maps: bool, - ) -> Result<()> { - let now = Instant::now(); - // clean up - self.files_to_chunk = Default::default(); - self.chunks = Default::default(); - self.completed_files = Default::default(); - self.resumed_chunk_count = 0; - self.resumed_files_count = 0; - - // collect the files to chunk - entries_iter.for_each(|entry| { - if entry.file_type().is_file() { - let path_xor = PathXorName::new(entry.path()); - info!( - "Added file {:?} with path_xor: {path_xor:?} to be chunked/resumed", - entry.path() - ); - self.files_to_chunk.push(( - entry.file_name().to_owned(), - path_xor, - entry.into_path(), - )); - } - }); - let total_files = self.files_to_chunk.len(); - - if total_files == 0 { - return Ok(()); - }; - - // resume the chunks from the artifacts dir - if read_cache { - self.resume_path(); - } - - // note the number of chunks that we've resumed - self.resumed_chunk_count = self - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - // note the number of files that we've resumed - self.resumed_files_count = 
self.chunks.keys().collect::>().len(); - - // Filter out files_to_chunk; Any PathXorName in chunks_to_upload is considered to be resumed. - { - let path_xors = self.chunks.keys().collect::>(); - self.files_to_chunk - .retain(|(_, path_xor, _)| !path_xors.contains(path_xor)); - } - - // Get the list of completed files - { - let completed_files = self.chunks.iter().filter_map(|(_, chunked_file)| { - if chunked_file.chunks.is_empty() { - Some(( - chunked_file.file_path.clone(), - chunked_file.file_name.clone(), - chunked_file.head_chunk_address, - )) - } else { - None - } - }); - - self.completed_files.extend(completed_files); - } - - // Return early if no more files to chunk - if self.files_to_chunk.is_empty() { - debug!( - "All files_to_chunk ({total_files:?}) were resumed. Returning the resumed chunks.", - ); - debug!("It took {:?} to resume all the files", now.elapsed()); - return Ok(()); - } - - let progress_bar = get_progress_bar(total_files as u64)?; - progress_bar.println(format!("Chunking {total_files} files...")); - - let artifacts_dir = &self.artifacts_dir.clone(); - let chunked_files = self.files_to_chunk - .par_iter() - .map(|(original_file_name, path_xor, path)| { - let file_chunks_dir = { - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - fs::create_dir_all(&file_chunks_dir).map_err(|err| { - error!("Failed to create folder {file_chunks_dir:?} for SE chunks with error {err:?}!"); - eyre!("Failed to create dir {file_chunks_dir:?} for SE chunks with error {err:?}") - })?; - file_chunks_dir - }; - - match FilesApi::chunk_file(path, &file_chunks_dir, include_data_maps) { - Ok((head_chunk_address, data_map, size, chunks)) => { - progress_bar.clone().inc(1); - debug!("Chunked {original_file_name:?} with {path_xor:?} into file's XorName: {head_chunk_address:?} of size {size}, and chunks len: {}", chunks.len()); - - let chunked_file = ChunkedFile { - head_chunk_address, - file_path: path.to_owned(), - file_name: original_file_name.clone(), - chunks: 
chunks.into_iter().collect(), - data_map - }; - Ok((path_xor.clone(), chunked_file)) - } - Err(err) => { - println!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - error!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}"); - Err(eyre!("Failed to chunk file {path:?}/{path_xor:?} with err: {err:?}")) - } - } - }) - .collect::>>()?; - debug!( - "Out of total files_to_chunk {total_files}, we have resumed {} files and chunked {} files", - self.resumed_files_count, - chunked_files.len() - ); - - // Self::resume_path would create an empty self.chunks entry if a file that was fully - // completed was resumed. Thus if it is empty, the user did not provide any valid file - // path. - if chunked_files.is_empty() && self.chunks.is_empty() { - bail!( - "The provided path does not contain any file. Please check your path!\nExiting..." - ); - } - - // write metadata and data_map - chunked_files - .par_iter() - .map(|(path_xor, chunked_file)| { - let metadata_path = artifacts_dir.join(&path_xor.0).join(METADATA_FILE); - - info!("Metadata path is: {metadata_path:?}"); - let metadata = rmp_serde::to_vec(&( - chunked_file.head_chunk_address, - chunked_file.data_map.clone(), - )) - .map_err(|_| { - error!("Failed to serialize file_xor_addr for writing metadata"); - eyre!("Failed to serialize file_xor_addr for writing metadata") - })?; - - let mut metadata_file = File::create(&metadata_path).map_err(|_| { - error!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to create metadata_path {metadata_path:?} for {path_xor:?}") - })?; - - metadata_file.write_all(&metadata).map_err(|_| { - error!("Failed to write metadata to {metadata_path:?} for {path_xor:?}"); - eyre!("Failed to write metadata to {metadata_path:?} for {path_xor:?}") - })?; - - debug!("Wrote metadata for {path_xor:?}"); - Ok(()) - }) - .collect::>()?; - - progress_bar.finish_and_clear(); - debug!("It took {:?} to chunk {} files", now.elapsed(), 
total_files); - self.chunks.extend(chunked_files); - - Ok(()) - } - - // Try to resume the chunks - fn resume_path(&mut self) { - let artifacts_dir = self.artifacts_dir.clone(); - let resumed = self - .files_to_chunk - .par_iter() - .filter_map(|(original_file_name, path_xor, original_file_path)| { - // if this folder exists, and if we find chunks under this, we upload them. - let file_chunks_dir = artifacts_dir.join(&path_xor.0); - if !file_chunks_dir.exists() { - return None; - } - Self::read_file_chunks_dir( - file_chunks_dir, - path_xor, - original_file_path.clone(), - original_file_name.clone(), - ) - }) - .collect::>(); - - self.chunks.extend(resumed); - } - - /// Get all the chunk name and their path. - /// If include_data_maps is true, append all the ChunkedFile.data_map chunks to the vec - pub fn get_chunks(&self) -> Vec<(XorName, PathBuf)> { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .cloned() - .collect::>() - } - - pub fn is_chunks_empty(&self) -> bool { - self.chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .next() - .is_none() - } - - /// Mark all the chunks as completed. This removes the chunks from the CHUNK_ARTIFACTS_DIR. - /// But keeps the folder and metadata file that denotes that the file has been already completed. 
- pub fn mark_completed_all(&mut self) -> Result<()> { - let all_chunks = self - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .map(|(chunk, _)| *chunk) - .collect::>(); - self.mark_completed(all_chunks.into_iter()) - } - - /// Mark a set of chunks as completed and remove them from CHUNK_ARTIFACTS_DIR - /// If the entire file is completed, keep the folder and metadata file - pub fn mark_completed(&mut self, chunks: impl Iterator) -> Result<()> { - let set_of_completed_chunks = chunks.collect::>(); - trace!("marking as completed: {set_of_completed_chunks:?}"); - - // remove those files - self.chunks - .par_iter() - .flat_map(|(_, chunked_file)| &chunked_file.chunks) - .map(|(chunk_xor, chunk_path)| { - if set_of_completed_chunks.contains(chunk_xor) { - debug!("removing {chunk_xor:?} at {chunk_path:?} as it is marked as completed"); - fs::remove_file(chunk_path).map_err(|_err| { - error!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}"); - eyre!("Failed to remove SE chunk {chunk_xor} from {chunk_path:?}") - })?; - } - Ok(()) - }) - .collect::>()?; - - let mut entire_file_is_done = BTreeSet::new(); - // remove the entries from the struct - self.chunks.iter_mut().for_each(|(path_xor, chunked_file)| { - chunked_file - .chunks - // if chunk is part of completed_chunks, return false to remove it - .retain(|(chunk_xor, _)| !set_of_completed_chunks.contains(chunk_xor)); - if chunked_file.chunks.is_empty() { - entire_file_is_done.insert(path_xor.clone()); - } - }); - - for path_xor in &entire_file_is_done { - // todo: should we remove the entry? 
ig so - if let Some(chunked_file) = self.chunks.remove(path_xor) { - trace!("removed {path_xor:?} from chunks list"); - - self.completed_files.push(( - chunked_file.file_path.clone(), - chunked_file.file_name.clone(), - chunked_file.head_chunk_address, - )); - - let uploaded_file_metadata = UploadedFile { - filename: chunked_file.file_name, - data_map: Some(chunked_file.data_map.value), - }; - // errors are logged by write() - let _result = - uploaded_file_metadata.write(&self.root_dir, &chunked_file.head_chunk_address); - } - } - Ok(()) - - // let mut entire_file_is_done = BTreeSet::new(); - // // remove the entries from the struct - // self.chunks.iter_mut().for_each(|(path_xor, chunked_file)| { - // chunked_file - // .chunks - // // if chunk is part of completed_chunks, return false to remove it - // .retain(|(chunk_xor, _)| !set_of_completed_chunks.contains(chunk_xor)); - // if chunked_file.chunks.is_empty() { - // entire_file_is_done.insert(path_xor.clone()); - // } - // }); - - // for path_xor in &entire_file_is_done { - // // todo: should we remove the entry? ig so - // if let Some(chunked_file) = self.chunks.remove(path_xor) { - // trace!("removed {path_xor:?} from chunks list"); - // self.verified_files - // .push((chunked_file.file_name, chunked_file.head_chunk_address)); - // } - // } - } - - /// Return the filename and the file's Xor address if all their chunks has been marked as - /// completed - pub(crate) fn completed_files(&self) -> &Vec<(PathBuf, OsString, ChunkAddress)> { - &self.completed_files - } - - /// Return the list of Filenames that have some chunks that are yet to be marked as completed. 
- pub(crate) fn incomplete_files(&self) -> Vec<(&PathBuf, &OsString, &ChunkAddress)> { - self.chunks - .values() - .map(|chunked_file| { - ( - &chunked_file.file_path, - &chunked_file.file_name, - &chunked_file.head_chunk_address, - ) - }) - .collect() - } - - /// Returns an iterator over the list of chunked files - pub(crate) fn iter_chunked_files(&mut self) -> impl Iterator { - self.chunks.values() - } - - // Try to read the chunks from `file_chunks_dir` - // Returns the ChunkedFile if the metadata file exists - // file_chunks_dir: artifacts_dir/path_xor - // path_xor: Used during logging and is returned - // original_file_name: Used to create ChunkedFile - fn read_file_chunks_dir( - file_chunks_dir: PathBuf, - path_xor: &PathXorName, - original_file_path: PathBuf, - original_file_name: OsString, - ) -> Option<(PathXorName, ChunkedFile)> { - let mut file_chunk_address: Option = None; - let mut data_map = Chunk::new(Bytes::new()); - debug!("Trying to resume {path_xor:?} as the file_chunks_dir exists"); - - let chunks = WalkDir::new(file_chunks_dir.clone()) - .into_iter() - .flatten() - .filter_map(|entry| { - if !entry.file_type().is_file() { - return None; - } - if entry.file_name() == METADATA_FILE { - if let Some((address, optional_data_map)) = - Self::try_read_metadata(entry.path()) - { - file_chunk_address = Some(address); - data_map = optional_data_map; - debug!("Obtained metadata for {path_xor:?}"); - } else { - error!("Could not read metadata for {path_xor:?}"); - } - // not a chunk, so don't return - return None; - } - - // try to get the chunk's xorname from its filename - if let Some(file_name) = entry.file_name().to_str() { - Self::hex_decode_xorname(file_name) - .map(|chunk_xorname| (chunk_xorname, entry.into_path())) - } else { - error!( - "Failed to convert OsString to str for {:?}", - entry.file_name() - ); - None - } - }) - .collect::>(); - - match file_chunk_address { - Some(head_chunk_address) => { - debug!("Resuming {} chunks for file 
{original_file_name:?} and with file_xor_addr {head_chunk_address:?}/{path_xor:?}", chunks.len()); - - Some(( - path_xor.clone(), - ChunkedFile { - file_path: original_file_path, - file_name: original_file_name, - head_chunk_address, - chunks, - data_map, - }, - )) - } - _ => { - error!("Metadata file or data map was not present for {path_xor:?}"); - // metadata file or data map was not present/was not read - None - } - } - } - - /// Try to read the metadata file - /// Returning (head_chunk_address, datamap Chunk) - fn try_read_metadata(path: &Path) -> Option<(ChunkAddress, Chunk)> { - let metadata = fs::read(path) - .map_err(|err| error!("Failed to read metadata with err {err:?}")) - .ok()?; - // head chunk address and the final datamap contents if a datamap exists for this file - let metadata: (ChunkAddress, Chunk) = rmp_serde::from_slice(&metadata) - .map_err(|err| error!("Failed to deserialize metadata with err {err:?}")) - .ok()?; - - Some(metadata) - } - - // Decode the hex encoded xorname - fn hex_decode_xorname(string: &str) -> Option { - let hex_decoded = hex::decode(string) - .map_err(|err| error!("Failed to decode {string} into bytes with err {err:?}")) - .ok()?; - let decoded_xorname: [u8; xor_name::XOR_NAME_LEN] = hex_decoded - .try_into() - .map_err(|_| error!("Failed to convert hex_decoded xorname into an [u8; 32]")) - .ok()?; - Some(XorName(decoded_xorname)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use color_eyre::{eyre::eyre, Result}; - use rand::{thread_rng, Rng}; - use rayon::prelude::IntoParallelIterator; - use sn_logging::LogBuilder; - use tempfile::TempDir; - - /// Assert any collection/iterator even if their orders do not match. 
- pub fn assert_list_eq(a: I, b: J) - where - K: Eq + Clone, - I: IntoIterator, - J: IntoIterator, - { - let vec1: Vec<_> = a.into_iter().collect::>(); - let mut vec2: Vec<_> = b.into_iter().collect(); - - assert_eq!(vec1.len(), vec2.len()); - - for item1 in &vec1 { - let idx2 = vec2 - .iter() - .position(|item2| item1 == item2) - .expect("Item not found in second list"); - - vec2.swap_remove(idx2); - } - - assert_eq!(vec2.len(), 0); - } - - #[test] - fn chunked_files_should_be_written_to_artifacts_dir() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - let artifacts_dir = manager.artifacts_dir.clone(); - let _ = create_random_files(&random_files_dir, 1, 1)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let chunks = manager.get_chunks(); - // 1. 1mb file produces 4 chunks - assert_eq!(chunks.len(), 4); - - // 2. make sure we have 1 folder == 1 file - let n_folders = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir) - .count(); - assert_eq!(n_folders, 1); - - // 3. make sure we have the 1 files per chunk, + 1 datamap + 1 metadata file - let n_files = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| { - info!("direntry {entry:?}"); - entry.file_type().is_file() - }) - .count(); - assert_eq!(n_files, chunks.len() + 1); - - // 4. 
make sure metadata file holds the correct file_xor_addr - let mut file_xor_addr_from_metadata = None; - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - if entry.file_type().is_file() && entry.file_name() == METADATA_FILE { - let metadata = ChunkManager::try_read_metadata(entry.path()); - - if let Some((head_chunk_addr, _datamap)) = metadata { - file_xor_addr_from_metadata = Some(head_chunk_addr); - } - } - } - let file_xor_addr_from_metadata = - file_xor_addr_from_metadata.expect("The metadata file should be present"); - let file_xor_addr = manager - .chunks - .values() - .next() - .expect("1 file should be present") - .head_chunk_address; - assert_eq!(file_xor_addr_from_metadata, file_xor_addr); - - // 5. make sure the chunked file's name is the XorName of that chunk - let chunk_xornames = manager - .chunks - .values() - .next() - .expect("We must have 1 file here") - .chunks - .iter() - .map(|(xor_name, _)| *xor_name) - .collect::>(); - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - assert!(chunk_xornames.contains(&chunk_xorname_from_filename)); - } - } - - Ok(()) - } - - #[test] - fn no_datamap_chunked_files_should_be_written_to_artifacts_dir_when_not_public() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - let artifacts_dir = manager.artifacts_dir.clone(); - let _ = create_random_files(&random_files_dir, 1, 1)?; - - // we do NOT want to include or write the data_map chunk here - manager.chunk_path(&random_files_dir, true, false)?; - - let chunks = manager.get_chunks(); - // 1. 
1mb file produces 3 chunks without the datamap - assert_eq!(chunks.len(), 3); - - // 2. make sure we have 1 folder == 1 file - let n_folders = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != artifacts_dir) - .count(); - assert_eq!(n_folders, 1); - - // 3. make sure we have the 1 files per chunk, + 1 metadata file - let n_files = WalkDir::new(&artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| { - info!("direntry {entry:?}"); - entry.file_type().is_file() - }) - .count(); - assert_eq!(n_files, chunks.len() + 1); - - // 4. make sure metadata file holds the correct file_xor_addr - let mut file_xor_addr_from_metadata = None; - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - if entry.file_type().is_file() && entry.file_name() == METADATA_FILE { - let metadata = ChunkManager::try_read_metadata(entry.path()); - - if let Some((head_chunk_addr, _datamap)) = metadata { - file_xor_addr_from_metadata = Some(head_chunk_addr); - } - } - } - let file_xor_addr_from_metadata = - file_xor_addr_from_metadata.expect("The metadata file should be present"); - let file_xor_addr = manager - .chunks - .values() - .next() - .expect("1 file should be present") - .head_chunk_address; - assert_eq!(file_xor_addr_from_metadata, file_xor_addr); - - // 5. 
make sure the chunked file's name is the XorName of that chunk - let chunk_xornames = manager - .chunks - .values() - .next() - .expect("We must have 1 file here") - .chunks - .iter() - .map(|(xor_name, _)| *xor_name) - .collect::>(); - for entry in WalkDir::new(&artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - assert!(chunk_xornames.contains(&chunk_xorname_from_filename)); - } - } - - Ok(()) - } - - #[test] - fn chunks_should_be_removed_from_artifacts_dir_if_marked_as_completed() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 1, 1)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let path_xor = manager.chunks.keys().next().unwrap().clone(); - let chunked_file = manager.chunks.values().next().unwrap().clone(); - let file_xor_addr = chunked_file.head_chunk_address; - let (chunk, _) = chunked_file - .chunks - .first() - .expect("Must contain 1 chunk") - .clone(); - let total_chunks = manager.chunks.values().next().unwrap().chunks.len(); - manager.mark_completed(vec![chunk].into_iter())?; - - // 1. chunk should be removed from the struct - assert_eq!( - manager - .chunks - .values() - .next() - .expect("Since the file was not fully completed, it should be present") - .chunks - .len(), - total_chunks - 1, - ); - - // 2. 
the folder should exists, but chunk removed - let file_chunks_dir = manager.artifacts_dir.join(&path_xor.0); - let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir( - file_chunks_dir, - &path_xor, - chunked_file.file_path, - chunked_file.file_name, - ) - .expect("Folder and metadata should be present"); - assert_eq!(chunked_file_from_dir.chunks.len(), total_chunks - 1); - assert_eq!(chunked_file_from_dir.head_chunk_address, file_xor_addr); - assert_eq!(path_xor_from_dir, path_xor); - - // 2. file should not be marked as completed - assert!(manager.completed_files.is_empty()); - - Ok(()) - } - - #[test] - fn marking_all_chunks_as_completed_should_not_remove_the_dir() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - // cloned after chunking - let manager_clone = manager.clone(); - - let n_folders = WalkDir::new(&manager.artifacts_dir) - .into_iter() - .flatten() - .filter(|entry| entry.file_type().is_dir() && entry.path() != manager.artifacts_dir) - .count(); - assert_eq!(n_folders, 5); - - manager.mark_completed_all()?; - - // all 5 files should be marked as completed - assert_eq!(manager.completed_files.len(), 5); - - // all 5 folders should exist - for (path_xor, chunked_file) in manager_clone.chunks.iter() { - let file_chunks_dir = manager_clone.artifacts_dir.join(path_xor.0.clone()); - let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir( - file_chunks_dir, - path_xor, - chunked_file.file_path.clone(), - chunked_file.file_name.to_owned(), - ) - .expect("Folder and metadata should be present"); - assert_eq!(chunked_file_from_dir.chunks.len(), 0); - assert_eq!( - chunked_file_from_dir.head_chunk_address, - chunked_file.head_chunk_address - ); - 
assert_eq!(&path_xor_from_dir, path_xor); - } - - Ok(()) - } - - #[test] - fn mark_none_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. make sure the chunk counts match - let total_chunk_count = manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - assert_eq!(manager.resumed_chunk_count, 0); - assert_eq!(new_manager.resumed_chunk_count, total_chunk_count); - - // 2. assert the two managers - assert_eq!(manager.chunks, new_manager.chunks); - assert_eq!(manager.completed_files, new_manager.completed_files); - - Ok(()) - } - - #[test] - fn mark_one_chunk_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - - let total_chunks_count = manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(); - - // mark a chunk as completed - let removed_chunk = manager - .chunks - .values() - .next() - .expect("Atleast 1 file should be present") - .chunks - .iter() - .next() - .expect("Chunk should be present") - .0; - manager.mark_completed([removed_chunk].into_iter())?; - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. 
we should have 1 completed chunk and (total_chunks_count-1) incomplete chunks - assert_eq!(manager.resumed_chunk_count, 0); - assert_eq!(new_manager.resumed_chunk_count, total_chunks_count - 1); - // also check the structs - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - total_chunks_count - 1 - ); - - // 2. files should not be added to completed files - assert_eq!(new_manager.completed_files.len(), 0); - - Ok(()) - } - - #[test] - fn mark_all_and_resume() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, root_dir, random_files_dir) = init_manager()?; - - let _ = create_random_files(&random_files_dir, 5, 5)?; - manager.chunk_path(&random_files_dir, true, true)?; - manager.mark_completed_all()?; - - let mut new_manager = ChunkManager::new(&root_dir); - new_manager.chunk_path(&random_files_dir, true, true)?; - - // 1. we should have chunk entries, but 0 chunks inside them - assert_eq!(new_manager.chunks.len(), 5); - assert_eq!( - new_manager - .chunks - .values() - .flat_map(|chunked_file| &chunked_file.chunks) - .count(), - 0 - ); - // 2. the resumed stats should be 0 - assert_eq!(new_manager.resumed_chunk_count, 0); - - // 3. 
make sure the files are added to completed list - assert_eq!(new_manager.completed_files.len(), 5); - - Ok(()) - } - - #[test] - fn absence_of_metadata_file_should_re_chunk_the_entire_file() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("chunk_manager", true); - let (_tmp_dir, mut manager, _root_dir, random_files_dir) = init_manager()?; - - let mut random_files = create_random_files(&random_files_dir, 1, 1)?; - let random_file = random_files.remove(0); - manager.chunk_path(&random_files_dir, true, true)?; - - let mut old_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - old_chunks_list.insert(chunk_xorname_from_filename); - } - } - - // remove metadata file from artifacts_dir - let path_xor = PathXorName::new(&random_file); - let metadata_path = manager.artifacts_dir.join(path_xor.0).join(METADATA_FILE); - fs::remove_file(&metadata_path)?; - - // use the same manager to chunk the path - manager.chunk_path(&random_files_dir, true, true)?; - // nothing should be resumed - assert_eq!(manager.resumed_chunk_count, 0); - // but it should be re-chunked - assert_eq!( - manager.get_chunks().len(), - 4, - "we have correct chunk len including data_map" - ); - // metadata file should be created - assert!(metadata_path.exists()); - - let mut new_chunks_list = BTreeSet::new(); - for entry in WalkDir::new(&manager.artifacts_dir).into_iter().flatten() { - let file_name = entry.file_name(); - if entry.file_type().is_file() && file_name != METADATA_FILE { - let chunk_xorname_from_filename = - ChunkManager::hex_decode_xorname(file_name.to_str().unwrap()) - .expect("Failed to get xorname from hex encoded file_name"); - 
new_chunks_list.insert(chunk_xorname_from_filename); - } - } - assert_list_eq(new_chunks_list, old_chunks_list); - - Ok(()) - } - - fn init_manager() -> Result<(TempDir, ChunkManager, PathBuf, PathBuf)> { - let tmp_dir = tempfile::tempdir()?; - let random_files_dir = tmp_dir.path().join("random_files"); - let root_dir = tmp_dir.path().join("root_dir"); - fs::create_dir_all(&random_files_dir)?; - fs::create_dir_all(&root_dir)?; - let manager = ChunkManager::new(&root_dir); - - Ok((tmp_dir, manager, root_dir, random_files_dir)) - } - - fn create_random_files( - at: &Path, - num_files: usize, - mb_per_file: usize, - ) -> Result> { - let files = (0..num_files) - .into_par_iter() - .filter_map(|i| { - let mut path = at.to_path_buf(); - path.push(format!("random_file_{i}")); - match generate_file(&path, mb_per_file) { - Ok(_) => Some(path), - Err(err) => { - error!("Failed to generate random file with {err:?}"); - None - } - } - }) - .collect::>(); - if files.len() < num_files { - return Err(eyre!("Failed to create a Failedkk")); - } - Ok(files) - } - - fn generate_file(path: &PathBuf, file_size_mb: usize) -> Result<()> { - let mut file = File::create(path)?; - let mut rng = thread_rng(); - - // can create [u8; 32] max at time. Thus each mb has 1024*32 such small chunks - let n_small_chunks = file_size_mb * 1024 * 32; - for _ in 0..n_small_chunks { - let random_data: [u8; 32] = rng.gen(); - file.write_all(&random_data)?; - } - let size = file.metadata()?.len() as f64 / (1024 * 1024) as f64; - assert_eq!(file_size_mb as f64, size); - - Ok(()) - } -} diff --git a/sn_cli/src/files/download.rs b/sn_cli/src/files/download.rs deleted file mode 100644 index d95f0a0646..0000000000 --- a/sn_cli/src/files/download.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - get_progress_bar, - upload::{UploadedFile, UPLOADED_FILES}, -}; - -use std::collections::BTreeSet; -use std::ffi::OsString; -use std::path::Path; - -use color_eyre::Result; -use indicatif::ProgressBar; -use walkdir::WalkDir; -use xor_name::XorName; - -use crate::utils::duration_to_minute_seconds_miliseconds_string; -use sn_client::{ - protocol::storage::{Chunk, ChunkAddress, RetryStrategy}, - FilesApi, FilesDownload, FilesDownloadEvent, -}; -use tracing::{debug, error, info}; - -/// The default folder to download files to. -const DOWNLOAD_FOLDER: &str = "safe_files"; - -pub async fn download_files( - files_api: &FilesApi, - root_dir: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) -> Result<()> { - info!("Downloading with batch size of {}", batch_size); - let uploaded_files_path = root_dir.join(UPLOADED_FILES); - let download_path = dirs_next::download_dir() - .unwrap_or(root_dir.to_path_buf()) - .join(DOWNLOAD_FOLDER); - std::fs::create_dir_all(download_path.as_path())?; - - let mut uploaded_files = BTreeSet::new(); - - for entry in WalkDir::new(uploaded_files_path.clone()) { - let entry = entry?; - let path = entry.path(); - if path.is_file() { - let hex_xorname = path - .file_name() - .expect("Uploaded file to have name") - .to_str() - .expect("Failed to convert path to string"); - let bytes = hex::decode(hex_xorname)?; - let xor_name_bytes: [u8; 32] = bytes - .try_into() - .expect("Failed to parse XorName from hex string"); - let xor_name = XorName(xor_name_bytes); - let address = ChunkAddress::new(xor_name); - - let 
uploaded_file_metadata = UploadedFile::read(path)?; - let datamap_chunk = uploaded_file_metadata.data_map.map(|bytes| Chunk { - address, - value: bytes, - }); - uploaded_files.insert((xor_name, (uploaded_file_metadata.filename, datamap_chunk))); - } - } - - for (xorname, file_data) in uploaded_files.into_iter() { - download_file( - files_api.clone(), - xorname, - file_data, - &download_path, - show_holders, - batch_size, - retry_strategy, - ) - .await; - } - - Ok(()) -} - -pub async fn download_file( - files_api: FilesApi, - xor_name: XorName, - // original file name and optional datamap chunk - (file_name, datamap): (OsString, Option), - download_path: &Path, - show_holders: bool, - batch_size: usize, - retry_strategy: RetryStrategy, -) { - let start_time = std::time::Instant::now(); - - let mut files_download = FilesDownload::new(files_api.clone()) - .set_batch_size(batch_size) - .set_show_holders(show_holders) - .set_retry_strategy(retry_strategy); - - println!("Downloading {file_name:?} from {xor_name:64x} with batch-size {batch_size}"); - debug!("Downloading {file_name:?} from {:64x}", xor_name); - let downloaded_file_path = download_path.join(&file_name); - - let mut download_events_rx = files_download.get_events(); - - let progress_handler = tokio::spawn(async move { - let mut progress_bar: Option = None; - - // The loop is guaranteed to end, as the channel will be closed when the download completes or errors out. - while let Some(event) = download_events_rx.recv().await { - match event { - FilesDownloadEvent::Downloaded(_) => { - if let Some(progress_bar) = &progress_bar { - progress_bar.inc(1); - } - } - FilesDownloadEvent::ChunksCount(count) => { - // terminate the progress bar from datamap download. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. 
The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::DatamapCount(count) => { - // terminate the progress bar if it was loaded here. This should not happen. - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - progress_bar = get_progress_bar(count as u64).map_err(|err|{ - println!("Unable to initialize progress bar. The download process will continue without a progress bar."); - error!("Failed to obtain progress bar with err: {err:?}"); - err - }).ok(); - } - FilesDownloadEvent::Error => { - error!("Got FilesDownloadEvent::Error"); - } - } - } - - if let Some(progress_bar) = progress_bar { - progress_bar.finish_and_clear(); - } - }); - - let download_result = files_download - .download_file_to_path( - ChunkAddress::new(xor_name), - datamap, - downloaded_file_path.clone(), - ) - .await; - - let duration = start_time.elapsed(); - - // await on the progress handler first as we want to clear the progress bar before printing things. - let _ = progress_handler.await; - match download_result { - Ok(_) => { - debug!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - println!( - "Saved {file_name:?} at {}", - downloaded_file_path.to_string_lossy() - ); - let elapsed_time = duration_to_minute_seconds_miliseconds_string(duration); - println!("File downloaded in {elapsed_time}"); - } - Err(error) => { - error!("Error downloading {file_name:?}: {error}"); - println!("Error downloading {file_name:?}: {error}") - } - } -} diff --git a/sn_cli/src/files/estimate.rs b/sn_cli/src/files/estimate.rs deleted file mode 100644 index a5c16f4a03..0000000000 --- a/sn_cli/src/files/estimate.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::ChunkManager; - -use std::path::{Path, PathBuf}; - -use color_eyre::Result; - -use sn_client::{ - protocol::{storage::ChunkAddress, NetworkAddress}, - transfers::NanoTokens, - FilesApi, -}; - -pub struct Estimator { - chunk_manager: ChunkManager, - files_api: FilesApi, -} - -impl Estimator { - pub fn new(chunk_manager: ChunkManager, files_api: FilesApi) -> Self { - Self { - chunk_manager, - files_api, - } - } - - /// Estimate the upload cost of a chosen file - pub async fn estimate_cost( - mut self, - path: PathBuf, - make_data_public: bool, - root_dir: &Path, - ) -> Result<()> { - self.chunk_manager - .chunk_path(&path, false, make_data_public)?; - - let mut estimate: u64 = 0; - - let balance = FilesApi::new(self.files_api.client().clone(), root_dir.to_path_buf()) - .wallet()? 
- .balance() - .as_nano(); - - for (chunk_address, _location) in self.chunk_manager.get_chunks() { - let c = self.files_api.clone(); - - tokio::spawn(async move { - let (_peer, _cost, quote) = c - .wallet() - .expect("estimate_cost: Wallet error.") - .get_store_cost_at_address(NetworkAddress::from_chunk_address( - ChunkAddress::new(chunk_address), - )) - .await - .expect("estimate_cost: Error with file."); - quote.cost.as_nano() - }) - .await - .map(|nanos| estimate += nanos) - .expect("estimate_cost: Concurrency error."); - } - - let total = balance.saturating_sub(estimate); - - println!("**************************************"); - println!("Your current balance: {}", NanoTokens::from(balance)); - println!("Transfer cost estimate: {}", NanoTokens::from(estimate)); - println!( - "Your balance estimate after transfer: {}", - NanoTokens::from(total) - ); - println!("**************************************"); - - Ok(()) - } -} diff --git a/sn_cli/src/files/files_uploader.rs b/sn_cli/src/files/files_uploader.rs deleted file mode 100644 index 6e20f2e788..0000000000 --- a/sn_cli/src/files/files_uploader.rs +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::get_progress_bar; -use crate::utils::duration_to_minute_seconds_string; -use crate::ChunkManager; -use bytes::Bytes; -use color_eyre::{eyre::eyre, Report, Result}; -use futures::StreamExt; -use rand::prelude::SliceRandom; -use rand::thread_rng; -use sn_client::{ - transfers::{TransferError, WalletError}, - Client, Error as ClientError, UploadCfg, UploadEvent, UploadSummary, Uploader, -}; -use sn_protocol::storage::{Chunk, ChunkAddress}; -use std::{ - ffi::OsString, - path::{Path, PathBuf}, - time::{Duration, Instant}, -}; -use tokio::{sync::mpsc::Receiver, task::JoinHandle}; -use tracing::{debug, error, info, warn}; -use walkdir::{DirEntry, WalkDir}; -use xor_name::XorName; - -/// The result of a successful files upload. -pub struct FilesUploadSummary { - /// The cost and count summary of the upload. - pub upload_summary: UploadSummary, - /// The list of completed files (FilePath, FileName, HeadChunkAddress) - pub completed_files: Vec<(PathBuf, OsString, ChunkAddress)>, - /// The list of incomplete files (FilePath, FileName, HeadChunkAddress) - pub incomplete_files: Vec<(PathBuf, OsString, ChunkAddress)>, -} - -/// A trait designed to customize the standard output behavior for file upload processes. 
-pub trait FilesUploadStatusNotifier: Send { - fn collect_entries(&mut self, entries_iter: Vec); - fn collect_paths(&mut self, path: &Path); - fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize); - fn on_verifying_uploaded_chunks_success( - &self, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ); - fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize); - fn on_failed_to_upload_all_files( - &self, - incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ); - fn on_chunking_complete( - &self, - upload_cfg: &UploadCfg, - make_data_public: bool, - chunks_to_upload_len: usize, - ); - fn on_upload_complete( - &self, - upload_sum: &UploadSummary, - elapsed_time: Duration, - chunks_to_upload_len: usize, - ); -} - -/// Combines the `Uploader` along with the `ChunkManager` -pub struct FilesUploader { - client: Client, - root_dir: PathBuf, - /// entries to upload - entries_to_upload: Vec, - /// The status notifier that can be overridden to perform custom actions instead of printing things to stdout. - status_notifier: Option>, - /// config - make_data_public: bool, - upload_cfg: UploadCfg, -} - -impl FilesUploader { - pub fn new(client: Client, root_dir: PathBuf) -> Self { - let status_notifier = Box::new(StdOutPrinter { - file_paths_to_print: Default::default(), - }); - Self { - client, - root_dir, - entries_to_upload: Default::default(), - status_notifier: Some(status_notifier), - make_data_public: false, - upload_cfg: Default::default(), - } - } - - pub fn set_upload_cfg(mut self, cfg: UploadCfg) -> Self { - self.upload_cfg = cfg; - self - } - - pub fn set_make_data_public(mut self, make_data_public: bool) -> Self { - self.make_data_public = make_data_public; - self - } - - /// Override the default status notifier. By default we print things to stdout. 
- pub fn set_status_notifier( - mut self, - status_notifier: Box, - ) -> Self { - self.status_notifier = Some(status_notifier); - self - } - - pub fn insert_entries(mut self, entries_iter: impl IntoIterator) -> Self { - self.entries_to_upload.extend(entries_iter); - self - } - - pub fn insert_path(mut self, path: &Path) -> Self { - if let Some(notifier) = &mut self.status_notifier { - notifier.collect_paths(path); - } - let entries = WalkDir::new(path).into_iter().flatten(); - self.entries_to_upload.extend(entries); - self - } - - pub async fn start_upload(mut self) -> Result { - let mut chunk_manager = ChunkManager::new(&self.root_dir); - let chunks_to_upload = self.get_chunks_to_upload(&mut chunk_manager).await?; - let chunks_to_upload_len = chunks_to_upload.len(); - - // Notify on chunking complete - if let Some(notifier) = &self.status_notifier { - notifier.on_chunking_complete( - &self.upload_cfg, - self.make_data_public, - chunks_to_upload_len, - ); - } - - let now = Instant::now(); - let mut uploader = Uploader::new(self.client, self.root_dir); - uploader.set_upload_cfg(self.upload_cfg); - uploader.insert_chunk_paths(chunks_to_upload); - - let events_handle = Self::spawn_upload_events_handler( - chunk_manager, - self.make_data_public, - chunks_to_upload_len, - uploader.get_event_receiver(), - self.status_notifier.take(), - )?; - - let upload_sum = match uploader.start_upload().await { - Ok(summary) => summary, - Err(ClientError::Wallet(WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - )))) => { - return Err(eyre!( - "Not enough balance in wallet to pay for chunk. 
\ - We have {available:?} but need {required:?} to pay for the chunk" - )) - } - Err(err) => return Err(eyre!("Failed to upload chunk batch: {err}")), - }; - let (chunk_manager, status_notifier) = events_handle.await??; - self.status_notifier = status_notifier; - - // Notify on upload complete - if let Some(notifier) = &self.status_notifier { - notifier.on_upload_complete(&upload_sum, now.elapsed(), chunks_to_upload_len); - } - - let summary = FilesUploadSummary { - upload_summary: upload_sum, - completed_files: chunk_manager.completed_files().clone(), - incomplete_files: chunk_manager - .incomplete_files() - .into_iter() - .map(|(path, file_name, head_address)| { - (path.clone(), file_name.clone(), *head_address) - }) - .collect(), - }; - Ok(summary) - } - - // This will read from the cache if possible. We only re-verify with the network if the file has been cached but - // there are no pending chunks to upload. - async fn get_chunks_to_upload( - &self, - chunk_manager: &mut ChunkManager, - ) -> Result> { - // Initially try reading from the cache - chunk_manager.chunk_with_iter( - self.entries_to_upload.iter().cloned(), - true, - self.make_data_public, - )?; - // We verify if there are no chunks left to upload. 
- let mut chunks_to_upload = if !chunk_manager.is_chunks_empty() { - chunk_manager.get_chunks() - } else { - // re chunk it again to get back all the chunks - let chunks = chunk_manager.already_put_chunks( - self.entries_to_upload.iter().cloned(), - self.make_data_public, - )?; - - // Notify on verification init - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_init(chunks.len()); - } - - let failed_chunks = self.verify_uploaded_chunks(&chunks).await?; - - chunk_manager.mark_completed( - chunks - .into_iter() - .filter(|c| !failed_chunks.contains(c)) - .map(|(xor, _)| xor), - )?; - - if failed_chunks.is_empty() { - // Notify on verification success - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_success( - chunk_manager.completed_files(), - self.make_data_public, - ); - } - - return Ok(vec![]); - } - // Notify on verification failure - if let Some(notifier) = &self.status_notifier { - notifier.on_verifying_uploaded_chunks_failure(failed_chunks.len()); - } - failed_chunks - }; - // shuffle the chunks - let mut rng = thread_rng(); - chunks_to_upload.shuffle(&mut rng); - - Ok(chunks_to_upload) - } - - async fn verify_uploaded_chunks( - &self, - chunks_paths: &[(XorName, PathBuf)], - ) -> Result> { - let mut stream = futures::stream::iter(chunks_paths) - .map(|(xorname, path)| async move { - let chunk = Chunk::new(Bytes::from(std::fs::read(path)?)); - let res = self.client.verify_chunk_stored(&chunk).await; - Ok::<_, Report>((xorname, path.clone(), res.is_err())) - }) - .buffer_unordered(self.upload_cfg.batch_size); - let mut failed_chunks = Vec::new(); - - while let Some(result) = stream.next().await { - let (xorname, path, is_error) = result?; - if is_error { - warn!("Failed to fetch a chunk {xorname:?}"); - failed_chunks.push((*xorname, path)); - } - } - - Ok(failed_chunks) - } - - #[expect(clippy::type_complexity)] - fn spawn_upload_events_handler( - mut chunk_manager: ChunkManager, - 
make_data_public: bool, - chunks_to_upload_len: usize, - mut upload_event_rx: Receiver, - status_notifier: Option>, - ) -> Result>)>>> - { - let progress_bar = get_progress_bar(chunks_to_upload_len as u64)?; - let handle = tokio::spawn(async move { - let mut upload_terminated_with_error = false; - // The loop is guaranteed to end, as the channel will be - // closed when the upload completes or errors out. - while let Some(event) = upload_event_rx.recv().await { - match event { - UploadEvent::ChunkUploaded(addr) - | UploadEvent::ChunkAlreadyExistsInNetwork(addr) => { - progress_bar.clone().inc(1); - if let Err(err) = - chunk_manager.mark_completed(std::iter::once(*addr.xorname())) - { - error!("Failed to mark chunk {addr:?} as completed: {err:?}"); - } - } - UploadEvent::Error => { - upload_terminated_with_error = true; - } - UploadEvent::RegisterUploaded { .. } - | UploadEvent::RegisterUpdated { .. } - | UploadEvent::PaymentMade { .. } => {} - } - } - progress_bar.finish_and_clear(); - - // this check is to make sure that we don't partially write to the uploaded_files file if the upload process - // terminates with an error. This race condition can happen as we bail on `upload_result` before we await the - // handler. 
- if upload_terminated_with_error { - error!("Got UploadEvent::Error inside upload event loop"); - } else { - // Notify on upload failure - if let Some(notifier) = &status_notifier { - notifier.on_failed_to_upload_all_files( - chunk_manager.incomplete_files(), - chunk_manager.completed_files(), - make_data_public, - ); - } - } - - Ok::<_, Report>((chunk_manager, status_notifier)) - }); - - Ok(handle) - } -} - -/// The default -struct StdOutPrinter { - file_paths_to_print: Vec, -} - -impl FilesUploadStatusNotifier for StdOutPrinter { - fn collect_entries(&mut self, _entries_iter: Vec) {} - - fn collect_paths(&mut self, path: &Path) { - self.file_paths_to_print.push(path.to_path_buf()); - } - - fn on_verifying_uploaded_chunks_init(&self, chunks_len: usize) { - println!("Files upload attempted previously, verifying {chunks_len} chunks",); - } - - fn on_verifying_uploaded_chunks_success( - &self, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ) { - println!("All files were already uploaded and verified"); - Self::print_uploaded_msg(make_data_public); - - if completed_files.is_empty() { - println!("chunk_manager doesn't have any verified_files, nor any failed_chunks to re-upload."); - } - Self::print_completed_file_list(completed_files); - } - - fn on_verifying_uploaded_chunks_failure(&self, failed_chunks_len: usize) { - println!("{failed_chunks_len} chunks were uploaded in the past but failed to verify. 
Will attempt to upload them again..."); - } - - fn on_failed_to_upload_all_files( - &self, - incomplete_files: Vec<(&PathBuf, &OsString, &ChunkAddress)>, - completed_files: &[(PathBuf, OsString, ChunkAddress)], - make_data_public: bool, - ) { - for (_, file_name, _) in incomplete_files { - if let Some(file_name) = file_name.to_str() { - println!("Unverified file \"{file_name}\", suggest to re-upload again."); - info!("Unverified {file_name}"); - } else { - println!("Unverified file \"{file_name:?}\", suggest to re-upload again."); - info!("Unverified file {file_name:?}"); - } - } - - // log uploaded file information - Self::print_uploaded_msg(make_data_public); - Self::print_completed_file_list(completed_files); - } - - fn on_chunking_complete( - &self, - upload_cfg: &UploadCfg, - make_data_public: bool, - chunks_to_upload_len: usize, - ) { - for path in self.file_paths_to_print.iter() { - debug!( - "Uploading file(s) from {path:?} batch size {:?} will verify?: {}", - upload_cfg.batch_size, upload_cfg.verify_store - ); - if make_data_public { - info!("{path:?} will be made public and linkable"); - println!("{path:?} will be made public and linkable"); - } - } - if self.file_paths_to_print.len() == 1 { - println!( - "Splitting and uploading {:?} into {chunks_to_upload_len} chunks", - self.file_paths_to_print[0] - ); - } else { - println!( - "Splitting and uploading {:?} into {chunks_to_upload_len} chunks", - self.file_paths_to_print - ); - } - } - - fn on_upload_complete( - &self, - upload_sum: &UploadSummary, - elapsed_time: Duration, - chunks_to_upload_len: usize, - ) { - let elapsed = duration_to_minute_seconds_string(elapsed_time); - - println!( - "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \ - the leftover {} chunks in {elapsed}", - upload_sum.skipped_count, upload_sum.uploaded_count, - ); - info!( - "Among {chunks_to_upload_len} chunks, found {} already existed in network, uploaded \ - the leftover {} chunks in 
{elapsed}", - upload_sum.skipped_count, upload_sum.uploaded_count, - ); - println!("**************************************"); - println!("* Payment Details *"); - println!("**************************************"); - println!( - "Made payment of {:?} for {} chunks", - upload_sum.storage_cost, upload_sum.uploaded_count - ); - println!( - "Made payment of {:?} for royalties fees", - upload_sum.royalty_fees - ); - println!("New wallet balance: {}", upload_sum.final_balance); - } -} - -impl StdOutPrinter { - fn print_completed_file_list(completed_files: &[(PathBuf, OsString, ChunkAddress)]) { - for (_, file_name, addr) in completed_files { - let hex_addr = addr.to_hex(); - if let Some(file_name) = file_name.to_str() { - println!("Uploaded \"{file_name}\" to address {hex_addr}"); - info!("Uploaded {file_name} to {hex_addr}"); - } else { - println!("Uploaded \"{file_name:?}\" to address {hex_addr}"); - info!("Uploaded {file_name:?} to {hex_addr}"); - } - } - } - - fn print_uploaded_msg(make_data_public: bool) { - println!("**************************************"); - println!("* Uploaded Files *"); - if !make_data_public { - println!("* *"); - println!("* These are not public by default. *"); - println!("* Reupload with `-p` option *"); - println!("* to publish the datamaps. *"); - } - println!("**************************************"); - } -} diff --git a/sn_cli/src/files/upload.rs b/sn_cli/src/files/upload.rs deleted file mode 100644 index 2aa13d7dd8..0000000000 --- a/sn_cli/src/files/upload.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bytes::Bytes; -use color_eyre::Result; -use serde::Deserialize; -use sn_client::protocol::storage::ChunkAddress; -use std::{ffi::OsString, path::Path}; -use tracing::{error, warn}; - -/// Subdir for storing uploaded file into -pub const UPLOADED_FILES: &str = "uploaded_files"; - -/// The metadata related to file that has been uploaded. -/// This is written during upload and read during downloads. -#[derive(Clone, Debug, Deserialize)] -pub struct UploadedFile { - pub filename: OsString, - pub data_map: Option, -} - -impl UploadedFile { - /// Write an UploadedFile to a path identified by the hex of the head ChunkAddress. - /// If you want to update the data_map to None, calling this function will overwrite the previous value. - pub fn write(&self, root_dir: &Path, head_chunk_address: &ChunkAddress) -> Result<()> { - let uploaded_files = root_dir.join(UPLOADED_FILES); - - if !uploaded_files.exists() { - if let Err(error) = std::fs::create_dir_all(&uploaded_files) { - error!("Failed to create {uploaded_files:?} because {error:?}"); - } - } - - let uploaded_file_path = uploaded_files.join(head_chunk_address.to_hex()); - - if self.data_map.is_none() { - warn!( - "No data-map being written for {:?} as it is empty", - self.filename - ); - } - let serialized = - rmp_serde::to_vec(&(&self.filename, &self.data_map)).inspect_err(|_err| { - error!("Failed to serialize UploadedFile"); - })?; - - std::fs::write(&uploaded_file_path, serialized).inspect_err(|_err| { - error!( - "Could not write UploadedFile of {:?} to {uploaded_file_path:?}", - self.filename - ); - })?; - - Ok(()) - } - - pub fn read(path: &Path) -> Result { - let bytes = std::fs::read(path).inspect_err(|_err| { - error!("Error while reading the UploadedFile from {path:?}"); - })?; - let metadata = rmp_serde::from_slice(&bytes).inspect_err(|_err| { - error!("Error while 
deserializing UploadedFile for {path:?}"); - })?; - Ok(metadata) - } -} diff --git a/sn_cli/src/lib.rs b/sn_cli/src/lib.rs deleted file mode 100644 index 4d0e77b41e..0000000000 --- a/sn_cli/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod acc_packet; -mod files; -pub mod utils; - -pub use acc_packet::AccountPacket; -pub use files::{ - download_file, download_files, ChunkManager, Estimator, FilesUploadStatusNotifier, - FilesUploadSummary, FilesUploader, UploadedFile, UPLOADED_FILES, -}; diff --git a/sn_cli/src/utils.rs b/sn_cli/src/utils.rs deleted file mode 100644 index 093b939960..0000000000 --- a/sn_cli/src/utils.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use std::time::Duration; - -/// Returns whether a hex string is a valid secret key in hex format. 
-pub fn is_valid_key_hex(hex: &str) -> bool { - hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) -} - -pub fn duration_to_minute_seconds_string(duration: Duration) -> String { - let elapsed_minutes = duration.as_secs() / 60; - let elapsed_seconds = duration.as_secs() % 60; - if elapsed_minutes > 0 { - format!("{elapsed_minutes} minutes {elapsed_seconds} seconds") - } else { - format!("{elapsed_seconds} seconds") - } -} - -pub fn duration_to_minute_seconds_miliseconds_string(duration: Duration) -> String { - let elapsed_minutes = duration.as_secs() / 60; - let elapsed_seconds = duration.as_secs() % 60; - let elapsed_millis = duration.subsec_millis(); - if elapsed_minutes > 0 { - format!("{elapsed_minutes} minutes {elapsed_seconds} seconds {elapsed_millis} milliseconds") - } else if elapsed_seconds > 0 { - format!("{elapsed_seconds} seconds {elapsed_millis} milliseconds") - } else { - format!("{elapsed_millis} milliseconds") - } -} diff --git a/sn_client/CHANGELOG.md b/sn_client/CHANGELOG.md deleted file mode 100644 index fb045ff82c..0000000000 --- a/sn_client/CHANGELOG.md +++ /dev/null @@ -1,2712 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [Unreleased] - -## [0.107.7](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.6...sn_client-v0.107.7) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(network)* set metrics server to run on localhost - -## [0.107.6](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.5...sn_client-v0.107.6) - 2024-06-04 - -### Fixed -- *(transfer)* mismatched key shall result in decryption error - -### Other -- *(transfer)* make discord_name decryption backward compatible -## [0.107.5](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.4...sn_client-v0.107.5) - 2024-06-04 - -### Other -- *(network)* set metrics server to run on localhost - -## [0.107.4](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.3...sn_client-v0.107.4) - 2024-06-04 - -### Fixed -- *(faucet)* save the transfer not the cashnote for foundation - -### Other -- *(release)* sn_client-v0.107.3/sn_transfers-v0.18.4/sn_cli-v0.93.2/sn_node-v0.107.2/node-launchpad-v0.3.2/sn-node-manager-v0.9.2/sn_auditor-v0.1.20/sn_networking-v0.16.2/sn_protocol-v0.17.2/sn_faucet-v0.4.22/sn_service_management-v0.3.3/sn_node_rpc_client-v0.6.20 - -## [0.107.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.2...sn_client-v0.107.3) - 2024-06-03 - -### Fixed -- enable compile time sk setting for faucet/genesis - -## [0.107.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.107.1...sn_client-v0.107.2) - 2024-06-03 - -### Other -- bump versions to enable re-release with env vars at compilation - -## [0.107.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.3...sn_client-v0.107.0) - 2024-06-03 - -### Added -- *(faucet)* write foundation cash note to disk -- *(client)* read existing mnemonic from disk if 
avilable -- integrate DAG crawling fixes from Josh and Qi -- *(networking)* add UPnP metrics -- *(network)* [**breaking**] move network versioning away from sn_protocol -- *(keys)* enable compile or runtime override of keys -- *(launchpad)* use nat detection server to determine the nat status - -### Fixed -- *(networking)* upnp feature gates for metrics -- *(networking)* conditional upnp metrics - -### Other -- rename DAG building to crawling -- spend verification error management -- *(networking)* cargo fmt -- use secrets during build process -- *(release)* sn_auditor-v0.1.17/sn_client-v0.106.3/sn_networking-v0.15.3/sn_transfers-v0.18.1/sn_logging-v0.2.27/sn_cli-v0.92.0/sn_faucet-v0.4.19/sn_node-v0.106.5/sn_service_management-v0.3.0/node-launchpad-v0.2.0/sn-node-manager-v0.8.0/sn_protocol-v0.16.7/sn_node_rpc_client-v0.6.18 - -## [0.106.3](https://github.com/joshuef/safe_network/compare/sn_client-v0.106.2...sn_client-v0.106.3) - 2024-05-24 - -### Added -- improved spend verification with DAG and fault detection -- upgrade cli audit to use DAG -- remove two uneeded env vars -- pass genesis_cn pub fields separate to hide sk -- hide genesis keypair -- pass sk_str via cli opt -- *(node)* use separate keys of Foundation and Royalty -- *(wallet)* ensure genesis wallet attempts to load from local on init first -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- *(audit)* intercept sender of the payment forward -- *(audit)* collect payment forward statistics -- spend reason enum and sized cipher -- *(metrics)* expose store cost value -- keep track of the estimated network size metric -- record lip2p relay and dctur metrics -- *(node)* periodically forward reward to specific address -- use default keys for genesis, or override -- use different key for payment forward -- hide genesis keypair -- tracking beta rewards from the DAG 
- -### Fixed -- *(uploader)* do not error out immediately on max repayment errors -- *(node)* notify fetch completion earlier to avoid being skipped -- avoid adding mixed type addresses into RT -- enable libp2p metrics to be captured -- correct genesis_pk naming -- genesis_cn public fields generated from hard coded value -- invalid spend reason in data payments - -### Other -- *(uploader)* return summary when upload fails due to max repayments -- *(uploader)* return the list of max repayment reached items -- improve cli DAG collection -- remove now unused mostly duplicated code -- improve DAG verification redundancy -- *(faucet)* devskim ignore -- *(faucet)* log existing faucet balance if non-zero -- *(faucet)* add foundation PK as const -- *(faucet)* clarify logs for verification -- increase initial faucet balance -- add temp log -- *(faucet)* refresh cashnotes on fund -- devSkim ignore foundation pub temp key -- update got 'gifting-from-genesis' faucet feat -- make open metrics feature default but without starting it by default -- Revert "feat(node): make spend and cash_note reason field configurable" -- Revert "feat(cli): track spend creation reasons during audit" -- Revert "chore: refactor CASH_NOTE_REASON strings to consts" -- Revert "feat(client): dump spends creation_reason statistics" -- Revert "chore: address review comments" -- *(node)* tuning the pricing curve -- *(node)* remove un-necessary is_relayed check inside add_potential_candidates -- move historic_quoting_metrics out of the record_store dir -- clippy fixes for open metrics feature -- *(networking)* update tests for pricing curve tweaks -- *(refactor)* stabilise node size to 4k records, -- Revert "chore: rename output reason to purpose for clarity" -- *(transfers)* comment and naming updates for clarity -- log genesis PK -- rename improperly named foundation_key -- reconfigure local network owner args -- use const for default user or owner -- resolve errors after reverts -- Revert "feat: spend 
shows the purposes of outputs created for" -- *(node)* use proper SpendReason enum -- add consts - -## [0.106.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.1...sn_client-v0.106.2) - 2024-05-09 - -### Fixed -- *(relay_manager)* filter out bad nodes - -## [0.106.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0...sn_client-v0.106.1) - 2024-05-08 - -### Other -- *(release)* sn_registers-v0.3.13 - -## [0.106.0-alpha.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.106.0-alpha.5...sn_client-v0.106.0-alpha.6) - 2024-05-07 - -### Added -- *(client)* dump spends creation_reason statistics -- *(cli)* track spend creation reasons during audit -- *(node)* make spend and cash_note reason field configurable -- *(client)* speed up register checks when paying -- double spend fork detection, fix invalid edges issue -- dag faults unit tests, sn_auditor offline mode -- [**breaking**] renamings in CashNote -- *(faucet)* log from sn_client -- unit testing dag, double spend poisoning tweaks -- report protocol mismatch error -- *(network)* add --upnp flag to node -- *(networking)* feature gate 'upnp' -- *(networking)* add UPnP behavior to open port -- *(relay)* remove autonat and enable hole punching manually -- *(relay)* remove old listen addr if we are using a relayed connection -- *(relay)* update the relay manager if the listen addr has been closed -- *(relay)* remove the dial flow -- *(relay)* impl RelayManager to perform circuit relay when behind NAT -- *(networking)* add in autonat server basics -- *(neetworking)* initial tcp use by default -- *(networking)* clear record on valid put -- *(node)* restrict replication fetch range when node is full -- *(store)* load existing records in parallel -- *(node)* notify peer it is now considered as BAD -- *(node)* restore historic quoting metrics to allow restart -- *(networking)* shift to use ilog2 bucket distance for close data calcs -- spend shows the purposes of outputs 
created for -- *(cli)* generate a mnemonic as wallet basis if no wallet found -- *(transfers)* do not genereate wallet by default -- [**breaking**] rename token to amount in Spend -- *(tui)* adding services -- *(network)* network contacts url should point to the correct network version - -### Fixed -- create faucet via account load or generation -- more test and cli fixes -- update calls to HotWallet::load -- *(client)* set uploader to use mnemonic wallet loader -- *(client)* move acct_packet mnemonic into client layer -- *(client)* calm down broadcast error logs if we've no listeners -- spend dag double spend links -- orphan test -- orphan parent bug, improve fault detection and logging -- *(networking)* allow wasm32 compilation -- *(network)* remove all external addresses related to a relay server -- *(relay_manager)* remove external addr on connection close -- relay server should not close connections made to a reserved peer -- short circuit identify if the peer is already present in the routitng table -- update outdated connection removal flow -- do not remove outdated connections -- increase relay server capacity -- keep idle connections forever -- pass peer id while crafting relay address -- *(relay)* crafted multi address should contain the P2PCircuit protocol -- do not add reported external addressese if we are behind home network -- *(networking)* do not add to dialed peers -- *(network)* do not strip out relay's PeerId -- *(relay)* craft the correctly formatted relay address -- *(network)* do not perform AutoNat for clients -- *(relay_manager)* do not dial with P2PCircuit protocol -- *(test)* quoting metrics might have live_time field changed along time -- *(node)* avoid false alert on FailedLocalRecord -- *(record_store)* prune only one record at a time -- *(node)* notify replication_fetcher of early completion -- *(node)* fetcher completes on_going_fetch entry on record_key only -- *(node)* not send out replication when failed read from local -- 
*(networking)* increase the local responsible range of nodes to K_VALUE peers away -- *(network)* clients should not perform farthest relevant record check -- *(node)* replication_fetch keep distance_range sync with record_store -- *(node)* replication_list in range filter -- transfer tests for HotWallet creation -- typo -- *(manager)* do not print to stdout on low verbosity level -- *(protocol)* evaluate NETWORK_VERSION_MODE at compile time - -### Other -- *(versions)* sync versions with latest crates.io vs -- check DAG crawling performance -- address review comments -- refactor CASH_NOTE_REASON strings to consts -- store owner info inside node instead of network -- small cleanup of dead code -- improve naming and typo fix -- clarify client documentation -- clarify client::new description -- clarify client documentation -- clarify client::new description -- *(deps)* bump dependencies -- cargo fmt -- rename output reason to purpose for clarity -- *(network)* move event handling to its own module -- cleanup network events -- *(network)* remove nat detection via incoming connections check -- enable connection keepalive timeout -- remove non relayed listener id from relay manager -- enable multiple relay connections -- return early if peer is not a node -- *(tryout)* do not add new relay candidates -- add debug lines while adding potential relay candidates -- do not remove old non-relayed listeners -- clippy fix -- *(networking)* remove empty file -- *(networking)* re-add global_only -- use quic again -- log listner id -- *(relay)* add candidate even if we are dialing -- remove quic -- cleanup, add in relay server behaviour, and todo -- *(node)* lower some log levels to reduce log size -- *(node)* optimise record_store farthest record calculation -- *(node)* do not reset farthest_acceptance_distance -- *(node)* remove duplicated record_store fullness check -- *(networking)* notify network event on failed put due to prune -- *(networking)* ensure pruned data is indeed 
further away than kept -- *(CI)* confirm there is no failed replication fetch -- *(networking)* remove circular vec error -- *(node)* unit test for recover historic quoting metrics -- *(node)* pass entire QuotingMetrics into calculate_cost_for_records -- *(node)* extend distance range -- addres review comments -- *(transfers)* reduce error size -- *(transfer)* unit tests for PaymentQuote -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_auditor-v0.1.3-alpha.0/sn_client-v0.105.3-alpha.0/sn_networking-v0.14.2-alpha.0/sn_protocol-v0.16.2-alpha.0/sn_build_info-v0.1.7-alpha.0/sn_transfers-v0.17.2-alpha.0/sn_peers_acquisition-v0.2.9-alpha.0/sn_cli-v0.90.3-alpha.0/sn_node-v0.105.4-alpha.0/sn-node-manager-v0.7.3-alpha.0/sn_faucet-v0.4.4-alpha.0/sn_service_management-v0.2.2-alpha.0/sn_node_rpc_client-v0.6.4-alpha.0 -- *(release)* sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(release)* sn_client-v0.105.3-alpha.5/sn_protocol-v0.16.3-alpha.2/sn_cli-v0.90.4-alpha.5/sn_node-v0.105.6-alpha.4/sn-node-manager-v0.7.4-alpha.1/sn_auditor-v0.1.7-alpha.0/sn_networking-v0.14.4-alpha.0/sn_peers_acquisition-v0.2.10-alpha.0/sn_faucet-v0.4.9-alpha.0/sn_service_management-v0.2.4-alpha.0/sn_node_rpc_client-v0.6.8-alpha.0 -- *(release)* 
sn_client-v0.105.3-alpha.3/sn_protocol-v0.16.3-alpha.1/sn_peers_acquisition-v0.2.9-alpha.2/sn_cli-v0.90.4-alpha.3/sn_node-v0.105.6-alpha.1/sn_auditor-v0.1.5-alpha.0/sn_networking-v0.14.3-alpha.0/sn_faucet-v0.4.7-alpha.0/sn_service_management-v0.2.3-alpha.0/sn-node-manager-v0.7.4-alpha.0/sn_node_rpc_client-v0.6.6-alpha.0 -- *(release)* sn_auditor-v0.1.3-alpha.1/sn_client-v0.105.3-alpha.1/sn_networking-v0.14.2-alpha.1/sn_peers_acquisition-v0.2.9-alpha.1/sn_cli-v0.90.4-alpha.1/sn_metrics-v0.1.4-alpha.0/sn_node-v0.105.5-alpha.1/sn_service_management-v0.2.2-alpha.1/sn-node-manager-v0.7.3-alpha.1/sn_node_rpc_client-v0.6.4-alpha.1/token_supplies-v0.1.47-alpha.0 -- *(release)* sn_build_info-v0.1.7-alpha.1/sn_protocol-v0.16.3-alpha.0/sn_cli-v0.90.4-alpha.0/sn_faucet-v0.4.5-alpha.0/sn_node-v0.105.5-alpha.0 - -## [0.105.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.105.1...sn_client-v0.105.2) - 2024-03-28 - -### Fixed -- *(cli)* read from cache during initial chunking process -- *(uploader)* do not error out on quote expiry during get store cost - -## [0.105.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.105.0...sn_client-v0.105.1) - 2024-03-28 - -### Added -- *(uploader)* error out if the quote has expired during get store_cost -- *(uploader)* use WalletApi to prevent loading client wallet during each operation -- *(transfers)* implement WalletApi to expose common methods - -### Fixed -- *(uploader)* clarify the use of root and wallet dirs - -### Other -- *(uploader)* update docs - -## [0.105.0](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.31...sn_client-v0.105.0) - 2024-03-27 - -### Added -- svg caching, fault tolerance during DAG collection -- *(uploader)* collect all the uploaded registers -- *(uploader)* repay immediately if the quote has expired -- *(uploader)* allow either chunk or chunk path to be used -- *(uploader)* use ClientRegister instead of Registers -- *(uploader)* register existence should be checked 
before going with payment flow -- *(client)* use the new Uploader insetead of FilesUpload -- *(client)* implement a generic uploader with repay ability -- *(transfers)* enable client to check if a quote has expired -- [**breaking**] remove gossip code -- *(client)* make publish register as an associated function -- *(network)* filter out peers when returning store cost -- *(transfers)* [**breaking**] support multiple payments for the same xorname -- use Arc inside Client, Network to reduce clone cost -- *(networking)* add NodeIssue for tracking bad node shunning -- *(faucet)* rate limit based upon wallet locks - -### Fixed -- *(test)* use tempfile lib instead of stdlib to create temp dirs -- *(clippy)* allow too many arguments as it is a private function -- *(uploader)* remove unused error tracking and allow retries for new payee -- *(uploader)* make the internals more clean -- *(uploader)* update force make payment logic -- *(register)* permissions verification was not being made by some Register APIs -- *(node)* fetching new data shall not cause timed_out immediately -- *(test)* generate unique temp dir to avoid read outdated data -- *(register)* shortcut permissions check when anyone can write to Register - -### Other -- *(uploader)* remove unused code path when store cost is 0 -- *(uploader)* implement tests to test the basic pipeline logic -- *(uploader)* remove FilesApi dependency -- *(uploader)* initial test setup for uploader -- *(uploader)* implement UploaderInterface for easier testing -- *(uploader)* remove failed_to states -- *(register)* minor simplification in Register Permissions implementation -- *(node)* refactor pricing metrics -- lower some networking log levels -- *(node)* loose bad node detection criteria -- *(node)* optimization to reduce logging - -## [0.104.31](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.30...sn_client-v0.104.31) - 2024-03-21 - -### Added -- improve parallelisation with buffered streams -- refactor DAG, 
improve error management and security -- dag error recording -- *(folders)* folders APIs to accept an encryption key for metadata chunks -- *(protocol)* add rpc to set node log level on the fly - -### Other -- *(cli)* adding automated test for metadata chunk encryption -- *(node)* reduce bad_nodes check resource usage - -## [0.104.30](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29...sn_client-v0.104.30) - 2024-03-18 - -### Other -- updated the following local packages: sn_networking - -## [0.104.29-alpha.2](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.1...sn_client-v0.104.29-alpha.2) - 2024-03-14 - -### Added -- moved param to outside calc -- refactor spend validation - -### Fixed -- dont stop spend verification at spend error, generalise spend serde - -### Other -- store test utils under a new crate -- *(acc-packet)* adding automated tests to sn_cli::AccountPacket -- improve code quality -- new `sn_service_management` crate -- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82 - -## [0.104.29-alpha.1](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.29-alpha.0...sn_client-v0.104.29-alpha.1) - 2024-03-08 - -### Other -- *(folders)* adding automated tests to sn_client::FoldersApi - -## [0.104.28](https://github.com/joshuef/safe_network/compare/sn_client-v0.104.27...sn_client-v0.104.28) - 2024-03-06 - -### Added -- *(cli)* pull any Folders changes from network when syncing and merge them to local version -- make sn_cli use sn_clients reeports -- *(folders)* sync up logic and CLI cmd -- *(register)* when a new entry is written return its hash -- refactor upload with iter -- actionable double spend reporting -- collect royalties through DAG -- *(folders)* store files data-map within Folders metadata chunk -- *(folders)* regenerate tracking info when downloading Folders fm the network -- *(folders)* realise local changes made to folders/files -- *(folders)* keep track of local changes to Folders -- expose 
sn related deps to app builders - -### Fixed -- filter out spent cashnotes in received client transfers - -### Other -- clean swarm commands errs and spend errors -- also add deps features in sn_client -- *(release)* sn_transfers-v0.16.1 -- *(release)* sn_protocol-v0.15.0/sn-node-manager-v0.4.0 -- *(cli)* removing some redundant logic from acc-packet codebase -- *(folders)* some simplifications to acc-packet codebase - -## [0.104.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.26...sn_client-v0.104.27) - 2024-02-23 - -### Other -- test docs test -- write online documentation -- push documentation -- sync documentation -- write atop write merg branches -- red and write register docs -- create register docs -- register docs - -## [0.104.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.25...sn_client-v0.104.26) - 2024-02-21 - -### Other -- *(release)* initial alpha test release - -## [0.104.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.24...sn_client-v0.104.25) - 2024-02-20 - -### Other -- updated the following local packages: sn_protocol - -## [0.104.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.23...sn_client-v0.104.24) - 2024-02-20 - -### Added -- estimate feature with ci and balance after with fn docs - -## [0.104.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.22...sn_client-v0.104.23) - 2024-02-20 - -### Other -- updated the following local packages: sn_networking - -## [0.104.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.21...sn_client-v0.104.22) - 2024-02-20 - -### Added -- spend and DAG utilities - -### Other -- improve SpendDagGet names - -## [0.104.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.20...sn_client-v0.104.21) - 2024-02-20 - -### Added -- *(folders)* move folders/files metadata out of Folders entries - -## 
[0.104.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.19...sn_client-v0.104.20) - 2024-02-20 - -### Added -- *(registers)* expose MerkleReg of RegisterCrdt in all Register types - -### Fixed -- clippy warnings - -### Other -- marke merkle_reg() accessors as unstable (in comment) on Register types - -## [0.104.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.18...sn_client-v0.104.19) - 2024-02-20 - -### Other -- improve DAG crawling performance with better parallelisation - -## [0.104.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.17...sn_client-v0.104.18) - 2024-02-19 - -### Other -- updated the following local packages: sn_networking - -## [0.104.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.16...sn_client-v0.104.17) - 2024-02-19 - -### Other -- updated the following local packages: sn_networking - -## [0.104.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.15...sn_client-v0.104.16) - 2024-02-19 - -### Other -- updated the following local packages: sn_networking - -## [0.104.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.14...sn_client-v0.104.15) - 2024-02-15 - -### Added -- *(client)* keep payee as part of storage payment cache - -### Other -- *(client)* remove the payee-map from StoragePaymentResult - -## [0.104.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.13...sn_client-v0.104.14) - 2024-02-15 - -### Other -- updated the following local packages: sn_networking - -## [0.104.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.12...sn_client-v0.104.13) - 2024-02-15 - -### Other -- updated the following local packages: sn_protocol - -## [0.104.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.11...sn_client-v0.104.12) - 2024-02-14 - -### Other -- updated the following local packages: sn_protocol - -## 
[0.104.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.10...sn_client-v0.104.11) - 2024-02-14 - -### Other -- *(refactor)* move mod.rs files the modern way - -## [0.104.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.9...sn_client-v0.104.10) - 2024-02-13 - -### Other -- updated the following local packages: sn_protocol - -## [0.104.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.8...sn_client-v0.104.9) - 2024-02-13 - -### Added -- filtering dag errors -- identify orphans and inconsistencies in the DAG - -### Fixed -- manage the genesis spend case - -## [0.104.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.7...sn_client-v0.104.8) - 2024-02-12 - -### Other -- updated the following local packages: sn_networking - -## [0.104.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.6...sn_client-v0.104.7) - 2024-02-12 - -### Other -- updated the following local packages: sn_networking - -## [0.104.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.5...sn_client-v0.104.6) - 2024-02-12 - -### Added -- *(cli)* single payment for all folders being synced -- *(cli)* adding Folders download CLI cmd -- *(client)* adding Folders sync API and CLI cmd - -### Other -- *(cli)* improvements based on peer review - -## [0.104.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.4...sn_client-v0.104.5) - 2024-02-09 - -### Other -- updated the following local packages: sn_networking - -## [0.104.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.3...sn_client-v0.104.4) - 2024-02-09 - -### Other -- updated the following local packages: sn_networking - -## [0.104.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.2...sn_client-v0.104.3) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.104.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.1...sn_client-v0.104.2) - 2024-02-08 - -### 
Added -- move the RetryStrategy into protocol and use that during cli upload/download -- *(client)* perform more retries if we are verifying a register -- *(network)* impl RetryStrategy to make the reattempts flexible - -### Fixed -- *(ci)* update the reattempt flag to retry_strategy flag for the cli - -### Other -- *(network)* rename re-attempts to retry strategy - -## [0.104.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.104.0...sn_client-v0.104.1) - 2024-02-08 - -### Other -- updated the following local packages: sn_networking - -## [0.104.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.7...sn_client-v0.104.0) - 2024-02-07 - -### Added -- *(client)* put register to the peer that we paid to -- *(client)* [**breaking**] make the result of the storage payment into a struct - -### Fixed -- rust docs error - -## [0.103.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.6...sn_client-v0.103.7) - 2024-02-07 - -### Added -- extendable local state DAG in cli - -## [0.103.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.5...sn_client-v0.103.6) - 2024-02-06 - -### Other -- updated the following local packages: sn_transfers - -## [0.103.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.4...sn_client-v0.103.5) - 2024-02-05 - -### Other -- updated the following local packages: sn_networking - -## [0.103.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.3...sn_client-v0.103.4) - 2024-02-05 - -### Other -- updated the following local packages: sn_networking - -## [0.103.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.2...sn_client-v0.103.3) - 2024-02-05 - -### Other -- change to hot wallet -- docs formatting -- cargo fmt changes -- example for api verify uploaded chunks -- example for api verify cash note redemptions -- example for api publish on topic -- example for api unsubscribe to topic -- example for api subscribe to topic -- example for 
api get spend from network -- example for api verify register stored -- example for api get chunk -- example for api store chunk -- example for api create and pay for register -- example for api get register -- example for api get signed reg from network -- example for api signer pk -- example for api signer -- example for api sign -- example for api events channel -- example for api new -- apply format and params to doc templates -- better template set -- mark applicable functions with empty headers - -## [0.103.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.1...sn_client-v0.103.2) - 2024-02-05 - -### Other -- updated the following local packages: sn_protocol - -## [0.103.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.103.0...sn_client-v0.103.1) - 2024-02-02 - -### Other -- updated the following local packages: sn_networking - -## [0.103.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.22...sn_client-v0.103.0) - 2024-02-02 - -### Other -- [**breaking**] renaming LocalWallet to HotWallet as it holds the secret key for signing tx - -## [0.102.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.21...sn_client-v0.102.22) - 2024-02-01 - -### Other -- updated the following local packages: sn_networking - -## [0.102.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.20...sn_client-v0.102.21) - 2024-02-01 - -### Fixed -- *(client)* error out when fetching large data_map - -## [0.102.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.19...sn_client-v0.102.20) - 2024-02-01 - -### Other -- updated the following local packages: sn_networking - -## [0.102.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.18...sn_client-v0.102.19) - 2024-01-31 - -### Other -- nano tokens to network address -- change to question mark from expect -- test doc changes to remove code and refactor for pr -- broadcast signed spends -- send -- verify cash note -- 
receive and cargo fmt -- send spends - -## [0.102.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.17...sn_client-v0.102.18) - 2024-01-31 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.102.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.16...sn_client-v0.102.17) - 2024-01-30 - -### Other -- *(client)* log client upload failure error - -## [0.102.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.15...sn_client-v0.102.16) - 2024-01-30 - -### Fixed -- *(client)* error out on verify_chunk_store - -## [0.102.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.14...sn_client-v0.102.15) - 2024-01-30 - -### Other -- updated the following local packages: sn_networking - -## [0.102.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.13...sn_client-v0.102.14) - 2024-01-30 - -### Other -- updated the following local packages: sn_protocol - -## [0.102.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.12...sn_client-v0.102.13) - 2024-01-29 - -### Other -- *(sn_transfers)* making some functions/helpers to be constructor methods of public structs - -## [0.102.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.11...sn_client-v0.102.12) - 2024-01-25 - -### Other -- improved pay for storage -- mut wallet description -- revert to mut wallet -- change to wallet result -- cargo fmt -- into wallet doc -- into wallet doc -- expand abbreviations mutable wallet -- pay for storage clone for test pass -- expand on abbreviation and added detail -- pay for records example -- pay for records and cleanup -- pay for storage once detail -- send unsigned detail -- pay for storage -- get store cost at addr unused - -## [0.102.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.10...sn_client-v0.102.11) - 2024-01-25 - -### Other -- updated the following local packages: sn_networking - -## 
[0.102.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.9...sn_client-v0.102.10) - 2024-01-25 - -### Added -- client webtransport-websys feat - -### Other -- use a single target_arch.rs to simplify imports for wasm32 or no - -## [0.102.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.8...sn_client-v0.102.9) - 2024-01-24 - -### Other -- updated the following local packages: sn_networking, sn_networking - -## [0.102.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.7...sn_client-v0.102.8) - 2024-01-24 - -### Added -- client webtransport-websys feat - -### Other -- tidy up wasm32 as target arch rather than a feat - -## [0.102.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.6...sn_client-v0.102.7) - 2024-01-23 - -### Other -- *(release)* sn_protocol-v0.10.14/sn_networking-v0.12.35 - -## [0.102.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.5...sn_client-v0.102.6) - 2024-01-22 - -### Other -- wallet docs - -## [0.102.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.4...sn_client-v0.102.5) - 2024-01-22 - -### Added -- spend dag utils - -## [0.102.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.3...sn_client-v0.102.4) - 2024-01-18 - -### Other -- updated the following local packages: sn_protocol - -## [0.102.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.2...sn_client-v0.102.3) - 2024-01-18 - -### Added -- set quic as default transport - -## [0.102.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.1...sn_client-v0.102.2) - 2024-01-18 - -### Other -- updated the following local packages: sn_transfers - -## [0.102.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.102.0...sn_client-v0.102.1) - 2024-01-17 - -### Other -- fixed typo -- filled missing arguments -- formatting -- formatting -- new wallet docs - -## 
[0.102.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.13...sn_client-v0.102.0) - 2024-01-17 - -### Fixed -- *(docs)* update Client signature for doc test -- *(client)* move out the peers added var to event handler loop - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.101.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.12...sn_client-v0.101.13) - 2024-01-17 - -### Other -- new wallet client example - -## [0.101.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.11...sn_client-v0.101.12) - 2024-01-16 - -### Other -- updated the following local packages: sn_transfers - -## [0.101.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.10...sn_client-v0.101.11) - 2024-01-15 - -### Fixed -- *(client)* avoid deadlock during upload in case of error - -## [0.101.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.9...sn_client-v0.101.10) - 2024-01-15 - -### Other -- updated the following local packages: sn_protocol - -## [0.101.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.8...sn_client-v0.101.9) - 2024-01-15 - -### Fixed -- *(client)* cache payments via disk instead of memory map - -### Other -- *(client)* collect wallet handling time statistics - -## [0.101.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.7...sn_client-v0.101.8) - 2024-01-12 - -### Other -- updated the following local packages: sn_networking - -## [0.101.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.6...sn_client-v0.101.7) - 2024-01-12 - -### Fixed -- *(client)* avoid dead lock with less chunks - -## [0.101.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.5...sn_client-v0.101.6) - 2024-01-11 - -### Other -- *(client)* refactor client upload flow - -## [0.101.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.4...sn_client-v0.101.5) - 2024-01-11 - -### Added -- error 
if file size smaller than MIN_ENCRYPTABLE_BYTES - -### Other -- update self_encryption dep - -## [0.101.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.3...sn_client-v0.101.4) - 2024-01-11 - -### Other -- updated the following local packages: sn_networking - -## [0.101.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.2...sn_client-v0.101.3) - 2024-01-10 - -### Added -- *(client)* client APIs and CLI cmd to broadcast a transaction signed offline - -### Other -- fixup send_spends and use ExcessiveNanoValue error - -## [0.101.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.1...sn_client-v0.101.2) - 2024-01-10 - -### Added -- allow register CLI to create a public register writable to anyone - -## [0.101.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.101.0...sn_client-v0.101.1) - 2024-01-09 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.101.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.100.1...sn_client-v0.101.0) - 2024-01-09 - -### Added -- *(client)* use buffered future stream to download chunks - -### Fixed -- *(client)* empty out the download cache once the stream exits -- *(ci)* fix clippy error due to Send not being general - -### Other -- *(client)* add docs to FilesDownload -- *(client)* [**breaking**] move read_from range into `DownloadFiles` - -## [0.100.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.100.0...sn_client-v0.100.1) - 2024-01-09 - -### Other -- get spend from network only require Majority - -## [0.100.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.42...sn_client-v0.100.0) - 2024-01-08 - -### Added -- *(cli)* integrate FilesDownload with cli -- *(client)* emit events from download process - -### Other -- *(client)* [**breaking**] refactor `Files` into `FilesUpload` - -## [0.99.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.41...sn_client-v0.99.42) 
- 2024-01-08 - -### Other -- updated the following local packages: sn_networking - -## [0.99.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.40...sn_client-v0.99.41) - 2024-01-08 - -### Other -- more doc updates to readme files - -## [0.99.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.39...sn_client-v0.99.40) - 2024-01-08 - -### Fixed -- *(client)* reset sequential_payment_fails on batch upload success - -## [0.99.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.38...sn_client-v0.99.39) - 2024-01-05 - -### Other -- add clippy unwrap lint to workspace - -## [0.99.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.37...sn_client-v0.99.38) - 2024-01-05 - -### Added -- *(network)* move the kad::put_record_to inside PutRecordCfg - -## [0.99.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.36...sn_client-v0.99.37) - 2024-01-03 - -### Added -- *(client)* clients no longer upload data_map by default - -### Other -- refactor for clarity around head_chunk_address -- *(cli)* do not write datamap chunk if non-public - -## [0.99.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.35...sn_client-v0.99.36) - 2024-01-03 - -### Other -- updated the following local packages: sn_networking - -## [0.99.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.34...sn_client-v0.99.35) - 2024-01-02 - -### Fixed -- *(client)* wallet not progress with unconfirmed tx - -## [0.99.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.33...sn_client-v0.99.34) - 2024-01-02 - -### Other -- updated the following local packages: sn_networking - -## [0.99.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.32...sn_client-v0.99.33) - 2023-12-29 - -### Other -- updated the following local packages: sn_networking - -## [0.99.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.31...sn_client-v0.99.32) - 2023-12-29 - -### 
Added -- use put_record_to during upload chunk - -## [0.99.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.30...sn_client-v0.99.31) - 2023-12-26 - -### Other -- updated the following local packages: sn_networking - -## [0.99.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.29...sn_client-v0.99.30) - 2023-12-22 - -### Other -- updated the following local packages: sn_networking - -## [0.99.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.28...sn_client-v0.99.29) - 2023-12-21 - -### Other -- *(client)* emit chunk Uploaded event if a chunk was verified during repayment - -## [0.99.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.27...sn_client-v0.99.28) - 2023-12-20 - -### Other -- reduce default batch size - -## [0.99.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.26...sn_client-v0.99.27) - 2023-12-19 - -### Added -- network royalties through audit POC - -## [0.99.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.25...sn_client-v0.99.26) - 2023-12-19 - -### Other -- updated the following local packages: sn_networking - -## [0.99.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.24...sn_client-v0.99.25) - 2023-12-19 - -### Fixed -- *(test)* tests should try to load just the faucet wallet - -## [0.99.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.23...sn_client-v0.99.24) - 2023-12-19 - -### Other -- updated the following local packages: sn_networking - -## [0.99.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.22...sn_client-v0.99.23) - 2023-12-19 - -### Fixed -- *(cli)* mark chunk completion as soon as we upload each chunk - -## [0.99.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.21...sn_client-v0.99.22) - 2023-12-18 - -### Added -- *(transfers)* add api for cleaning up CashNotes - -## 
[0.99.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.20...sn_client-v0.99.21) - 2023-12-18 - -### Added -- *(client)* update the Files config via setters -- *(client)* track the upload stats inside Files -- *(client)* move upload retry logic from CLI to client - -### Fixed -- *(test)* use the Files struct to upload chunks - -### Other -- *(client)* add docs to the Files struct - -## [0.99.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.19...sn_client-v0.99.20) - 2023-12-14 - -### Other -- updated the following local packages: sn_networking, sn_protocol, sn_registers, sn_transfers - -## [0.99.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.18...sn_client-v0.99.19) - 2023-12-14 - -### Added -- *(client)* add backoff to payment retries -- *(networking)* use backoff for get_record - -## [0.99.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.17...sn_client-v0.99.18) - 2023-12-14 - -### Other -- *(test)* fix log messages during churn test - -## [0.99.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.16...sn_client-v0.99.17) - 2023-12-14 - -### Added -- *(cli)* simple retry mechanism for remaining chunks - -## [0.99.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.15...sn_client-v0.99.16) - 2023-12-13 - -### Other -- updated the following local packages: sn_networking - -## [0.99.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.14...sn_client-v0.99.15) - 2023-12-13 - -### Added -- add amounts to edges -- audit DAG collection and visualization -- cli double spends audit from genesis - -### Fixed -- docs - -### Other -- udeps and gitignore - -## [0.99.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.13...sn_client-v0.99.14) - 2023-12-12 - -### Other -- updated the following local packages: sn_protocol - -## [0.99.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.12...sn_client-v0.99.13) - 
2023-12-12 - -### Added -- *(cli)* skip payment and upload for existing chunks - -## [0.99.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.11...sn_client-v0.99.12) - 2023-12-12 - -### Added -- constant uploading across batches - -## [0.99.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.10...sn_client-v0.99.11) - 2023-12-11 - -### Other -- updated the following local packages: sn_networking - -## [0.99.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.9...sn_client-v0.99.10) - 2023-12-07 - -### Other -- updated the following local packages: sn_networking - -## [0.99.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.8...sn_client-v0.99.9) - 2023-12-06 - -### Other -- *(network)* use PUT Quorum::One for chunks -- *(network)* add more docs to the get_record_handlers - -## [0.99.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.7...sn_client-v0.99.8) - 2023-12-06 - -### Other -- updated the following local packages: sn_networking - -## [0.99.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.6...sn_client-v0.99.7) - 2023-12-06 - -### Other -- updated the following local packages: sn_transfers - -## [0.99.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.5...sn_client-v0.99.6) - 2023-12-06 - -### Other -- remove some needless cloning -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.99.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.4...sn_client-v0.99.5) - 2023-12-05 - -### Added -- *(network)* use custom enum for get_record errors - -### Other -- *(network)* avoid losing error info by converting them to a single type - -## [0.99.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.3...sn_client-v0.99.4) - 2023-12-05 - -### Other -- updated the following local packages: sn_transfers - -## 
[0.99.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.2...sn_client-v0.99.3) - 2023-12-05 - -### Other -- updated the following local packages: sn_networking - -## [0.99.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.1...sn_client-v0.99.2) - 2023-12-05 - -### Added -- allow for cli chunk put retries for unverifiable chunks - -### Fixed -- mark chunks as completed when no failures on retry - -## [0.99.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.99.0...sn_client-v0.99.1) - 2023-12-05 - -### Fixed -- *(client)* dont assume verification is always set w/ VerificationConfig - -### Other -- tie node reward test to number of data. -- *(networking)* remove triggered bootstrap slowdown - -## [0.99.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.23...sn_client-v0.99.0) - 2023-12-01 - -### Added -- *(network)* use separate PUT/GET configs - -### Other -- *(ci)* fix CI build cache parsing error -- *(network)* [**breaking**] use the Quorum struct provided by libp2p - -## [0.98.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.22...sn_client-v0.98.23) - 2023-11-29 - -### Other -- updated the following local packages: sn_networking - -## [0.98.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.21...sn_client-v0.98.22) - 2023-11-29 - -### Other -- updated the following local packages: sn_networking - -## [0.98.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.20...sn_client-v0.98.21) - 2023-11-29 - -### Added -- add missing quic features - -## [0.98.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.19...sn_client-v0.98.20) - 2023-11-29 - -### Added -- verify all the way to genesis -- verify spends through the cli - -### Fixed -- genesis check security flaw - -## [0.98.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.18...sn_client-v0.98.19) - 2023-11-28 - -### Added -- *(chunks)* serialise Chunks with 
MsgPack instead of bincode - -## [0.98.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.17...sn_client-v0.98.18) - 2023-11-28 - -### Other -- updated the following local packages: sn_protocol - -## [0.98.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.16...sn_client-v0.98.17) - 2023-11-27 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.98.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.15...sn_client-v0.98.16) - 2023-11-23 - -### Added -- *(networking)* reduce batch size to 64 -- add centralised retries for all data payment kinds - -### Fixed -- previous code assumptions - -## [0.98.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.14...sn_client-v0.98.15) - 2023-11-23 - -### Other -- updated the following local packages: sn_networking - -## [0.98.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.13...sn_client-v0.98.14) - 2023-11-23 - -### Other -- updated the following local packages: sn_transfers - -## [0.98.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.12...sn_client-v0.98.13) - 2023-11-23 - -### Other -- updated the following local packages: sn_networking - -## [0.98.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.11...sn_client-v0.98.12) - 2023-11-22 - -### Other -- *(release)* non gossip handler shall not throw gossip msg up - -## [0.98.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.10...sn_client-v0.98.11) - 2023-11-22 - -### Added -- *(cli)* add download batch-size option - -## [0.98.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.9...sn_client-v0.98.10) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - -### Other -- *(sn_networking)* enable_gossip via the builder pattern - -## [0.98.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.8...sn_client-v0.98.9) - 2023-11-21 - -### 
Other -- updated the following local packages: sn_networking - -## [0.98.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.7...sn_client-v0.98.8) - 2023-11-20 - -### Other -- increase default batch size - -## [0.98.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.6...sn_client-v0.98.7) - 2023-11-20 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.98.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.5...sn_client-v0.98.6) - 2023-11-20 - -### Other -- updated the following local packages: sn_networking - -## [0.98.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.4...sn_client-v0.98.5) - 2023-11-20 - -### Added -- quotes - -## [0.98.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.3...sn_client-v0.98.4) - 2023-11-17 - -### Fixed -- *(client)* ensure we store spends at CLOSE_GROUP nodes. - -## [0.98.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.2...sn_client-v0.98.3) - 2023-11-16 - -### Other -- updated the following local packages: sn_networking - -## [0.98.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.1...sn_client-v0.98.2) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.98.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.98.0...sn_client-v0.98.1) - 2023-11-15 - -### Other -- updated the following local packages: sn_protocol - -## [0.98.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.6...sn_client-v0.98.0) - 2023-11-15 - -### Added -- *(client)* [**breaking**] error out if we cannot connect to the network in - -### Other -- *(client)* [**breaking**] remove request_response timeout argument - -## [0.97.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.5...sn_client-v0.97.6) - 2023-11-15 - -### Other -- updated the following local packages: sn_protocol, sn_transfers - -## 
[0.97.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.4...sn_client-v0.97.5) - 2023-11-14 - -### Other -- *(royalties)* verify royalties fees amounts - -## [0.97.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.3...sn_client-v0.97.4) - 2023-11-14 - -### Other -- updated the following local packages: sn_networking - -## [0.97.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.2...sn_client-v0.97.3) - 2023-11-14 - -### Other -- updated the following local packages: sn_networking - -## [0.97.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.1...sn_client-v0.97.2) - 2023-11-13 - -### Added -- no throwing up if not a gossip listener - -## [0.97.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.97.0...sn_client-v0.97.1) - 2023-11-10 - -### Other -- updated the following local packages: sn_transfers - -## [0.97.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.6...sn_client-v0.97.0) - 2023-11-10 - -### Added -- verify chunks with Quorum::N(2) -- *(client)* only pay one node - -### Fixed -- *(client)* register validations checks for more than one node -- *(client)* set Quorum::One for registers -- *(test)* use client API to listen for gossipsub msgs when checking transfer notifs - -### Other -- *(transfers)* more logs around payments... 
-- *(churn)* small delay before validating chunks in data_with_churn -- *(client)* register get quorum->one -- *(tests)* make gossipsub verification more strict wrt number of msgs received - -## [0.96.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.5...sn_client-v0.96.6) - 2023-11-09 - -### Other -- updated the following local packages: sn_transfers - -## [0.96.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.4...sn_client-v0.96.5) - 2023-11-09 - -### Other -- updated the following local packages: sn_networking - -## [0.96.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.3...sn_client-v0.96.4) - 2023-11-09 - -### Other -- updated the following local packages: sn_networking - -## [0.96.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.2...sn_client-v0.96.3) - 2023-11-08 - -### Other -- updated the following local packages: sn_networking - -## [0.96.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.1...sn_client-v0.96.2) - 2023-11-08 - -### Added -- *(node)* set custom msg id in order to deduplicate transfer notifs - -## [0.96.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.96.0...sn_client-v0.96.1) - 2023-11-07 - -### Other -- Derive Clone on ClientRegister - -## [0.96.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.27...sn_client-v0.96.0) - 2023-11-07 - -### Fixed -- *(client)* [**breaking**] make `Files::chunk_file` into an associated function - -## [0.95.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.26...sn_client-v0.95.27) - 2023-11-07 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.25...sn_client-v0.95.26) - 2023-11-06 - -### Added -- *(node)* log marker to track the number of peers in the routing table - -## [0.95.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.24...sn_client-v0.95.25) - 
2023-11-06 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.23...sn_client-v0.95.24) - 2023-11-06 - -### Other -- updated the following local packages: sn_protocol - -## [0.95.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.22...sn_client-v0.95.23) - 2023-11-06 - -### Added -- *(deps)* upgrade libp2p to 0.53 - -## [0.95.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.21...sn_client-v0.95.22) - 2023-11-03 - -### Other -- updated the following local packages: sn_networking - -## [0.95.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.20...sn_client-v0.95.21) - 2023-11-02 - -### Other -- updated the following local packages: sn_networking - -## [0.95.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.19...sn_client-v0.95.20) - 2023-11-02 - -### Added -- keep transfers in mem instead of heavy cashnotes - -## [0.95.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.18...sn_client-v0.95.19) - 2023-11-01 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.95.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.17...sn_client-v0.95.18) - 2023-11-01 - -### Other -- log detailed intermediate errors - -## [0.95.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.16...sn_client-v0.95.17) - 2023-11-01 - -### Other -- updated the following local packages: sn_networking - -## [0.95.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.15...sn_client-v0.95.16) - 2023-11-01 - -### Other -- updated the following local packages: sn_transfers - -## [0.95.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.14...sn_client-v0.95.15) - 2023-10-31 - -### Other -- updated the following local packages: sn_networking - -## 
[0.95.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.13...sn_client-v0.95.14) - 2023-10-30 - -### Other -- *(networking)* de/serialise directly to Bytes - -## [0.95.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.12...sn_client-v0.95.13) - 2023-10-30 - -### Added -- `bincode::serialize` into `Bytes` without intermediate allocation - -## [0.95.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.11...sn_client-v0.95.12) - 2023-10-30 - -### Other -- *(node)* use Bytes for Gossip related data types -- *(node)* make gossipsubpublish take Bytes - -## [0.95.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.10...sn_client-v0.95.11) - 2023-10-27 - -### Added -- *(rpc-client)* be able to decrypt received Transfers by providing a secret key - -## [0.95.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.9...sn_client-v0.95.10) - 2023-10-27 - -### Other -- updated the following local packages: sn_networking - -## [0.95.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.8...sn_client-v0.95.9) - 2023-10-26 - -### Fixed -- client carry out merge when verify register storage - -## [0.95.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.7...sn_client-v0.95.8) - 2023-10-26 - -### Fixed -- add libp2p identity with rand dep for tests - -## [0.95.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.6...sn_client-v0.95.7) - 2023-10-26 - -### Other -- updated the following local packages: sn_networking, sn_registers, sn_transfers - -## [0.95.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.5...sn_client-v0.95.6) - 2023-10-26 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.95.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.4...sn_client-v0.95.5) - 2023-10-25 - -### Added -- *(cli)* chunk files in parallel - -## 
[0.95.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.3...sn_client-v0.95.4) - 2023-10-24 - -### Fixed -- *(tests)* nodes rewards tests to account for repayments amounts - -## [0.95.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.2...sn_client-v0.95.3) - 2023-10-24 - -### Other -- *(api)* wallet APIs to account for network royalties fees when returning total cost paid for storage - -## [0.95.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.1...sn_client-v0.95.2) - 2023-10-24 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.95.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.95.0...sn_client-v0.95.1) - 2023-10-24 - -### Added -- *(client)* do not retry verification GETs - -### Other -- log and debug SwarmCmd - -## [0.95.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.8...sn_client-v0.95.0) - 2023-10-24 - -### Added -- *(protocol)* [**breaking**] implement `PrettyPrintRecordKey` as a `Cow` type - -## [0.94.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.7...sn_client-v0.94.8) - 2023-10-23 - -### Other -- updated the following local packages: sn_networking - -## [0.94.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.6...sn_client-v0.94.7) - 2023-10-23 - -### Other -- more custom debug and debug skips - -## [0.94.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.5...sn_client-v0.94.6) - 2023-10-22 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.94.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.4...sn_client-v0.94.5) - 2023-10-21 - -### Other -- updated the following local packages: sn_networking - -## [0.94.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.3...sn_client-v0.94.4) - 2023-10-20 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## 
[0.94.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.2...sn_client-v0.94.3) - 2023-10-20 - -### Added -- *(client)* stop further bootstrapping if the client has K_VALUE peers - -## [0.94.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.1...sn_client-v0.94.2) - 2023-10-19 - -### Fixed -- *(network)* emit NetworkEvent when we publish a gossipsub msg - -## [0.94.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.94.0...sn_client-v0.94.1) - 2023-10-18 - -### Other -- updated the following local packages: sn_networking - -## [0.94.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.18...sn_client-v0.94.0) - 2023-10-18 - -### Added -- *(client)* verify register sync, and repay if not stored on all nodes -- *(client)* verify register uploads and retry and repay if failed - -### Other -- Revert "feat: keep transfers in mem instead of mem and i/o heavy cashnotes" -- *(client)* always validate storage payments -- repay for data in node rewards tests -- *(client)* remove price tolerance at the client - -## [0.93.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.17...sn_client-v0.93.18) - 2023-10-18 - -### Added -- keep transfers in mem instead of mem and i/o heavy cashnotes - -## [0.93.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.16...sn_client-v0.93.17) - 2023-10-17 - -### Fixed -- *(transfers)* dont overwrite existing payment transactions when we top up - -### Other -- adding comments and cleanup around quorum / payment fixes - -## [0.93.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.15...sn_client-v0.93.16) - 2023-10-16 - -### Fixed -- return correct error type -- consider record split an error, handle it for regs - -## [0.93.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.14...sn_client-v0.93.15) - 2023-10-16 - -### Other -- updated the following local packages: sn_networking - -## 
[0.93.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.13...sn_client-v0.93.14) - 2023-10-13 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.93.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.12...sn_client-v0.93.13) - 2023-10-13 - -### Fixed -- batch download process - -## [0.93.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.11...sn_client-v0.93.12) - 2023-10-12 - -### Other -- updated the following local packages: sn_networking - -## [0.93.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.10...sn_client-v0.93.11) - 2023-10-12 - -### Other -- updated the following local packages: sn_networking - -## [0.93.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.9...sn_client-v0.93.10) - 2023-10-12 - -### Other -- more detailed logging when client creating store cash_note - -## [0.93.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.8...sn_client-v0.93.9) - 2023-10-11 - -### Fixed -- expose RecordMismatch errors and cleanup wallet if we hit that - -### Other -- *(transfers)* add some more clarity around DoubleSpendAttemptedForCashNotes -- *(transfers)* remove pointless api - -## [0.93.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.7...sn_client-v0.93.8) - 2023-10-11 - -### Other -- updated the following local packages: sn_networking - -## [0.93.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.6...sn_client-v0.93.7) - 2023-10-11 - -### Added -- showing expected holders to CLI when required -- verify put_record with expected_holders - -## [0.93.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.5...sn_client-v0.93.6) - 2023-10-10 - -### Added -- *(transfer)* special event for transfer notifs over gossipsub - -## [0.93.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.4...sn_client-v0.93.5) - 2023-10-10 - -### Other -- compare files after 
download twice - -## [0.93.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.3...sn_client-v0.93.4) - 2023-10-10 - -### Other -- updated the following local packages: sn_transfers - -## [0.93.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.2...sn_client-v0.93.3) - 2023-10-09 - -### Other -- updated the following local packages: sn_networking - -## [0.93.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.1...sn_client-v0.93.2) - 2023-10-08 - -### Other -- updated the following local packages: sn_networking - -## [0.93.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.93.0...sn_client-v0.93.1) - 2023-10-06 - -### Added -- feat!(sn_transfers): unify store api for wallet - -### Other -- *(client)* dont println for wallet errors - -## [0.93.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.9...sn_client-v0.93.0) - 2023-10-06 - -### Fixed -- *(client)* [**breaking**] unify send_without_verify and send functions - -### Other -- *(cli)* reuse the client::send function to send amount from wallet - -## [0.92.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.8...sn_client-v0.92.9) - 2023-10-06 - -### Other -- fix new clippy errors - -## [0.92.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.7...sn_client-v0.92.8) - 2023-10-05 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.92.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.6...sn_client-v0.92.7) - 2023-10-05 - -### Added -- feat!(cli): remove concurrency argument - -## [0.92.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.5...sn_client-v0.92.6) - 2023-10-05 - -### Fixed -- *(sn_transfers)* be sure we store CashNotes before writing the wallet file - -## [0.92.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.4...sn_client-v0.92.5) - 2023-10-05 - -### Added -- quorum for records get - -### Fixed -- use 
specific verify func for chunk stored verification - -## [0.92.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.3...sn_client-v0.92.4) - 2023-10-05 - -### Added -- use progress bars on `files upload` - -### Other -- pay_for_chunks returns cost and new balance - -## [0.92.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.2...sn_client-v0.92.3) - 2023-10-04 - -### Fixed -- *(wallet)* remove expect statements - -## [0.92.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.1...sn_client-v0.92.2) - 2023-10-04 - -### Fixed -- record_to_verify for store_chunk shall be a Chunk - -## [0.92.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.92.0...sn_client-v0.92.1) - 2023-10-04 - -### Other -- updated the following local packages: sn_networking - -## [0.92.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.11...sn_client-v0.92.0) - 2023-10-04 - -### Added -- improve register API - -## [0.91.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.10...sn_client-v0.91.11) - 2023-10-04 - -### Added -- *(client)* reuse cashnotes for address payments - -### Other -- separate method and write test - -## [0.91.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.9...sn_client-v0.91.10) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.8...sn_client-v0.91.9) - 2023-10-03 - -### Added -- re-attempt when get chunk from network - -## [0.91.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.7...sn_client-v0.91.8) - 2023-10-03 - -### Other -- updated the following local packages: sn_networking - -## [0.91.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.6...sn_client-v0.91.7) - 2023-10-02 - -### Other -- remove all spans. 
- -## [0.91.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.5...sn_client-v0.91.6) - 2023-10-02 - -### Other -- updated the following local packages: sn_transfers - -## [0.91.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.4...sn_client-v0.91.5) - 2023-10-02 - -### Other -- *(client)* more logs around StoreCost retrieval - -## [0.91.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.3...sn_client-v0.91.4) - 2023-09-29 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.91.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.2...sn_client-v0.91.3) - 2023-09-28 - -### Added -- client to client transfers - -## [0.91.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.1...sn_client-v0.91.2) - 2023-09-27 - -### Added -- *(networking)* remove optional_semaphore being passed down from apps -- all records are Quorum::All once more - -## [0.91.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.91.0...sn_client-v0.91.1) - 2023-09-27 - -### Added -- *(client)* fail fast when a chunk is missing - -## [0.91.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.6...sn_client-v0.91.0) - 2023-09-27 - -### Added -- deep clean sn_transfers, reduce exposition, remove dead code - -## [0.90.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.5...sn_client-v0.90.6) - 2023-09-26 - -### Other -- updated the following local packages: sn_networking - -## [0.90.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.4...sn_client-v0.90.5) - 2023-09-26 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC service to unsubscribe from gossipsub topics - -## [0.90.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.3...sn_client-v0.90.4) - 2023-09-25 - -### Other -- updated the following local packages: sn_transfers - -## 
[0.90.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.2...sn_client-v0.90.3) - 2023-09-25 - -### Other -- cleanup renamings in sn_transfers - -## [0.90.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.1...sn_client-v0.90.2) - 2023-09-25 - -### Other -- *(client)* serialize ClientEvent - -## [0.90.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.90.0...sn_client-v0.90.1) - 2023-09-22 - -### Added -- *(apis)* adding client and node APIs, as well as safenode RPC services to pub/sub to gossipsub topics - -## [0.90.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.23...sn_client-v0.90.0) - 2023-09-21 - -### Added -- dusking DBCs - -### Other -- rename Nano NanoTokens -- improve naming - -## [0.89.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.22...sn_client-v0.89.23) - 2023-09-21 - -### Other -- updated the following local packages: sn_networking - -## [0.89.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.21...sn_client-v0.89.22) - 2023-09-21 - -### Other -- clarify `files download` usage -- output address of uploaded file - -## [0.89.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.20...sn_client-v0.89.21) - 2023-09-20 - -### Other -- updated the following local packages: sn_networking - -## [0.89.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.19...sn_client-v0.89.20) - 2023-09-20 - -### Other -- major dep updates - -## [0.89.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.18...sn_client-v0.89.19) - 2023-09-20 - -### Other -- allow chunks to be Quorum::One - -## [0.89.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.17...sn_client-v0.89.18) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.16...sn_client-v0.89.17) - 2023-09-19 - -### Other -- error handling when 
failed fetch store cost - -## [0.89.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.15...sn_client-v0.89.16) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.14...sn_client-v0.89.15) - 2023-09-19 - -### Other -- updated the following local packages: sn_networking - -## [0.89.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.13...sn_client-v0.89.14) - 2023-09-18 - -### Other -- updated the following local packages: sn_networking - -## [0.89.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.12...sn_client-v0.89.13) - 2023-09-18 - -### Added -- *(client)* download file concurrently - -## [0.89.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.11...sn_client-v0.89.12) - 2023-09-18 - -### Added -- serialisation for transfers for out of band sending - -### Other -- *(client)* simplify API -- *(cli)* use iter::chunks() API to batch and pay for our chunks - -## [0.89.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.10...sn_client-v0.89.11) - 2023-09-15 - -### Added -- *(client)* pay for chunks in batches - -### Other -- *(client)* refactor chunk upload code to allow greater concurrency - -## [0.89.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.9...sn_client-v0.89.10) - 2023-09-15 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.89.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.8...sn_client-v0.89.9) - 2023-09-15 - -### Other -- *(client)* remove unused wallet_client - -## [0.89.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.7...sn_client-v0.89.8) - 2023-09-14 - -### Added -- *(register)* client to pay for Register only if local wallet has no payment for it yet - -## [0.89.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.6...sn_client-v0.89.7) 
- 2023-09-14 - -### Added -- split upload procedure into batches - -## [0.89.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.5...sn_client-v0.89.6) - 2023-09-14 - -### Added -- *(network)* enable custom node metrics -- *(network)* use NetworkConfig for network construction - -### Other -- remove unused error variants -- *(network)* use builder pattern to construct the Network -- *(metrics)* rename feature flag and small fixes - -## [0.89.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.4...sn_client-v0.89.5) - 2023-09-13 - -### Added -- *(register)* paying nodes for Register storage - -### Other -- *(register)* adding Register payment storage tests to run in CI -- *(payments)* adapting code to recent changes in Transfers - -## [0.89.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.3...sn_client-v0.89.4) - 2023-09-12 - -### Added -- utilize stream decryptor - -## [0.89.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.2...sn_client-v0.89.3) - 2023-09-12 - -### Other -- updated the following local packages: sn_networking - -## [0.89.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.1...sn_client-v0.89.2) - 2023-09-12 - -### Other -- *(metrics)* rename network metrics and remove from default features list - -## [0.89.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.89.0...sn_client-v0.89.1) - 2023-09-12 - -### Added -- add tx and parent spends verification -- chunk payments using UTXOs instead of DBCs - -### Other -- use updated sn_dbc - -## [0.89.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.16...sn_client-v0.89.0) - 2023-09-11 - -### Added -- [**breaking**] Clients add a tolerance to store cost - -## [0.88.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.15...sn_client-v0.88.16) - 2023-09-11 - -### Other -- utilize stream encryptor - -## 
[0.88.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.14...sn_client-v0.88.15) - 2023-09-08 - -### Added -- *(client)* repay for chunks if they cannot be validated - -### Other -- *(client)* refactor to have permits at network layer -- *(refactor)* remove wallet_client args from upload flow -- *(refactor)* remove upload_chunks semaphore arg - -## [0.88.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.13...sn_client-v0.88.14) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.12...sn_client-v0.88.13) - 2023-09-07 - -### Other -- updated the following local packages: sn_networking - -## [0.88.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.11...sn_client-v0.88.12) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking, sn_transfers - -## [0.88.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.10...sn_client-v0.88.11) - 2023-09-05 - -### Added -- encryptioni output to disk - -## [0.88.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.9...sn_client-v0.88.10) - 2023-09-05 - -### Other -- updated the following local packages: sn_networking - -## [0.88.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.8...sn_client-v0.88.9) - 2023-09-04 - -### Added -- feat!(protocol): make payments for all record types - -### Fixed -- fix permissions for public register creation - -### Other -- *(release)* sn_registers-v0.2.4 -- utilize encrypt_from_file - -## [0.88.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.7...sn_client-v0.88.8) - 2023-09-04 - -### Other -- Add client and protocol detail - -## [0.88.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.6...sn_client-v0.88.7) - 2023-09-01 - -### Other -- *(transfers)* store dbcs by ref to avoid more clones -- *(client)* make unconfonfirmed txs 
btreeset, remove unnecessary cloning -- *(client)* remove one signed_spend clone - -## [0.88.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.5...sn_client-v0.88.6) - 2023-09-01 - -### Other -- updated the following local packages: sn_networking - -## [0.88.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.4...sn_client-v0.88.5) - 2023-08-31 - -### Other -- remove unused async - -## [0.88.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.3...sn_client-v0.88.4) - 2023-08-31 - -### Other -- updated the following local packages: sn_protocol, sn_transfers - -## [0.88.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.2...sn_client-v0.88.3) - 2023-08-31 - -### Other -- some logging updates - -## [0.88.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.1...sn_client-v0.88.2) - 2023-08-31 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.88.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.88.0...sn_client-v0.88.1) - 2023-08-31 - -### Added -- *(cli)* expose 'concurrency' flag -- *(cli)* increase put parallelisation - -### Other -- *(client)* reduce default concurrency -- *(client)* improve download concurrency. 
- -## [0.88.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.29...sn_client-v0.88.0) - 2023-08-30 - -### Added -- refactor to allow greater upload parallelisation -- one transfer per data set, mapped dbcs to content addrs -- [**breaking**] pay each chunk holder direct -- feat!(protocol): gets keys with GetStoreCost -- feat!(protocol): get price and pay for each chunk individually -- feat!(protocol): remove chunk merkletree to simplify payment - -### Fixed -- *(tokio)* remove tokio fs - -### Other -- *(node)* refactor churn test order -- *(deps)* bump tokio to 1.32.0 -- *(client)* refactor client wallet to reduce dbc clones -- *(client)* pass around content payments map mut ref -- *(client)* reduce transferoutputs cloning -- *(client)* error out early for invalid transfers -- *(node)* reenable payment fail check - -## [0.87.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.28...sn_client-v0.87.29) - 2023-08-30 - -### Other -- updated the following local packages: sn_networking - -## [0.87.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.27...sn_client-v0.87.28) - 2023-08-29 - -### Other -- updated the following local packages: sn_networking - -## [0.87.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.26...sn_client-v0.87.27) - 2023-08-24 - -### Other -- updated the following local packages: sn_registers, sn_transfers - -## [0.87.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.25...sn_client-v0.87.26) - 2023-08-22 - -### Other -- updated the following local packages: sn_networking - -## [0.87.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.24...sn_client-v0.87.25) - 2023-08-22 - -### Fixed -- fixes to allow upload file works properly - -## [0.87.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.23...sn_client-v0.87.24) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## 
[0.87.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.22...sn_client-v0.87.23) - 2023-08-21 - -### Other -- updated the following local packages: sn_networking - -## [0.87.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.21...sn_client-v0.87.22) - 2023-08-18 - -### Added -- remove client and node initial join flow - -## [0.87.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.20...sn_client-v0.87.21) - 2023-08-18 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.19...sn_client-v0.87.20) - 2023-08-17 - -### Fixed -- *(client)* start bootstrap when we are connected to one peer - -## [0.87.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.18...sn_client-v0.87.19) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.17...sn_client-v0.87.18) - 2023-08-17 - -### Fixed -- *(client)* use boostrap and fire Connecting event - -## [0.87.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.16...sn_client-v0.87.17) - 2023-08-17 - -### Other -- updated the following local packages: sn_networking - -## [0.87.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.15...sn_client-v0.87.16) - 2023-08-16 - -### Added -- *(client)* do not use cached proofs - -## [0.87.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.14...sn_client-v0.87.15) - 2023-08-16 - -### Added -- overpay by default to allow margin - -## [0.87.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.13...sn_client-v0.87.14) - 2023-08-15 - -### Other -- updated the following local packages: sn_networking - -## [0.87.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.12...sn_client-v0.87.13) - 2023-08-11 - -### Added -- *(transfers)* add resend loop for 
unconfirmed txs -- *(networking)* ensure we always use the highest price we find -- *(networking)* enable returning less than majority for store_cost -- *(client)* use store cost queries to pre populate cost and RT - -### Fixed -- *(client)* only_store_cost_if_higher missing else added - -### Other -- remove client inactivity random storage query -- *(node)* resend unconfirmed txs before asserting -- *(cli)* print cost info -- *(networking)* remove logs, fix typos and clippy issues -- overpay in advance to avoid storage cost calculation inconsistent - -## [0.87.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.11...sn_client-v0.87.12) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.87.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.10...sn_client-v0.87.11) - 2023-08-10 - -### Other -- updated the following local packages: sn_networking - -## [0.87.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.9...sn_client-v0.87.10) - 2023-08-08 - -### Added -- *(transfers)* add get largest dbc for spending - -### Fixed -- *(node)* prevent panic in storage calcs - -### Other -- tidy store cost code - -## [0.87.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.8...sn_client-v0.87.9) - 2023-08-07 - -### Other -- updated the following local packages: sn_networking - -## [0.87.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.7...sn_client-v0.87.8) - 2023-08-07 - -### Added -- rework register addresses to include pk - -### Other -- rename network addresses confusing name method to xorname - -## [0.87.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.6...sn_client-v0.87.7) - 2023-08-04 - -### Other -- updated the following local packages: sn_networking - -## [0.87.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.5...sn_client-v0.87.6) - 2023-08-03 - -### Other -- updated the following local 
packages: sn_networking - -## [0.87.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.4...sn_client-v0.87.5) - 2023-08-03 - -### Other -- updated the following local packages: sn_networking - -## [0.87.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.3...sn_client-v0.87.4) - 2023-08-02 - -### Fixed -- do not create genesis when facuet already funded - -## [0.87.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.2...sn_client-v0.87.3) - 2023-08-01 - -### Other -- *(client)* reattempt to get_spend_from_network -- add more verificaiton for payments - -## [0.87.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.1...sn_client-v0.87.2) - 2023-08-01 - -### Other -- updated the following local packages: sn_protocol - -## [0.87.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.87.0...sn_client-v0.87.1) - 2023-08-01 - -### Added -- *(cli)* add no-verify flag to cli - -### Other -- fix double spend and remove arbitrary wait -- *(node)* verify faucet transactions before continuing -- *(netowrking)* change default re-attempt behaviour - -## [0.87.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.11...sn_client-v0.87.0) - 2023-08-01 - -### Other -- *(register)* [**breaking**] hashing the node of a Register to sign it instead of bincode-serialising it - -## [0.86.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.10...sn_client-v0.86.11) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.9...sn_client-v0.86.10) - 2023-07-31 - -### Added -- carry out get_record re-attempts for critical record -- for put_record verification, NotEnoughCopies is acceptable - -### Fixed -- *(test)* using proper wallets during data_with_churn test - -### Other -- move PrettyPrintRecordKey to sn_protocol -- small refactors for failing CI -- more tracable logs regarding chunk 
payment prove - -## [0.86.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.8...sn_client-v0.86.9) - 2023-07-31 - -### Other -- updated the following local packages: sn_networking - -## [0.86.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.7...sn_client-v0.86.8) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking - -## [0.86.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.6...sn_client-v0.86.7) - 2023-07-28 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.86.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.5...sn_client-v0.86.6) - 2023-07-28 - -### Other -- adapt all logging to use pretty record key - -## [0.86.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.4...sn_client-v0.86.5) - 2023-07-27 - -### Other -- updated the following local packages: sn_networking - -## [0.86.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.3...sn_client-v0.86.4) - 2023-07-26 - -### Fixed -- *(register)* Registers with same name but different tags were not being stored by the network - -### Other -- centralising RecordKey creation logic to make sure we always use the same for all content type - -## [0.86.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.2...sn_client-v0.86.3) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.1...sn_client-v0.86.2) - 2023-07-26 - -### Other -- updated the following local packages: sn_networking - -## [0.86.1](https://github.com/maidsafe/safe_network/compare/sn_client-v0.86.0...sn_client-v0.86.1) - 2023-07-25 - -### Added -- *(replication)* replicate when our close group changes - -### Fixed -- *(client)* keep an active `ClientEvent` receiver - -### Other -- *(client)* get k_value from const fn - -## 
[0.86.0](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.55...sn_client-v0.86.0) - 2023-07-21 - -### Added -- *(protocol)* [**breaking**] make Chunks storage payment required - -### Other -- tokens transfers task in data_with_churn tests to use client apis instead of faucet helpers - -## [0.85.55](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.54...sn_client-v0.85.55) - 2023-07-20 - -### Other -- cleanup error types - -## [0.85.54](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.53...sn_client-v0.85.54) - 2023-07-19 - -### Added -- using kad::record for dbc spend ops -- *(CI)* dbc verfication during network churning test - -## [0.85.53](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.52...sn_client-v0.85.53) - 2023-07-19 - -### Other -- updated the following local packages: sn_protocol - -## [0.85.52](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.51...sn_client-v0.85.52) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.51](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.50...sn_client-v0.85.51) - 2023-07-18 - -### Added -- safer registers requiring signatures -- *(networking)* remove LostRecordEvent - -### Fixed -- address PR comments -- client - -## [0.85.50](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.49...sn_client-v0.85.50) - 2023-07-18 - -### Other -- updated the following local packages: sn_networking - -## [0.85.49](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.48...sn_client-v0.85.49) - 2023-07-17 - -### Other -- updated the following local packages: sn_networking - -## [0.85.48](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.47...sn_client-v0.85.48) - 2023-07-17 - -### Added -- *(networking)* upgrade to libp2p 0.52.0 - -### Other -- *(networking)* log all connected peer count - -## 
[0.85.47](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.46...sn_client-v0.85.47) - 2023-07-17 - -### Added -- *(client)* keep storage payment proofs in local wallet - -## [0.85.46](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.45...sn_client-v0.85.46) - 2023-07-12 - -### Other -- client to upload paid chunks in batches - -## [0.85.45](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.44...sn_client-v0.85.45) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.44](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.43...sn_client-v0.85.44) - 2023-07-11 - -### Fixed -- *(client)* publish register on creation - -## [0.85.43](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.42...sn_client-v0.85.43) - 2023-07-11 - -### Other -- updated the following local packages: sn_networking - -## [0.85.42](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.41...sn_client-v0.85.42) - 2023-07-10 - -### Other -- updated the following local packages: sn_networking - -## [0.85.41](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.40...sn_client-v0.85.41) - 2023-07-10 - -### Added -- client query register via get_record -- client upload Register via put_record - -## [0.85.40](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.39...sn_client-v0.85.40) - 2023-07-06 - -### Other -- updated the following local packages: sn_networking - -## [0.85.39](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.38...sn_client-v0.85.39) - 2023-07-06 - -### Added -- PutRecord response during client upload -- client upload chunk using kad::put_record - -### Other -- *(release)* sn_cli-v0.79.0/sn_logging-v0.2.0/sn_node-v0.86.0/sn_testnet-v0.1.76/sn_networking-v0.3.11 - -## [0.85.38](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.37...sn_client-v0.85.38) - 2023-07-05 - -### Added -- carry out validation for 
record_store::put - -## [0.85.37](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.36...sn_client-v0.85.37) - 2023-07-04 - -### Other -- demystify permissions - -## [0.85.36](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.35...sn_client-v0.85.36) - 2023-07-03 - -### Added -- append SAFE_PEERS to initial_peers after restart - -### Fixed -- *(text)* data_churn_test creates clients parsing SAFE_PEERS env - -### Other -- reduce SAMPLE_SIZE for the data_with_churn test -- some client log tidy up - -## [0.85.35](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.34...sn_client-v0.85.35) - 2023-06-29 - -### Other -- updated the following local packages: sn_networking - -## [0.85.34](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.33...sn_client-v0.85.34) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.33](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.32...sn_client-v0.85.33) - 2023-06-28 - -### Added -- make the example work, fix sync when reg doesnt exist -- rework permissions, implement register cmd handlers -- register refactor, kad reg without cmds - -### Fixed -- rename UserRights to UserPermissions - -## [0.85.32](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.31...sn_client-v0.85.32) - 2023-06-28 - -### Other -- updated the following local packages: sn_networking - -## [0.85.31](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.30...sn_client-v0.85.31) - 2023-06-28 - -### Added -- *(node)* dial without PeerId - -## [0.85.30](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.29...sn_client-v0.85.30) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## [0.85.29](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.28...sn_client-v0.85.29) - 2023-06-27 - -### Other -- updated the following local packages: sn_networking - -## 
[0.85.28](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.27...sn_client-v0.85.28) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.27](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.26...sn_client-v0.85.27) - 2023-06-26 - -### Other -- updated the following local packages: sn_networking - -## [0.85.26](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.25...sn_client-v0.85.26) - 2023-06-26 - -### Other -- *(release)* sn_cli-v0.78.9/sn_logging-v0.1.4/sn_node-v0.83.55/sn_testnet-v0.1.59/sn_networking-v0.1.24 - -## [0.85.25](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.24...sn_client-v0.85.25) - 2023-06-26 - -### Other -- payment proof map to use xorname as index instead of merkletree nodes type - -## [0.85.24](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.23...sn_client-v0.85.24) - 2023-06-24 - -### Other -- updated the following local packages: sn_networking - -## [0.85.23](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.22...sn_client-v0.85.23) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.22](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.21...sn_client-v0.85.22) - 2023-06-23 - -### Added -- forward chunk when not being the closest -- repliate to peers lost record - -### Fixed -- client upload to peers closer to chunk - -## [0.85.21](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.20...sn_client-v0.85.21) - 2023-06-23 - -### Other -- updated the following local packages: sn_networking - -## [0.85.20](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.19...sn_client-v0.85.20) - 2023-06-22 - -### Other -- *(client)* initial refactor around uploads - -## [0.85.19](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.18...sn_client-v0.85.19) - 2023-06-22 - -### Fixed -- improve client upload speed - -## 
[0.85.18](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.17...sn_client-v0.85.18) - 2023-06-21 - -### Other -- updated the following local packages: sn_networking, sn_protocol - -## [0.85.17](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.16...sn_client-v0.85.17) - 2023-06-21 - -### Other -- *(network)* remove `NetworkEvent::PutRecord` dead code - -## [0.85.16](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.15...sn_client-v0.85.16) - 2023-06-21 - -### Other -- remove unused error variants -- *(node)* obtain parent_tx from SignedSpend -- *(release)* sn_cli-v0.77.46/sn_logging-v0.1.3/sn_node-v0.83.42/sn_testnet-v0.1.46/sn_networking-v0.1.15 - -## [0.85.15](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.14...sn_client-v0.85.15) - 2023-06-20 - -### Added -- *(network)* validate `Record` on GET -- *(network)* validate and store `ReplicatedData` -- *(node)* perform proper validations on PUT -- *(network)* validate and store `Record` - -### Fixed -- *(node)* store parent tx along with `SignedSpend` - -### Other -- *(docs)* add more docs and comments - -## [0.85.14](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.13...sn_client-v0.85.14) - 2023-06-20 - -### Other -- updated the following local packages: sn_networking - -## [0.85.13](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.12...sn_client-v0.85.13) - 2023-06-20 - -### Added -- pay 1 nano per Chunk as temporary approach till net-invoices are implemented -- committing storage payment SignedSpends to the network -- nodes to verify input DBCs of Chunk payment proof were spent - -### Other -- specific error types for different payment proof verification scenarios -- include the Tx instead of output DBCs as part of storage payment proofs - -## [0.85.12](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.11...sn_client-v0.85.12) - 2023-06-20 - -### Other -- updated the following local packages: 
sn_networking - -## [0.85.11](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.10...sn_client-v0.85.11) - 2023-06-16 - -### Fixed -- reduce client mem usage during uploading - -## [0.85.10](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.9...sn_client-v0.85.10) - 2023-06-15 - -### Added -- add double spend test - -### Fixed -- parent spend issue - -## [0.85.9](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.8...sn_client-v0.85.9) - 2023-06-14 - -### Added -- include output DBC within payment proof for Chunks storage - -## [0.85.8](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.7...sn_client-v0.85.8) - 2023-06-14 - -### Other -- updated the following local packages: sn_networking - -## [0.85.7](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.6...sn_client-v0.85.7) - 2023-06-14 - -### Added -- *(client)* expose req/resp timeout to client cli - -## [0.85.6](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.5...sn_client-v0.85.6) - 2023-06-13 - -### Other -- *(release)* sn_cli-v0.77.12/sn_logging-v0.1.2/sn_node-v0.83.10/sn_testnet-v0.1.14/sn_networking-v0.1.6 - -## [0.85.5](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.4...sn_client-v0.85.5) - 2023-06-12 - -### Added -- remove spendbook rw locks, improve logging - -### Other -- remove uneeded printlns -- *(release)* sn_cli-v0.77.10/sn_record_store-v0.1.3/sn_node-v0.83.8/sn_testnet-v0.1.12/sn_networking-v0.1.4 - -## [0.85.4](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.3...sn_client-v0.85.4) - 2023-06-09 - -### Other -- manually change crate version - -## [0.85.3](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.2...sn_client-v0.85.3) - 2023-06-09 - -### Other -- more replication flow statistics during mem_check test - -## [0.85.2](https://github.com/maidsafe/safe_network/compare/sn_client-v0.85.1...sn_client-v0.85.2) - 2023-06-07 - -### Added -- bail out if empty 
list of addreses is provided for payment proof generation -- *(client)* add progress indicator for initial network connections -- attach payment proof when uploading Chunks -- collect payment proofs and make sure merkletree always has pow-of-2 leaves -- node side payment proof validation from a given Chunk, audit trail, and reason-hash -- use all Chunks of a file to generate payment the payment proof tree -- Chunk storage payment and building payment proofs - -### Fixed -- remove progress bar after it's finished. - -### Other -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_node-v0.83.1 -- Revert "chore(release): sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2" -- *(release)* sn_cli-v0.77.1/sn_client-v0.85.2/sn_networking-v0.1.2/sn_protocol-v0.1.2/sn_node-v0.83.1/sn_record_store-v0.1.2/sn_registers-v0.1.2 -- small log wording updates -- exposing definition of merkletree nodes data type and additional doc in code -- making Chunk payment proof optional for now -- moving all payment proofs utilities into sn_transfers crate - -## [0.85.1](https://github.com/jacderida/safe_network/compare/sn_client-v0.85.0...sn_client-v0.85.1) - 2023-06-06 - -### Added -- refactor replication flow to using pull model diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml deleted file mode 100644 index ce0f2b5ee8..0000000000 --- a/sn_client/Cargo.toml +++ /dev/null @@ -1,90 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "Safe Network Client" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_client" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.110.4" - -[features] -default = [] -local = ["sn_networking/local"] -open-metrics = 
["sn_networking/open-metrics", "prometheus-client"] -test-utils = ["sn_peers_acquisition", "eyre"] -# required to pass on flag to node builds -websockets = ["sn_networking/websockets", "sn_protocol/websockets"] - - -[dependencies] -tokio = { version = "1.35.0", features = [ - "io-util", - "macros", - "rt", - "sync", - "time", -] } -bip39 = "2.0.0" -curv = { version = "0.10.1", package = "sn_curv", default-features = false, features = [ - "num-bigint", -] } -eip2333 = { version = "0.2.1", package = "sn_bls_ckd" } -async-trait = "0.1" -backoff = { version = "0.4.0", features = ["tokio"] } -bls = { package = "blsttc", version = "8.0.1" } -bytes = { version = "1.0.1", features = ["serde"] } -crdts = "7.3.2" -custom_debug = "~0.6.1" -dashmap = "~6.1.0" -futures = "~0.3.13" -hex = "~0.4.3" -itertools = "~0.12.1" -libp2p = { version = "0.54.1", features = ["identify"] } -petgraph = { version = "0.6.4", features = ["serde-1"] } -prometheus-client = { version = "0.22", optional = true } -rand = { version = "~0.8.5", features = ["small_rng"] } -rayon = "1.8.0" -rmp-serde = "1.1.1" -self_encryption = "~0.30.0" -serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.18.4" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_registers = { path = "../sn_registers", version = "0.3.21" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -tempfile = "3.6.0" -thiserror = "1.0.23" -tiny-keccak = "~2.0.2" -tracing = { version = "~0.1.26" } -xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3", optional = true } -eyre = { version = "0.6.8", optional = true } - -[dev-dependencies] -assert_matches = "1.5.0" -dirs-next = "~2.0.0" -# add rand to libp2p -libp2p-identity = { version = "0.2.7", features = ["rand"] } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_registers = { path = "../sn_registers", version = "0.3.21", features = [ - 
"test-utils", -] } - -[lints] -workspace = true - -# to allow wasm compilation -[lib] -crate-type = ["cdylib", "rlib"] - -[target.'cfg(target_arch = "wasm32")'.dependencies] -getrandom = { version = "0.2.12", features = ["js"] } -wasm-bindgen = "0.2.90" -wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -console_error_panic_hook = "0.1.6" -tracing-wasm = "0.2.1" -wasmtimer = "0.2.0" -web-sys = { version = "0.3.22", features = ["console"] } diff --git a/sn_client/README.md b/sn_client/README.md deleted file mode 100644 index 48a4fe9cf9..0000000000 --- a/sn_client/README.md +++ /dev/null @@ -1,56 +0,0 @@ -# `sn_client` - SAFE Network Client Library - -## Overview - -The `sn_client` library provides the core functionalities for interacting with the SAFE Network. It handles tasks such as connecting to the network, managing concurrency, and performing various network operations like data storage and retrieval. - -## Table of Contents - -- [Overview](#overview) -- [Installation](#installation) -- [Usage](#usage) - - [API Calls](#api-calls) -- [Running Tests](#running-tests) -- [Contributing](#contributing) - - [Conventional Commits](#conventional-commits) -- [License](#license) - -## Installation - -To include `sn_client` in your Rust project, add the following to your `Cargo.toml`: - -```toml -[dependencies] -sn_client = "latest_version_here" -``` - -## Usage - -To use `sn_client`, you first need to instantiate a client. Here's a simple example: - -```rust -use sn_client::Client; -let client = Client::new(signer, peers, req_response_timeout, custom_concurrency_limit).await?; -``` - -## Running Tests - -Prerequisites: -* A running local network. Refer to [`safe_network/README.md`](../README.md) to run a local test network. 
-* `SAFE_PEERS` environment variable or running the tests with `--feature=local`: - -```bash -$ cargo test --package sn_client --release --tests --features=local -``` - -## Contributing - -Please refer to the [Contributing Guidelines](../CONTRIBUTING.md) from the main directory for details on how to contribute to this project. - -### Conventional Commits - -We follow the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. Please adhere to this standard when contributing. - -## License - -This Safe Network repository is licensed under the General Public License (GPL), version 3 ([LICENSE](LICENSE) http://www.gnu.org/licenses/gpl-3.0.en.html). diff --git a/sn_client/src/acc_packet.rs b/sn_client/src/acc_packet.rs deleted file mode 100644 index 2d9570f34a..0000000000 --- a/sn_client/src/acc_packet.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::error::Result; -use bip39::Mnemonic; -use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey}; -use std::path::Path; - -pub mod user_secret; - -const DEFAULT_WALLET_DERIVIATION_PASSPHRASE: &str = "default"; - -/// Load a account from disk, with wallet, or create a new one using the mnemonic system -pub fn load_account_wallet_or_create_with_mnemonic( - root_dir: &Path, - derivation_passphrase: Option<&str>, -) -> Result { - let wallet = HotWallet::load_from(root_dir); - - match wallet { - Ok(wallet) => Ok(wallet), - Err(error) => { - warn!("Issue loading wallet, creating a new one: {error}"); - println!("Issue loading wallet from {root_dir:?}"); - - let mnemonic = load_or_create_mnemonic(root_dir)?; - let wallet = - secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?; - - Ok(HotWallet::create_from_key(root_dir, wallet, None)?) - } - } -} - -pub fn load_or_create_mnemonic(root_dir: &Path) -> Result { - match user_secret::read_mnemonic_from_disk(root_dir) { - Ok(mnemonic) => { - println!( - "Found existing mnemonic in {root_dir:?}, this will be used for key derivation." - ); - info!("Using existing mnemonic from {root_dir:?}"); - Ok(mnemonic) - } - Err(error) => { - println!("No existing mnemonic found, creating a new one in {root_dir:?}."); - warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}"); - let mnemonic = user_secret::random_eip2333_mnemonic()?; - user_secret::write_mnemonic_to_disk(root_dir, &mnemonic)?; - Ok(mnemonic) - } - } -} - -pub fn secret_key_from_mnemonic( - mnemonic: Mnemonic, - derivation_passphrase: Option, -) -> Result { - let passphrase = - derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned()); - user_secret::account_wallet_secret_key(mnemonic, &passphrase) -} - -pub fn create_faucet_account_and_wallet() -> HotWallet { - let root_dir = get_faucet_data_dir(); - - println!("Loading faucet wallet... 
{root_dir:#?}"); - load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .expect("Faucet wallet shall be created successfully.") -} diff --git a/sn_client/src/acc_packet/user_secret.rs b/sn_client/src/acc_packet/user_secret.rs deleted file mode 100644 index 800018cfb7..0000000000 --- a/sn_client/src/acc_packet/user_secret.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - error::{Error, Result}, - transfers::MainSecretKey, -}; -use bls::SecretKey; -use curv::elliptic::curves::ECScalar; -use rand::RngCore; -use std::path::Path; -use xor_name::XorName; - -const MNEMONIC_FILENAME: &str = "account_secret"; - -const ACCOUNT_ROOT_XORNAME_DERIVATION: &str = "m/1/0"; - -const ACCOUNT_WALLET_DERIVATION: &str = "m/2/0"; - -pub fn random_eip2333_mnemonic() -> Result { - let mut entropy = [1u8; 32]; - let rng = &mut rand::rngs::OsRng; - rng.fill_bytes(&mut entropy); - let mnemonic = - bip39::Mnemonic::from_entropy(&entropy).map_err(|_error| Error::FailedToParseEntropy)?; - Ok(mnemonic) -} - -/// Derive a wallet secret key from the mnemonic for the account. 
-pub fn account_wallet_secret_key( - mnemonic: bip39::Mnemonic, - passphrase: &str, -) -> Result { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_WALLET_DERIVATION); - let key_bytes = derived_key.serialize(); - let sk = SecretKey::from_bytes(key_bytes.into()).map_err(|_err| Error::InvalidKeyBytes)?; - Ok(MainSecretKey::new(sk)) -} - -#[expect(dead_code)] // as yet unused, will be used soon -/// Derive an xorname from the mnemonic for the account to store data. -pub(crate) fn account_root_xorname(mnemonic: bip39::Mnemonic, passphrase: &str) -> Result { - let seed = mnemonic.to_seed(passphrase); - - let root_sk = - eip2333::derive_master_sk(&seed).map_err(|_err| Error::InvalidMnemonicSeedPhrase)?; - let derived_key = eip2333::derive_child_sk(root_sk, ACCOUNT_ROOT_XORNAME_DERIVATION); - let derived_key_bytes = derived_key.serialize(); - Ok(XorName::from_content(&derived_key_bytes)) -} - -pub fn write_mnemonic_to_disk(files_dir: &Path, mnemonic: &bip39::Mnemonic) -> Result<()> { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = mnemonic.to_string(); - std::fs::write(filename, content)?; - Ok(()) -} - -pub(super) fn read_mnemonic_from_disk(files_dir: &Path) -> Result { - let filename = files_dir.join(MNEMONIC_FILENAME); - let content = std::fs::read_to_string(filename)?; - let mnemonic = - bip39::Mnemonic::parse_normalized(&content).map_err(|_err| Error::FailedToParseMnemonic)?; - Ok(mnemonic) -} diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs deleted file mode 100644 index 5ed63210a6..0000000000 --- a/sn_client/src/api.rs +++ /dev/null @@ -1,1234 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::{ - error::{Error, Result}, - Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver, ClientRegister, - WalletClient, -}; -use bls::{PublicKey, SecretKey, Signature}; -use libp2p::{ - identity::Keypair, - kad::{KBucketDistance, Quorum, Record}, - Multiaddr, PeerId, -}; -use rand::{thread_rng, Rng}; -use sn_networking::{ - get_signed_spend_from_record, multiaddr_is_global, - target_arch::{interval, spawn, timeout, Instant}, - GetRecordCfg, GetRecordError, NetworkBuilder, NetworkError, NetworkEvent, PutRecordCfg, - VerificationKind, -}; -use sn_protocol::{ - error::Error as ProtocolError, - messages::ChunkProof, - storage::{ - try_deserialize_record, try_serialize_record, Chunk, ChunkAddress, RecordHeader, - RecordKind, RegisterAddress, RetryStrategy, SpendAddress, - }, - NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -}; -use sn_registers::{Permissions, SignedRegister}; -use sn_transfers::{ - CashNote, CashNoteRedemption, MainPubkey, NanoTokens, Payment, SignedSpend, TransferError, - GENESIS_SPEND_UNIQUE_KEY, -}; -#[cfg(target_arch = "wasm32")] -use std::path::PathBuf; -use std::{ - collections::{HashMap, HashSet}, - num::NonZeroUsize, - sync::Arc, -}; -use tokio::time::Duration; -use tracing::trace; -use xor_name::XorName; - -/// The maximum duration the client will wait for a connection to the network before timing out. -pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); - -/// The timeout duration for the client to receive any response from the network. 
-const INACTIVITY_TIMEOUT: Duration = Duration::from_secs(30); - -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const QUORUM_N_IS_2: NonZeroUsize = match NonZeroUsize::new(2) { - Some(v) => v, - None => panic!("2 is not zero"), -}; - -impl Client { - /// A quick client with a random secret key and some peers. - pub async fn quick_start(peers: Option>) -> Result { - Self::new(SecretKey::random(), peers, None, None).await - } - - /// Instantiate a new client. - /// - /// Optionally specify the duration for the connection timeout. - /// - /// Defaults to [`CONNECTION_TIMEOUT`]. - /// - /// # Arguments - /// * 'signer' - [SecretKey] - /// * 'peers' - [Option]<[Vec]<[Multiaddr]>> - /// * 'connection_timeout' - [Option]<[Duration]> : Specification for client connection timeout set via Optional - /// * 'client_event_broadcaster' - [Option]<[ClientEventsBroadcaster]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn new( - signer: SecretKey, - peers: Option>, - connection_timeout: Option, - client_event_broadcaster: Option, - ) -> Result { - // If any of our contact peers has a global address, we'll assume we're in a global network. 
- let local = match peers { - Some(ref peers) => !peers.iter().any(multiaddr_is_global), - None => true, - }; - - info!("Startup a client with peers {peers:?} and local {local:?} flag"); - info!("Starting Kad swarm in client mode..."); - - #[cfg(target_arch = "wasm32")] - let root_dir = PathBuf::from("dummy path, wasm32/browser environments will not use this"); - #[cfg(not(target_arch = "wasm32"))] - let root_dir = std::env::temp_dir(); - trace!("Starting Kad swarm in client mode..{root_dir:?}."); - - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), local, root_dir); - - let (network, mut network_event_receiver, swarm_driver) = network_builder.build_client()?; - info!("Client constructed network and swarm_driver"); - - // If the events broadcaster is not provided by the caller, then we create a new one. - // This is not optional as we wait on certain events to connect to the network and return from this function. - let events_broadcaster = client_event_broadcaster.unwrap_or_default(); - - let client = Self { - network: network.clone(), - events_broadcaster, - signer: Arc::new(signer), - }; - - // subscribe to our events channel first, so we don't have intermittent - // errors if it does not exist and we cannot send to it. 
- // (eg, if PeerAdded happens faster than our events channel is created) - let mut client_events_rx = client.events_channel(); - - let _swarm_driver = spawn({ - trace!("Starting up client swarm_driver"); - swarm_driver.run() - }); - - // spawn task to dial to the given peers - let network_clone = network.clone(); - let _handle = spawn(async move { - if let Some(peers) = peers { - for addr in peers { - trace!(%addr, "dialing initial peer"); - - if let Err(err) = network_clone.dial(addr.clone()).await { - tracing::error!(%addr, "Failed to dial: {err:?}"); - }; - } - } - }); - - // spawn task to wait for NetworkEvent and check for inactivity - let mut client_clone = client.clone(); - let _event_handler = spawn(async move { - let mut peers_added: usize = 0; - loop { - match timeout(INACTIVITY_TIMEOUT, network_event_receiver.recv()).await { - Ok(event) => { - let the_event = match event { - Some(the_event) => the_event, - None => { - error!("The `NetworkEvent` channel has been closed"); - continue; - } - }; - - let start = Instant::now(); - let event_string = format!("{the_event:?}"); - if let Err(err) = - client_clone.handle_network_event(the_event, &mut peers_added) - { - warn!("Error handling network event: {err}"); - } - trace!( - "Handled network event in {:?}: {:?}", - start.elapsed(), - event_string - ); - } - Err(_elapse_err) => { - debug!("Client inactivity... 
waiting for a network event"); - client_clone - .events_broadcaster - .broadcast(ClientEvent::InactiveClient(INACTIVITY_TIMEOUT)); - } - } - } - }); - - // loop to connect to the network - let mut is_connected = false; - let connection_timeout = connection_timeout.unwrap_or(CONNECTION_TIMEOUT); - let mut unsupported_protocol_tracker: Option<(String, String)> = None; - - debug!("Client connection timeout: {connection_timeout:?}"); - let mut connection_timeout_interval = interval(connection_timeout); - // first tick completes immediately - connection_timeout_interval.tick().await; - - loop { - tokio::select! { - _ = connection_timeout_interval.tick() => { - if !is_connected { - if let Some((our_protocol, their_protocols)) = unsupported_protocol_tracker { - error!("Timeout: Client could not connect to the network as it does not support the protocol"); - break Err(Error::UnsupportedProtocol(our_protocol, their_protocols)); - } - error!("Timeout: Client failed to connect to the network within {connection_timeout:?}"); - break Err(Error::ConnectionTimeout(connection_timeout)); - } - } - event = client_events_rx.recv() => { - match event { - // we do not error out directly as we might still connect if the other initial peers are from - // the correct network. - Ok(ClientEvent::PeerWithUnsupportedProtocol { our_protocol, their_protocol }) => { - warn!(%our_protocol, %their_protocol, "Client tried to connect to a peer with an unsupported protocol. 
Tracking the latest one"); - unsupported_protocol_tracker = Some((our_protocol, their_protocol)); - } - Ok(ClientEvent::ConnectedToNetwork) => { - is_connected = true; - info!("Client connected to the Network {is_connected:?}."); - break Ok(()); - } - Ok(ClientEvent::InactiveClient(timeout)) => { - if is_connected { - info!("The client was inactive for {timeout:?}."); - } else { - info!("The client still does not know enough network nodes."); - } - } - Err(err) => { - error!("Unexpected error during client startup {err:?}"); - println!("Unexpected error during client startup {err:?}"); - break Err(err.into()); - } - _ => {} - } - }} - }?; - - Ok(client) - } - - fn handle_network_event(&mut self, event: NetworkEvent, peers_added: &mut usize) -> Result<()> { - match event { - NetworkEvent::PeerAdded(peer_id, _connected_peer) => { - debug!("PeerAdded: {peer_id}"); - *peers_added += 1; - - // notify the listeners that we are waiting on CLOSE_GROUP_SIZE peers before emitting ConnectedToNetwork - self.events_broadcaster.broadcast(ClientEvent::PeerAdded { - max_peers_to_connect: CLOSE_GROUP_SIZE, - }); - // In case client running in non-local mode, - // it may take some time to fill up the RT. - // To avoid such delay may fail the query with RecordNotFound, - // wait till certain amount of peers populated into RT - if *peers_added >= CLOSE_GROUP_SIZE { - self.events_broadcaster - .broadcast(ClientEvent::ConnectedToNetwork); - } else { - debug!("{peers_added}/{CLOSE_GROUP_SIZE} initial peers found.",); - } - } - NetworkEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - } => { - self.events_broadcaster - .broadcast(ClientEvent::PeerWithUnsupportedProtocol { - our_protocol, - their_protocol, - }); - } - _other => {} - } - - Ok(()) - } - - /// Get the client events channel. 
- /// - /// Return Type: - /// - /// [ClientEventsReceiver] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error, ClientEvent}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Using client.events_channel() to publish messages - /// let mut events_channel = client.events_channel(); - /// while let Ok(event) = events_channel.recv().await { - /// // Handle the event - /// } - /// - /// # Ok(()) - /// # } - /// ``` - pub fn events_channel(&self) -> ClientEventsReceiver { - self.events_broadcaster.subscribe() - } - - /// Return the underlying network GetRange - pub async fn get_range(&self) -> Result { - self.network.get_range().await.map_err(Error::from) - } - - /// Sign the given data. - /// - /// # Arguments - /// * 'data' - bytes; i.e bytes of an sn_registers::Register instance - /// - /// Return Type: - /// - /// [Signature] - /// - pub fn sign>(&self, data: T) -> Signature { - self.signer.sign(data) - } - - /// Return a reference to the signer secret key. - /// - /// Return Type: - /// - /// [SecretKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let secret_key_reference = client.signer(); - /// # Ok(()) - /// # } - /// ``` - pub fn signer(&self) -> &SecretKey { - &self.signer - } - - /// Return the public key of the data signing key. 
- /// - /// Return Type: - /// - /// [PublicKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let public_key_reference = client.signer_pk(); - /// # Ok(()) - /// # } - /// ``` - pub fn signer_pk(&self) -> PublicKey { - self.signer.public_key() - } - - /// Set the signing key for this client. - /// - /// # Arguments - /// * 'sk' - [SecretKey] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let mut client = Client::new(SecretKey::random(), None, None, None).await?; - /// client.set_signer_key(SecretKey::random()); - /// # Ok(()) - /// # } - /// ``` - pub fn set_signer_key(&mut self, sk: SecretKey) { - self.signer = Arc::new(sk); - } - - /// Get a register from network - /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[SignedRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// // Set up a client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Get a signed register - /// let signed_register = client.get_signed_register_from_network(address, true); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_signed_register_from_network( - &self, - address: RegisterAddress, - is_verifying: bool, - ) -> Result { - let key = 
NetworkAddress::from_register_address(address).to_record_key(); - let get_quorum = if is_verifying { - Quorum::All - } else { - Quorum::Majority - }; - let retry_strategy = if is_verifying { - Some(RetryStrategy::Balanced) - } else { - Some(RetryStrategy::Quick) - }; - let get_cfg = GetRecordCfg { - get_quorum, - retry_strategy, - target_record: None, - expected_holders: Default::default(), - is_register: true, - }; - - let maybe_record = self.network.get_record_from_network(key, &get_cfg).await; - let record = match &maybe_record { - Ok(r) => r, - Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { - let mut results_to_merge = HashMap::default(); - - for (address, (r, _peers)) in result_map { - results_to_merge.insert(*address, r.clone()); - } - - return merge_register_records(address, &results_to_merge); - } - Err(e) => { - warn!("Failed to get record at {address:?} from the network: {e:?}"); - return Err(ProtocolError::RegisterNotFound(Box::new(address)).into()); - } - }; - - debug!( - "Got record from the network, {:?}", - PrettyPrintRecordKey::from(&record.key) - ); - - let register = get_register_from_record(record) - .map_err(|_| ProtocolError::RegisterNotFound(Box::new(address)))?; - Ok(register) - } - - /// Retrieve a Register from the network. 
- /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[ClientRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// // Set up a client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Get the register - /// let retrieved_register = client.get_register(address); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_register(&self, address: RegisterAddress) -> Result { - info!("Retrieving a Register replica at {address}"); - ClientRegister::retrieve(self.clone(), address).await - } - - /// Create a new Register on the Network. 
- /// Tops up payments and retries if necessary and verification failed - /// - /// # Arguments - /// * 'address' - [XorName] - /// * 'wallet_client' - [WalletClient] - /// * 'verify_store' - Boolean - /// * 'perms' - [Permissions] - /// - /// Return Type: - /// - /// Result<([ClientRegister], [NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{MainSecretKey}; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client, Wallet, etc - /// use sn_registers::Permissions; - /// use sn_transfers::HotWallet; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Example: - /// let (mut client_register, _storage_cost, _royalties_fees) = client - /// .create_and_pay_for_register( - /// xorname, - /// &mut wallet_client, - /// true, - /// Permissions::default(), - /// ) - /// .await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create_and_pay_for_register( - &self, - address: XorName, - wallet_client: &mut WalletClient, - verify_store: bool, - perms: Permissions, - ) -> Result<(ClientRegister, NanoTokens, NanoTokens)> { - info!("Instantiating a new Register replica with address {address:?}"); - let (reg, mut total_cost, mut total_royalties) = ClientRegister::create_online( - self.clone(), - address, - wallet_client, - false, - perms.clone(), 
- ) - .await?; - - debug!("{address:?} Created in theory"); - let reg_address = reg.address(); - if verify_store { - debug!("We should verify stored at {address:?}"); - let mut stored = self.verify_register_stored(*reg_address).await.is_ok(); - - while !stored { - info!("Register not completely stored on the network yet. Retrying..."); - // this verify store call here ensures we get the record from Quorum::all - let (reg, top_up_cost, royalties_top_up) = ClientRegister::create_online( - self.clone(), - address, - wallet_client, - true, - perms.clone(), - ) - .await?; - let reg_address = reg.address(); - - total_cost = total_cost - .checked_add(top_up_cost) - .ok_or(Error::TotalPriceTooHigh)?; - total_royalties = total_cost - .checked_add(royalties_top_up) - .ok_or(Error::Wallet(sn_transfers::WalletError::from( - sn_transfers::TransferError::ExcessiveNanoValue, - )))?; - stored = self.verify_register_stored(*reg_address).await.is_ok(); - } - } - - Ok((reg, total_cost, total_royalties)) - } - - /// Store `Chunk` as a record. Protected method. 
- /// - /// # Arguments - /// * 'chunk' - [Chunk] - /// * 'payee' - [PeerId] - /// * 'payment' - [Payment] - /// * 'verify_store' - Boolean - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - /// - pub(super) async fn store_chunk( - &self, - chunk: Chunk, - payee: PeerId, - payment: Payment, - verify_store: bool, - retry_strategy: Option, - ) -> Result<()> { - info!("Store chunk: {:?}", chunk.address()); - let key = chunk.network_address().to_record_key(); - let retry_strategy = Some(retry_strategy.unwrap_or(RetryStrategy::Quick)); - - let record_kind = RecordKind::ChunkWithPayment; - let record = Record { - key: key.clone(), - value: try_serialize_record(&(payment, chunk.clone()), record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let verification = if verify_store { - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N(QUORUM_N_IS_2), - retry_strategy, - target_record: None, // Not used since we use ChunkProof - expected_holders: Default::default(), - is_register: false, - }; - // The `ChunkWithPayment` is only used to send out via PutRecord. - // The holders shall only hold the `Chunk` copies. - // Hence the fetched copies shall only be a `Chunk` - - let stored_on_node = try_serialize_record(&chunk, RecordKind::Chunk)?.to_vec(); - let random_nonce = thread_rng().gen::(); - let expected_proof = ChunkProof::new(&stored_on_node, random_nonce); - - Some(( - VerificationKind::ChunkProof { - expected_proof, - nonce: random_nonce, - }, - verification_cfg, - )) - } else { - None - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::One, - retry_strategy, - use_put_record_to: Some(vec![payee]), - verification, - }; - Ok(self.network.put_record(record, &put_cfg).await?) - } - - /// Get chunk from chunk address. 
- /// - /// # Arguments - /// * 'address' - [ChunkAddress] - /// * 'show_holders' - Boolean - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - /// - /// Return Type: - /// - /// Result<[Chunk]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use xor_name::XorName; - /// use sn_protocol::storage::ChunkAddress; - /// // client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // chunk address - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let chunk_address = ChunkAddress::new(xorname); - /// // get chunk - /// let chunk = client.get_chunk(chunk_address,true, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn get_chunk( - &self, - address: ChunkAddress, - show_holders: bool, - retry_strategy: Option, - ) -> Result { - info!("Getting chunk: {address:?}"); - let key = NetworkAddress::from_chunk_address(address).to_record_key(); - - let expected_holders = if show_holders { - let result: HashSet<_> = self - .network - .get_closest_peers(&NetworkAddress::from_chunk_address(address), true) - .await? - .iter() - .cloned() - .collect(); - result - } else { - Default::default() - }; - - let get_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: Some(retry_strategy.unwrap_or(RetryStrategy::Quick)), - target_record: None, - expected_holders, - is_register: false, - }; - let record = self.network.get_record_from_network(key, &get_cfg).await?; - let header = RecordHeader::from_record(&record)?; - if let RecordKind::Chunk = header.kind { - let chunk: Chunk = try_deserialize_record(&record)?; - Ok(chunk) - } else { - Err(NetworkError::RecordKindMismatch(RecordKind::Chunk).into()) - } - } - - /// Verify if a `Chunk` is stored by expected nodes on the network. - /// Single local use. Marked Private. 
- pub async fn verify_chunk_stored(&self, chunk: &Chunk) -> Result<()> { - let address = chunk.network_address(); - info!("Verifying chunk: {address:?}"); - let random_nonce = thread_rng().gen::(); - let record_value = try_serialize_record(&chunk, RecordKind::Chunk)?; - let expected_proof = ChunkProof::new(record_value.as_ref(), random_nonce); - - if let Err(err) = self - .network - .verify_chunk_existence( - address.clone(), - random_nonce, - expected_proof, - Quorum::N(QUORUM_N_IS_2), - None, - ) - .await - { - error!("Failed to verify the existence of chunk {address:?} with err {err:?}"); - return Err(err.into()); - } - - Ok(()) - } - - /// Verify if a `Register` is stored by expected nodes on the network. - /// - /// # Arguments - /// * 'address' - [RegisterAddress] - /// - /// Return Type: - /// - /// Result<[SignedRegister]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Verify address is stored - /// let is_stored = client.verify_register_stored(address).await.is_ok(); - /// # Ok(()) - /// # } - /// ``` - pub async fn verify_register_stored(&self, address: RegisterAddress) -> Result { - info!("Verifying register: {address:?}"); - self.get_signed_register_from_network(address, true).await - } - - /// Quickly checks if a `Register` is stored by expected nodes on the network. 
- /// - /// To be used for initial register put checks eg, if we expect the data _not_ - /// to exist, we can use it and essentially use the RetryStrategy::Quick under the hood - /// - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_registers::RegisterAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// // Set up Client - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Set up an address - /// let mut rng = rand::thread_rng(); - /// let owner = SecretKey::random().public_key(); - /// let xorname = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xorname, owner); - /// // Verify address is stored - /// let is_stored = client.verify_register_stored(address).await.is_ok(); - /// # Ok(()) - /// # } - /// ``` - pub async fn quickly_check_if_register_stored( - &self, - address: RegisterAddress, - ) -> Result { - info!("Quickly checking for existing register : {address:?}"); - self.get_signed_register_from_network(address, false).await - } - - /// Send a `SpendCashNote` request to the network. Protected method. 
- /// - /// # Arguments - /// * 'spend' - [SignedSpend] - /// * 'verify_store' - Boolean - /// - pub(crate) async fn network_store_spend( - &self, - spend: SignedSpend, - verify_store: bool, - ) -> Result<()> { - let unique_pubkey = *spend.unique_pubkey(); - let cash_note_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); - let network_address = NetworkAddress::from_spend_address(cash_note_addr); - - let key = network_address.to_record_key(); - - let record_kind = RecordKind::Spend; - let record = Record { - key: key.clone(), - value: try_serialize_record(&[spend], record_kind)?.to_vec(), - publisher: None, - expires: None, - }; - - let pretty_key = PrettyPrintRecordKey::from(&key); - info!("Sending spend {unique_pubkey:?} to the network via put_record, with addr of {cash_note_addr:?} - {pretty_key:?}, size of {}", - record.value.len()); - - let (record_to_verify, expected_holders) = if verify_store { - let expected_holders: HashSet<_> = self - .network - .get_closest_peers(&network_address, true) - .await? - .iter() - .cloned() - .collect(); - info!("Expecting holders: {expected_holders:?}"); - (Some(record.clone()), expected_holders) - } else { - (None, Default::default()) - }; - - // When there is retry on Put side, no need to have a retry on Get - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::All, - retry_strategy: None, - target_record: record_to_verify, - expected_holders, - is_register: false, - }; - - let verification = if verify_store { - Some((VerificationKind::Network, verification_cfg)) - } else { - None - }; - - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: Some(RetryStrategy::Persistent), - use_put_record_to: None, - verification, - }; - - Ok(self.network.put_record(record, &put_cfg).await?) - } - - /// Get a spend from network. 
- /// - /// # Arguments - /// * 'address' - [SpendAddress] - /// - /// Return Type: - /// - /// Result<[SignedSpend]> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// use xor_name::XorName; - /// use sn_transfers::SpendAddress; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Create a SpendAddress - /// let mut rng = rand::thread_rng(); - /// let xorname = XorName::random(&mut rng); - /// let spend_address = SpendAddress::new(xorname); - /// // Here we get the spend address - /// let spend = client.get_spend_from_network(spend_address).await?; - /// // Example: We can use the spend to get its unique public key: - /// let unique_pubkey = spend.unique_pubkey(); - /// # Ok(()) - /// # } - /// ``` - pub async fn get_spend_from_network(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::All, - retry_strategy: Some(RetryStrategy::Balanced), - target_record: None, - expected_holders: Default::default(), - is_register: false, - }, - ) - .await - } - - /// Try to peek a spend by just fetching one copy of it. - /// Useful to help decide whether a re-put is necessary, or a spend exists already - /// (client side verification). - pub async fn peek_a_spend(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: None, - target_record: None, - expected_holders: Default::default(), - is_register: false, - }, - ) - .await - } - - /// This is a similar funcation to `get_spend_from_network` to get a spend from network. - /// Just using different `RetryStrategy` to improve the performance during crawling. 
- pub async fn crawl_spend_from_network(&self, address: SpendAddress) -> Result { - self.try_fetch_spend_from_network( - address, - GetRecordCfg { - get_quorum: Quorum::All, - retry_strategy: None, - target_record: None, - expected_holders: Default::default(), - is_register: false, - }, - ) - .await - } - - /// Try to confirm the Genesis spend doesn't present in the network yet. - /// It shall be quick, and any signle returned copy shall consider as error. - pub async fn is_genesis_spend_present(&self) -> bool { - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - self.peek_a_spend(genesis_addr).await.is_ok() - } - - async fn try_fetch_spend_from_network( - &self, - address: SpendAddress, - get_cfg: GetRecordCfg, - ) -> Result { - let key = NetworkAddress::from_spend_address(address).to_record_key(); - - info!( - "Getting spend at {address:?} with record_key {:?}", - PrettyPrintRecordKey::from(&key) - ); - let record = self - .network - .get_record_from_network(key.clone(), &get_cfg) - .await?; - info!( - "For spend at {address:?} got record from the network, {:?}", - PrettyPrintRecordKey::from(&record.key) - ); - - let signed_spend = get_signed_spend_from_record(&address, &record)?; - - // check addr - let spend_addr = SpendAddress::from_unique_pubkey(signed_spend.unique_pubkey()); - if address != spend_addr { - let s = format!("Spend got from the Network at {address:?} contains different spend address: {spend_addr:?}"); - warn!("{s}"); - return Err(Error::Transfer(TransferError::InvalidSpendValue( - *signed_spend.unique_pubkey(), - ))); - } - - // check spend - match signed_spend.verify() { - Ok(()) => { - trace!("Verified signed spend got from network for {address:?}"); - Ok(signed_spend.clone()) - } - Err(err) => { - warn!("Invalid signed spend got from network for {address:?}: {err:?}."); - Err(Error::from(err)) - } - } - } - - /// This function is used to receive a Vector of CashNoteRedemptions and turn them back into spendable 
CashNotes. - /// For this we need a network connection. - /// Verify CashNoteRedemptions and rebuild spendable currency from them. - /// Returns an `Error::InvalidTransfer` if any CashNoteRedemption is not valid - /// Else returns a list of CashNotes that can be spent by the owner. - /// - /// # Arguments - /// * 'main_pubkey' - [MainPubkey] - /// * 'cashnote_redemptions' - [CashNoteRedemption] - /// - /// Return Type: - /// - /// Result<[Vec]<[CashNote]>> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, Error}; - /// use bls::SecretKey; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use sn_transfers::{CashNote, CashNoteRedemption, MainPubkey}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Create a main public key - /// let pk = SecretKey::random().public_key(); - /// let main_pub_key = MainPubkey::new(pk); - /// // Create a Cash Note Redemption Vector - /// let cash_note = CashNote::from_hex("&hex").unwrap(); - /// let cashNoteRedemption = CashNoteRedemption::from_cash_note(&cash_note); - /// let vector = vec![cashNoteRedemption.clone(), cashNoteRedemption.clone()]; - /// // Verify the cash note redemptions - /// let cash_notes = client.verify_cash_notes_redemptions(main_pub_key,&vector); - /// # Ok(()) - /// # } - /// ``` - pub async fn verify_cash_notes_redemptions( - &self, - main_pubkey: MainPubkey, - cashnote_redemptions: &[CashNoteRedemption], - ) -> Result> { - let cash_notes = self - .network - .verify_cash_notes_redemptions(main_pubkey, cashnote_redemptions) - .await?; - Ok(cash_notes) - } -} - -fn get_register_from_record(record: &Record) -> Result { - let header = RecordHeader::from_record(record)?; - - if let RecordKind::Register = header.kind { - let register = try_deserialize_record::(record)?; - Ok(register) - } else { - error!("RecordKind mismatch while trying to retrieve a signed register"); - Err(NetworkError::RecordKindMismatch(RecordKind::Register).into()) - 
} -} - -/// if multiple register records where found for a given key, merge them into a single register -fn merge_register_records( - address: RegisterAddress, - map: &HashMap, -) -> Result { - let key = NetworkAddress::from_register_address(address).to_record_key(); - let pretty_key = PrettyPrintRecordKey::from(&key); - info!( - "Got {} register records from the network for key: {pretty_key:?}", - map.len() - ); - let mut all_registers = vec![]; - for record in map.values() { - match get_register_from_record(record) { - Ok(r) => all_registers.push(r), - Err(e) => { - warn!("Ignoring invalid register record {pretty_key:?} with error {e:?}"); - continue; - } - } - } - - // get the first valid register - let one_valid_reg = if let Some(r) = all_registers.clone().iter().find(|r| r.verify().is_ok()) { - r.clone() - } else { - error!("No valid register records found for {key:?}"); - return Err(Error::Protocol(ProtocolError::RegisterNotFound(Box::new( - address, - )))); - }; - - // merge it with the others if they are valid - let register: SignedRegister = all_registers.into_iter().fold(one_valid_reg, |mut acc, r| { - if acc.verified_merge(&r).is_err() { - warn!("Skipping register that failed to merge. 
Entry found for {key:?}"); - } - acc - }); - - Ok(register) -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeSet; - - use sn_registers::{Register, RegisterCrdt, RegisterOp}; - - use super::*; - - fn write_atop( - signed_reg: &mut SignedRegister, - crdt_reg: &mut RegisterCrdt, - entry: &[u8], - owner: &SecretKey, - ) -> eyre::Result<()> { - let children: BTreeSet<_> = crdt_reg.read().iter().map(|(hash, _)| *hash).collect(); - - let (_hash, address, crdt_op) = crdt_reg.write(entry.to_vec(), &children)?; - - let op = RegisterOp::new(address, crdt_op, owner); - - signed_reg.add_op(op)?; - - Ok(()) - } - - #[test] - fn test_merge_register_records() -> eyre::Result<()> { - let mut rng = rand::thread_rng(); - let meta = XorName::random(&mut rng); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(meta, owner_pk); - - let base_register = Register::new(owner_pk, meta, Default::default()); - let signature = owner_sk.sign(base_register.bytes()?); - - // prepare registers - let mut register_root = SignedRegister::new(base_register, signature, BTreeSet::new()); - let mut crdt_reg_root = RegisterCrdt::new(address); - - write_atop( - &mut register_root, - &mut crdt_reg_root, - b"root_entry", - &owner_sk, - )?; - - let mut signed_register1 = register_root.clone(); - let mut crdt_reg1 = crdt_reg_root.clone(); - write_atop(&mut signed_register1, &mut crdt_reg1, b"entry1", &owner_sk)?; - - let mut signed_register2 = register_root.clone(); - let mut crdt_reg2 = crdt_reg_root.clone(); - write_atop(&mut signed_register2, &mut crdt_reg2, b"entry2", &owner_sk)?; - - let base_register_bad = Register::new(owner_pk, meta, Default::default()); - let bad_sk = SecretKey::random(); - let signature_bad = bad_sk.sign(base_register_bad.bytes()?); - let signed_register_bad = - SignedRegister::new(base_register_bad, signature_bad, BTreeSet::new()); - - // prepare records - let record1 = Record { - key: 
NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register1, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname1 = XorName::from_content(&record1.value); - let record2 = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register2, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname2 = XorName::from_content(&record2.value); - let record_bad = Record { - key: NetworkAddress::from_register_address(address).to_record_key(), - value: try_serialize_record(&signed_register_bad, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - let xorname_bad = XorName::from_content(&record_bad.value); - - // test with 2 valid records: should return the two merged - let mut expected_merge = signed_register1.clone(); - expected_merge.merge(&signed_register2)?; - let map = HashMap::from_iter(vec![(xorname1, record1.clone()), (xorname2, record2)]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, expected_merge); - - // test with 1 valid record and 1 invalid record: should return the valid one - let map = HashMap::from_iter(vec![(xorname1, record1), (xorname2, record_bad.clone())]); - let reg = merge_register_records(address, &map)?; // Ok - assert_eq!(reg, signed_register1); - - // test with 2 invalid records: should error out - let map = HashMap::from_iter(vec![ - (xorname_bad, record_bad.clone()), - (xorname_bad, record_bad), - ]); - let res = merge_register_records(address, &map); // Err - assert!(res.is_err()); - - Ok(()) - } -} diff --git a/sn_client/src/audit.rs b/sn_client/src/audit.rs deleted file mode 100644 index 0d9bb8daec..0000000000 --- a/sn_client/src/audit.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod dag_crawling; -mod dag_error; -mod spend_dag; - -#[cfg(test)] -mod tests; - -pub use dag_error::{DagError, SpendFault}; -pub use spend_dag::{SpendDag, SpendDagGet}; diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs deleted file mode 100644 index fa00a5078f..0000000000 --- a/sn_client/src/audit/dag_crawling.rs +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use crate::{Client, Error, SpendDag}; - -use dashmap::DashMap; -use futures::{ - future::join_all, - stream::{self, StreamExt}, -}; -use sn_networking::{GetRecordError, NetworkError}; -use sn_transfers::{ - NanoTokens, SignedSpend, SpendAddress, SpendReason, UniquePubkey, WalletError, WalletResult, - DEFAULT_NETWORK_ROYALTIES_PK, GENESIS_SPEND_UNIQUE_KEY, NETWORK_ROYALTIES_PK, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::mpsc::Sender; - -const SPENDS_PROCESSING_BUFFER_SIZE: usize = 4096; - -enum InternalGetNetworkSpend { - Spend(Box), - DoubleSpend(Vec), - NotFound, - Error(Error), -} - -impl Client { - pub async fn new_dag_with_genesis_only(&self) -> WalletResult { - let genesis_addr = SpendAddress::from_unique_pubkey(&GENESIS_SPEND_UNIQUE_KEY); - let mut dag = SpendDag::new(genesis_addr); - match self.get_spend_from_network(genesis_addr).await { - Ok(spend) => { - dag.insert(genesis_addr, spend); - } - Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => { - println!("Burnt spend detected at Genesis: {genesis_addr:?}"); - warn!("Burnt spend detected at Genesis: {genesis_addr:?}"); - for (i, spend) in spends.into_iter().enumerate() { - let reason = spend.reason(); - let amount = spend.spend.amount(); - let ancestors_len = spend.spend.ancestors.len(); - let descendants_len = spend.spend.descendants.len(); - let roy_len = spend.spend.network_royalties().len(); - warn!( - "burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", - spend.spend.ancestors, spend.spend.descendants - ); - dag.insert(genesis_addr, spend); - } - } - Err(e) => return Err(WalletError::FailedToGetSpend(e.to_string())), - }; - - Ok(dag) - } - - /// Builds a SpendDag from a given SpendAddress recursively following descendants all the way to UTxOs - /// Started from Genesis this gives the entire SpendDag of the 
Network at a certain point in time - /// Once the DAG collected, optionally verifies and records errors in the DAG - /// - /// ```text - /// -> Spend7 ---> UTXO_11 - /// / - /// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10 - /// \ - /// ---> Spend3 ---> Spend6 ---> UTXO_9 - /// \ - /// -> Spend4 ---> UTXO_8 - /// - /// ``` - pub async fn spend_dag_build_from( - &self, - spend_addr: SpendAddress, - spend_processing: Option>, - verify: bool, - ) -> WalletResult { - let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE); - - // start crawling from the given spend address - let self_clone = self.clone(); - let crawl_handle = - tokio::spawn(async move { self_clone.spend_dag_crawl_from(spend_addr, tx).await }); - - // start DAG building from the spends gathered while crawling - // forward spends to processing if provided - let build_handle: tokio::task::JoinHandle> = - tokio::spawn(async move { - debug!("Starting building DAG from {spend_addr:?}..."); - let now = std::time::Instant::now(); - let mut dag = SpendDag::new(spend_addr); - while let Some(spend) = rx.recv().await { - let addr = spend.address(); - debug!( - "Inserting spend at {addr:?} size: {}", - dag.all_spends().len() - ); - dag.insert(addr, spend.clone()); - if let Some(sender) = &spend_processing { - let outputs = spend.spend.descendants.len() as u64; - sender - .send((spend, outputs, false)) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - } - info!( - "Done gathering DAG of size: {} in {:?}", - dag.all_spends().len(), - now.elapsed() - ); - Ok(dag) - }); - - // wait for both to finish - let (crawl_res, build_res) = tokio::join!(crawl_handle, build_handle); - crawl_res.map_err(|e| { - WalletError::SpendProcessing(format!("Failed to Join crawling results {e}")) - })??; - let mut dag = build_res.map_err(|e| { - WalletError::SpendProcessing(format!("Failed to Join DAG building results {e}")) - })??; - - // verify the DAG - if verify { - info!("Now 
verifying SpendDAG from {spend_addr:?} and recording errors..."); - let start = std::time::Instant::now(); - if let Err(e) = dag.record_faults(&dag.source()) { - let s = format!( - "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}" - ); - error!("{s}"); - return Err(WalletError::Dag(s)); - } - let elapsed = start.elapsed(); - info!("Finished verifying SpendDAG from {spend_addr:?} in {elapsed:?}"); - } - - Ok(dag) - } - - /// Get spends from a set of given SpendAddresses - /// Drain the addresses at the same layer first, then: - /// 1, return failed_utxos for re-attempt (with insertion time stamp) - /// 2, return fetched_address to avoid un-necessary re-attempts - /// 3, return addrs_for_further_track for further track - pub async fn crawl_to_next_utxos( - &self, - addrs_to_get: BTreeMap, - sender: Sender<(SignedSpend, u64, bool)>, - reattempt_seconds: u64, - ) -> ( - BTreeMap, - Vec, - BTreeSet<(SpendAddress, NanoTokens)>, - ) { - // max concurrency for the tasks of fetching records from network. 
- const MAX_CONCURRENT: usize = 64; - - let failed_utxos_arc: Arc> = Arc::new(DashMap::new()); - let addrs_for_further_track_arc: Arc> = Arc::new(DashMap::new()); - let fetched_addrs_arc: Arc> = Arc::new(DashMap::new()); - - stream::iter(addrs_to_get.into_iter()) - .map(|(addr, (failed_times, amount))| { - let client_clone = self.clone(); - let sender_clone = sender.clone(); - - let failed_utxos = Arc::clone(&failed_utxos_arc); - let addrs_for_further_track = Arc::clone(&addrs_for_further_track_arc); - let fetched_addrs = Arc::clone(&fetched_addrs_arc); - async move { - let result = client_clone.crawl_spend(addr).await; - - match result { - InternalGetNetworkSpend::Spend(spend) => { - let for_further_track = beta_track_analyze_spend(&spend); - let _ = sender_clone - .send((*spend, for_further_track.len() as u64, false)) - .await; - for entry in for_further_track { - let _ = addrs_for_further_track.insert(entry, ()); - } - fetched_addrs.insert(addr, ()); - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - warn!( - "Detected burnt spend regarding {addr:?} - {:?}", - spends.len() - ); - - for (i, spend) in spends.into_iter().enumerate() { - let reason = spend.reason(); - let amount = spend.spend.amount(); - let ancestors_len = spend.spend.ancestors.len(); - let descendants_len = spend.spend.descendants.len(); - let roy_len = spend.spend.network_royalties().len(); - warn!("burnt spend entry {i} reason {reason:?}, amount {amount}, ancestors: {ancestors_len}, descendants: {descendants_len}, royalties: {roy_len}, {:?} - {:?}", - spend.spend.ancestors, spend.spend.descendants); - } - fetched_addrs.insert(addr, ()); - } - InternalGetNetworkSpend::NotFound => { - let reattempt_interval = if amount.as_nano() > 100000 { - info!("Not find spend of big-UTXO {addr:?} with {amount}"); - reattempt_seconds - } else { - reattempt_seconds * (failed_times * 8 + 1) - }; - failed_utxos.insert( - addr, - ( - failed_times + 1, - Instant::now() + 
Duration::from_secs(reattempt_interval), - amount, - ), - ); - } - InternalGetNetworkSpend::Error(e) => { - warn!("Fetching spend {addr:?} with {amount:?} result in error {e:?}"); - // Error of `NotEnoughCopies` could be re-attempted and succeed eventually. - failed_utxos.insert( - addr, - ( - failed_times + 1, - Instant::now() + Duration::from_secs(reattempt_seconds), - amount, - ), - ); - } - } - - (addr, amount) - } - }) - .buffer_unordered(MAX_CONCURRENT) - .for_each(|(address, amount)| async move { - info!("Completed fetching attempt of {address:?} with amount {amount:?}"); - }) - .await; - - let mut failed_utxos_result = BTreeMap::new(); - for entry in failed_utxos_arc.iter() { - let key = entry.key(); - let val = entry.value(); - let _ = failed_utxos_result.insert(*key, *val); - } - - let mut fetched_addrs = Vec::new(); - for entry in fetched_addrs_arc.iter() { - let key = entry.key(); - fetched_addrs.push(*key); - } - - let mut addrs_for_further_track = BTreeSet::new(); - for entry in addrs_for_further_track_arc.iter() { - let key = entry.key(); - let _ = addrs_for_further_track.insert(*key); - } - - (failed_utxos_result, fetched_addrs, addrs_for_further_track) - } - - /// Crawls the Spend Dag from a given SpendAddress recursively - /// following descendants all the way to UTXOs - /// Returns the UTXOs reached - pub async fn spend_dag_crawl_from( - &self, - spend_addr: SpendAddress, - spend_processing: Sender, - ) -> WalletResult> { - info!("Crawling spend DAG from {spend_addr:?}"); - let mut utxos = BTreeSet::new(); - - // get first spend - let mut descendants_to_follow = match self.crawl_spend(spend_addr).await { - InternalGetNetworkSpend::Spend(spend) => { - let spend = *spend; - let descendants_to_follow = spend.spend.descendants.clone(); - - spend_processing - .send(spend) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - descendants_to_follow - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - let mut descendants_to_follow 
= BTreeMap::new(); - for spend in spends.into_iter() { - descendants_to_follow.extend(spend.spend.descendants.clone()); - spend_processing - .send(spend) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - descendants_to_follow - } - InternalGetNetworkSpend::NotFound => { - // the cashnote was not spent yet, so it's an UTXO - info!("UTXO at {spend_addr:?}"); - utxos.insert(spend_addr); - return Ok(utxos); - } - InternalGetNetworkSpend::Error(e) => { - return Err(WalletError::FailedToGetSpend(e.to_string())); - } - }; - - // use iteration instead of recursion to avoid stack overflow - let mut known_descendants: BTreeSet = BTreeSet::new(); - let mut gen: u32 = 0; - let start = std::time::Instant::now(); - - while !descendants_to_follow.is_empty() { - let mut next_gen_descendants = BTreeMap::new(); - - // list up all descendants - let mut addrs = vec![]; - for (descendant, _amount) in descendants_to_follow.iter() { - let addrs_to_follow = SpendAddress::from_unique_pubkey(descendant); - info!("Gen {gen} - Following descendant : {descendant:?}"); - addrs.push(addrs_to_follow); - } - - // get all spends in parallel - let mut stream = futures::stream::iter(addrs.clone()) - .map(|a| async move { (self.crawl_spend(a).await, a) }) - .buffer_unordered(crate::MAX_CONCURRENT_TASKS); - info!( - "Gen {gen} - Getting {} spends from {} txs in batches of: {}", - addrs.len(), - descendants_to_follow.len(), - crate::MAX_CONCURRENT_TASKS, - ); - - // insert spends in the dag as they are collected - while let Some((get_spend, addr)) = stream.next().await { - match get_spend { - InternalGetNetworkSpend::Spend(spend) => { - next_gen_descendants.extend(spend.spend.descendants.clone()); - spend_processing - .send(*spend.clone()) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - InternalGetNetworkSpend::DoubleSpend(spends) => { - info!("Fetched double spend(s) of len {} at {addr:?} from network, following all of them.", spends.len()); - for 
s in spends.into_iter() { - next_gen_descendants.extend(s.spend.descendants.clone()); - spend_processing - .send(s.clone()) - .await - .map_err(|e| WalletError::SpendProcessing(e.to_string()))?; - } - } - InternalGetNetworkSpend::NotFound => { - info!("Reached UTXO at {addr:?}"); - utxos.insert(addr); - } - InternalGetNetworkSpend::Error(err) => { - error!("Failed to get spend at {addr:?} during DAG collection: {err:?}") - } - } - } - - // only follow descendants we haven't already gathered - let followed_descendants: BTreeSet = - descendants_to_follow.keys().copied().collect(); - known_descendants.extend(followed_descendants); - descendants_to_follow = next_gen_descendants - .into_iter() - .filter(|(key, _)| !known_descendants.contains(key)) - .collect(); - - // go on to next gen - gen += 1; - } - - let elapsed = start.elapsed(); - info!("Finished crawling SpendDAG from {spend_addr:?} in {elapsed:?}"); - Ok(utxos) - } - - /// Extends an existing SpendDag with a new SignedSpend, - /// tracing back the ancestors of that Spend all the way to a known Spend in the DAG or else back to Genesis - /// Verifies the DAG and records faults if any - /// This is useful to keep a partial SpendDag to be able to verify that new spends come from Genesis - /// - /// ```text - /// ... -- - /// \ - /// ... ---- ... -- - /// \ \ - /// Spend0 -> Spend1 -----> Spend2 ---> Spend5 ---> Spend2 ---> Genesis - /// \ / - /// ---> Spend3 ---> Spend6 -> - /// \ / - /// -> Spend4 -> - /// / - /// ... 
- /// - /// ``` - pub async fn spend_dag_extend_until( - &self, - dag: &mut SpendDag, - spend_addr: SpendAddress, - new_spend: SignedSpend, - ) -> WalletResult<()> { - // check existence of spend in dag - let is_new_spend = dag.insert(spend_addr, new_spend.clone()); - if !is_new_spend { - return Ok(()); - } - - // use iteration instead of recursion to avoid stack overflow - let mut ancestors_to_verify = new_spend.spend.ancestors.clone(); - let mut depth = 0; - let mut known_ancestors = BTreeSet::from_iter([dag.source()]); - let start = std::time::Instant::now(); - - while !ancestors_to_verify.is_empty() { - let mut next_gen_ancestors = BTreeSet::new(); - - for ancestor in ancestors_to_verify { - let addrs_to_verify = vec![SpendAddress::from_unique_pubkey(&ancestor)]; - debug!("Depth {depth} - checking parent : {ancestor:?} - {addrs_to_verify:?}"); - - // get all parent spends in parallel - let tasks: Vec<_> = addrs_to_verify - .iter() - .map(|a| self.crawl_spend(*a)) - .collect(); - let mut spends = BTreeSet::new(); - for (spend_get, a) in join_all(tasks) - .await - .into_iter() - .zip(addrs_to_verify.clone()) - { - match spend_get { - InternalGetNetworkSpend::Spend(s) => { - spends.insert(*s); - } - InternalGetNetworkSpend::DoubleSpend(s) => { - spends.extend(s.into_iter()); - } - InternalGetNetworkSpend::NotFound => { - return Err(WalletError::FailedToGetSpend(format!( - "Missing ancestor spend at {a:?}" - ))) - } - InternalGetNetworkSpend::Error(e) => { - return Err(WalletError::FailedToGetSpend(format!( - "Failed to get ancestor spend at {a:?}: {e}" - ))) - } - } - } - let spends_len = spends.len(); - debug!("Depth {depth} - Got {spends_len} spends for parent: {addrs_to_verify:?}"); - trace!("Spends for {addrs_to_verify:?} - {spends:?}"); - - // add spends to the dag - known_ancestors.extend(addrs_to_verify.clone()); - for (spend, addr) in spends.clone().into_iter().zip(addrs_to_verify) { - let is_new_spend = dag.insert(addr, spend.clone()); - - // no need to 
check this spend's parents if it was already in the DAG - if is_new_spend { - next_gen_ancestors.extend(spend.spend.ancestors.clone()); - } - } - } - - // only verify parents we haven't already verified - ancestors_to_verify = next_gen_ancestors - .into_iter() - .filter(|ancestor| { - !known_ancestors.contains(&SpendAddress::from_unique_pubkey(ancestor)) - }) - .collect(); - - depth += 1; - let elapsed = start.elapsed(); - let n = known_ancestors.len(); - info!("Now at depth {depth} - Collected spends from {n} transactions in {elapsed:?}"); - } - - let elapsed = start.elapsed(); - let n = known_ancestors.len(); - info!("Collected the DAG branch all the way to known spends or genesis! Through {depth} generations, collecting spends from {n} transactions in {elapsed:?}"); - - // verify the DAG - info!("Now verifying SpendDAG extended at {spend_addr:?} and recording errors..."); - let start = std::time::Instant::now(); - if let Err(e) = dag.record_faults(&dag.source()) { - let s = format!( - "Collected DAG starting at {spend_addr:?} is invalid, this is probably a bug: {e}" - ); - error!("{s}"); - return Err(WalletError::Dag(s)); - } - let elapsed = start.elapsed(); - info!("Finished verifying SpendDAG extended from {spend_addr:?} in {elapsed:?}"); - Ok(()) - } - - /// Extends an existing SpendDag starting from the given utxos - /// If verify is true, records faults in the DAG - pub async fn spend_dag_continue_from( - &self, - dag: &mut SpendDag, - utxos: BTreeSet, - spend_processing: Option>, - verify: bool, - ) { - let main_dag_src = dag.source(); - info!( - "Expanding spend DAG with source: {main_dag_src:?} from {} utxos", - utxos.len() - ); - - let sender = spend_processing.clone(); - let tasks = utxos - .iter() - .map(|utxo| self.spend_dag_build_from(*utxo, sender.clone(), false)); - let sub_dags = join_all(tasks).await; - for (res, addr) in sub_dags.into_iter().zip(utxos.into_iter()) { - match res { - Ok(sub_dag) => { - debug!("Gathered sub DAG from: {addr:?}"); - 
if let Err(e) = dag.merge(sub_dag, verify) { - warn!("Failed to merge sub dag from {addr:?} into dag: {e}"); - } - } - Err(e) => warn!("Failed to gather sub dag from {addr:?}: {e}"), - }; - } - - info!("Done gathering spend DAG from utxos"); - } - - /// Extends an existing SpendDag starting from the utxos in this DAG - /// Covers the entirety of currently existing Spends if the DAG was built from Genesis - /// If verify is true, records faults in the DAG - /// Stops gathering after max_depth generations - pub async fn spend_dag_continue_from_utxos( - &self, - dag: &mut SpendDag, - spend_processing: Option>, - verify: bool, - ) { - let utxos = dag.get_utxos(); - self.spend_dag_continue_from(dag, utxos, spend_processing, verify) - .await - } - - /// Internal get spend helper for DAG purposes - /// For crawling, a special fetch policy is deployed to improve the performance: - /// 1. Expect `majority` copies as it is a `Spend`; - /// 2. But don't retry as most will be `UTXO` which won't be found. - async fn crawl_spend(&self, spend_addr: SpendAddress) -> InternalGetNetworkSpend { - match self.crawl_spend_from_network(spend_addr).await { - Ok(s) => { - debug!("DAG crawling: fetched spend {spend_addr:?} from network"); - InternalGetNetworkSpend::Spend(Box::new(s)) - } - Err(Error::Network(NetworkError::GetRecordError(GetRecordError::RecordNotFound))) => { - debug!("DAG crawling: spend at {spend_addr:?} not found on the network"); - InternalGetNetworkSpend::NotFound - } - Err(Error::Network(NetworkError::DoubleSpendAttempt(spends))) => { - debug!("DAG crawling: got a double spend(s) of len {} at {spend_addr:?} on the network", spends.len()); - InternalGetNetworkSpend::DoubleSpend(spends) - } - Err(e) => { - debug!( - "DAG crawling: got an error for spend at {spend_addr:?} on the network: {e}" - ); - InternalGetNetworkSpend::Error(e) - } - } - } -} - -/// Helper function to analyze spend for beta_tracking optimization. 
-/// returns the new_utxos that needs to be further tracked. -fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet<(SpendAddress, NanoTokens)> { - // Filter out royalty outputs - let royalty_pubkeys: BTreeSet<_> = spend - .spend - .network_royalties() - .iter() - .map(|(_, _, der)| NETWORK_ROYALTIES_PK.new_unique_pubkey(der)) - .collect(); - let default_royalty_pubkeys: BTreeSet<_> = spend - .spend - .network_royalties() - .iter() - .map(|(_, _, der)| DEFAULT_NETWORK_ROYALTIES_PK.new_unique_pubkey(der)) - .collect(); - - let spend_addr = spend.address(); - let new_utxos: BTreeSet<_> = spend - .spend - .descendants - .iter() - .filter_map(|(unique_pubkey, amount)| { - if default_royalty_pubkeys.contains(unique_pubkey) - || royalty_pubkeys.contains(unique_pubkey) - { - None - } else { - let addr = SpendAddress::from_unique_pubkey(unique_pubkey); - - if amount.as_nano() > 100000 { - info!("Spend {spend_addr:?} has a big-UTXO {addr:?} with {amount}"); - } - - Some((addr, *amount)) - } - }) - .collect(); - - if let SpendReason::BetaRewardTracking(_) = spend.reason() { - // Do not track down forwarded payment further - Default::default() - } else { - trace!( - "Spend {spend_addr:?} original has {} outputs, tracking {} of them.", - spend.spend.descendants.len(), - new_utxos.len() - ); - new_utxos - } -} diff --git a/sn_client/src/audit/dag_error.rs b/sn_client/src/audit/dag_error.rs deleted file mode 100644 index 6fb79953fd..0000000000 --- a/sn_client/src/audit/dag_error.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use serde::{Deserialize, Serialize}; -use sn_transfers::SpendAddress; -use thiserror::Error; - -/// Errors that mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum DagError { - #[error("DAG has no source at {0:?}")] - MissingSource(SpendAddress), - #[error("DAG is incoherent at {0:?}: {1}")] - IncoherentDag(SpendAddress, String), - #[error("DAG with root {0:?} contains a cycle")] - DagContainsCycle(SpendAddress), -} - -/// List of possible faults that can be found in the DAG during verification -/// This indicates a certain spend is invalid and the reason for it -/// but does not mean the DAG is invalid -#[derive(Error, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord)] -pub enum SpendFault { - #[error("Double Spend at {0:?}")] - DoubleSpend(SpendAddress), - #[error("Spend at {addr:?} has a missing ancestor at {ancestor:?}, until this ancestor is added to the DAG, it cannot be verified")] - MissingAncestry { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error( - "Spend at {addr:?} has a double spent ancestor at {ancestor:?}, making it unspendable" - )] - DoubleSpentAncestor { - addr: SpendAddress, - ancestor: SpendAddress, - }, - #[error("Invalid transaction for spend at {0:?}: {1}")] - InvalidTransaction(SpendAddress, String), - #[error("Poisoned ancestry for spend at {0:?}: {1}")] - PoisonedAncestry(SpendAddress, String), - #[error("Spend at {addr:?} does not descend from given source: {src:?}")] - OrphanSpend { - addr: SpendAddress, - src: SpendAddress, - }, -} - -impl DagError { - pub fn spend_address(&self) -> SpendAddress { - match self { - DagError::MissingSource(addr) - | DagError::IncoherentDag(addr, _) - | DagError::DagContainsCycle(addr) => *addr, - } - } -} - -impl SpendFault { - pub fn 
spend_address(&self) -> SpendAddress { - match self { - SpendFault::DoubleSpend(addr) - | SpendFault::MissingAncestry { addr, .. } - | SpendFault::DoubleSpentAncestor { addr, .. } - | SpendFault::InvalidTransaction(addr, _) - | SpendFault::PoisonedAncestry(addr, _) - | SpendFault::OrphanSpend { addr, .. } => *addr, - } - } -} diff --git a/sn_client/src/audit/spend_dag.rs b/sn_client/src/audit/spend_dag.rs deleted file mode 100644 index fbf00bd947..0000000000 --- a/sn_client/src/audit/spend_dag.rs +++ /dev/null @@ -1,831 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use bls::SecretKey; -use petgraph::dot::Dot; -use petgraph::graph::{DiGraph, NodeIndex}; -use petgraph::visit::EdgeRef; -use serde::{Deserialize, Serialize}; -use sn_transfers::{ - is_genesis_spend, CashNoteRedemption, DerivationIndex, Hash, NanoTokens, SignedSpend, - SpendAddress, UniquePubkey, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - path::Path, -}; - -use super::dag_error::{DagError, SpendFault}; - -/// A DAG representing the spends from a specific Spend all the way to the UTXOs. -/// Starting from Genesis, this would encompass all the spends that have happened on the network -/// at a certain point in time. 
-/// -/// ```text -/// -> Spend7 ---> UTXO_11 -/// / -/// Genesis -> Spend1 -----> Spend2 ---> Spend5 ---> UTXO_10 -/// \ -/// ---> Spend3 ---> Spend6 ---> UTXO_9 -/// \ -/// -> Spend4 ---> UTXO_8 -/// -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SpendDag { - /// A directed graph of spend addresses - dag: DiGraph, - /// All the spends refered to in the dag indexed by their SpendAddress - spends: BTreeMap, - /// The source of the DAG (aka Genesis) - source: SpendAddress, - /// Recorded faults in the DAG - faults: BTreeMap>, -} - -type DagIndex = usize; - -/// Internal Dag entry type -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] -enum DagEntry { - NotGatheredYet(DagIndex), - DoubleSpend(Vec<(SignedSpend, DagIndex)>), - Spend(Box, DagIndex), -} - -impl DagEntry { - fn indexes(&self) -> Vec { - match self { - DagEntry::NotGatheredYet(idx) => vec![*idx], - DagEntry::DoubleSpend(spends) => spends.iter().map(|(_, idx)| *idx).collect(), - DagEntry::Spend(_, idx) => vec![*idx], - } - } - - fn spends(&self) -> Vec<&SignedSpend> { - match self { - DagEntry::Spend(spend, _) => vec![&**spend], - DagEntry::DoubleSpend(spends) => spends.iter().map(|(s, _)| s).collect(), - DagEntry::NotGatheredYet(_) => vec![], - } - } -} - -/// The result of a get operation on the DAG -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] -pub enum SpendDagGet { - /// Spend does not exist in the DAG - SpendNotFound, - /// Spend key is refered to by known spends but does not exist in the DAG yet - Utxo, - /// Spend is a double spend - DoubleSpend(Vec), - /// Spend is in the DAG - Spend(Box), -} - -impl SpendDag { - /// Create a new DAG with a given source - pub fn new(source: SpendAddress) -> Self { - Self { - dag: DiGraph::new(), - spends: BTreeMap::new(), - source, - faults: BTreeMap::new(), - } - } - - pub fn source(&self) -> SpendAddress { - self.source - } - - pub fn load_from_file>(path: P) -> crate::Result { - let bytes = 
std::fs::read(path)?; - let dag: SpendDag = rmp_serde::from_slice(&bytes)?; - Ok(dag) - } - - pub fn dump_to_file>(&self, path: P) -> crate::Result<()> { - let bytes = rmp_serde::to_vec(&self)?; - std::fs::write(path, bytes)?; - Ok(()) - } - - /// Insert a spend into the dag - /// Creating edges (links) from its ancestors and to its descendants - /// If the inserted spend is already known, it will be ignored - /// If the inserted spend is a double spend, it will be saved along with the previous spend - /// Return true if the spend was inserted and false if it was already in the DAG - pub fn insert(&mut self, spend_addr: SpendAddress, spend: SignedSpend) -> bool { - let existing_entry = self.spends.get(&spend_addr).cloned(); - let new_node_idx = match existing_entry { - // add new spend to the DAG - None => { - let node_idx = self.dag.add_node(spend_addr); - self.spends.insert( - spend_addr, - DagEntry::Spend(Box::new(spend.clone()), node_idx.index()), - ); - node_idx - } - // or upgrade a known but not gathered entry to spend - Some(DagEntry::NotGatheredYet(idx)) => { - self.spends - .insert(spend_addr, DagEntry::Spend(Box::new(spend.clone()), idx)); - let node_idx = NodeIndex::new(idx); - self.remove_all_edges(node_idx); - node_idx - } - // or upgrade spend to double spend if it is different from the existing one - Some(DagEntry::Spend(s, idx)) => { - let existing_spend = *s.clone(); - if existing_spend == spend { - return false; - } - - let node_idx = self.dag.add_node(spend_addr); - let double_spend = DagEntry::DoubleSpend(vec![ - (existing_spend.clone(), idx), - (spend.clone(), node_idx.index()), - ]); - self.spends.insert(spend_addr, double_spend); - node_idx - } - // or add extra spend to an existing double spend if it is unknown yet - Some(DagEntry::DoubleSpend(vec_s)) => { - if vec_s.iter().any(|(s, _idx)| s == &spend) { - return false; - } - - let node_idx = self.dag.add_node(spend_addr); - let mut vec_s = vec_s.clone(); - vec_s.push((spend.clone(), 
node_idx.index())); - self.spends.insert(spend_addr, DagEntry::DoubleSpend(vec_s)); - node_idx - } - }; - - // link to descendants - for (descendant, amount) in spend.spend.descendants.iter() { - let descendant_addr = SpendAddress::from_unique_pubkey(descendant); - - // add descendant if not already in dag - let spends_at_addr = self.spends.entry(descendant_addr).or_insert_with(|| { - let node_idx = self.dag.add_node(descendant_addr); - DagEntry::NotGatheredYet(node_idx.index()) - }); - - // link to descendant - for idx in spends_at_addr.indexes() { - let descendant_idx = NodeIndex::new(idx); - self.dag.update_edge(new_node_idx, descendant_idx, *amount); - } - } - - // do not link to ancestors if the spend is the source - if spend_addr == self.source { - return true; - } - - // link to ancestors - const PENDING_AMOUNT: NanoTokens = NanoTokens::from(0); - for ancestor in spend.spend.ancestors.iter() { - let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor); - - // add ancestor if not already in dag - let spends_at_addr = self.spends.entry(ancestor_addr).or_insert_with(|| { - let node_idx = self.dag.add_node(ancestor_addr); - DagEntry::NotGatheredYet(node_idx.index()) - }); - - // link to ancestor - match spends_at_addr { - DagEntry::NotGatheredYet(idx) => { - let ancestor_idx = NodeIndex::new(*idx); - self.dag - .update_edge(ancestor_idx, new_node_idx, PENDING_AMOUNT); - } - DagEntry::Spend(ancestor_spend, idx) => { - let ancestor_idx = NodeIndex::new(*idx); - let ancestor_given_amount = ancestor_spend - .spend - .descendants - .iter() - .find(|(descendant, _amount)| **descendant == spend.spend.unique_pubkey) - .map(|(_descendant, amount)| *amount) - .unwrap_or(PENDING_AMOUNT); - self.dag - .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount); - } - DagEntry::DoubleSpend(multiple_ancestors) => { - for (ancestor_spend, ancestor_idx) in multiple_ancestors { - if ancestor_spend - .spend - .descendants - .contains_key(spend.unique_pubkey()) - { - let 
ancestor_idx = NodeIndex::new(*ancestor_idx); - let ancestor_given_amount = ancestor_spend - .spend - .descendants - .iter() - .find(|(descendant, _amount)| { - **descendant == spend.spend.unique_pubkey - }) - .map(|(_descendant, amount)| *amount) - .unwrap_or(PENDING_AMOUNT); - self.dag - .update_edge(ancestor_idx, new_node_idx, ancestor_given_amount); - } - } - } - } - } - - true - } - - /// Get spend addresses that probably exist as they are refered to by spends we know, - /// but we haven't gathered them yet - /// This includes UTXOs and unknown ancestors - pub fn get_pending_spends(&self) -> BTreeSet { - self.spends - .iter() - .filter_map(|(addr, entry)| match entry { - DagEntry::NotGatheredYet(_) => Some(*addr), - _ => None, - }) - .collect() - } - - /// Get the UTXOs: all the addresses that are refered to as children by other spends - /// but that don't have children themselves. - /// Those will eventually exist on the Network as the address is spent by their owners. - pub fn get_utxos(&self) -> BTreeSet { - let mut leaves = BTreeSet::new(); - for node_index in self.dag.node_indices() { - if !self - .dag - .neighbors_directed(node_index, petgraph::Direction::Outgoing) - .any(|_| true) - { - let utxo_addr = self.dag[node_index]; - leaves.insert(utxo_addr); - } - } - leaves - } - - pub fn dump_dot_format(&self) -> String { - format!("{:?}", Dot::with_config(&self.dag, &[])) - } - - pub fn dump_payment_forward_statistics(&self, sk: &SecretKey) -> String { - let mut statistics: BTreeMap> = Default::default(); - - let mut hash_dictionary: BTreeMap = Default::default(); - - // The following three is used in the memcheck test script. - // Update whenever these three got changed in the script. 
- let bootstrap_string = "bootstrap".to_string(); - let restart_string = "restart".to_string(); - let restarted_string = "restarted".to_string(); - let _ = hash_dictionary.insert(Hash::hash(bootstrap_string.as_bytes()), bootstrap_string); - let _ = hash_dictionary.insert(Hash::hash(restart_string.as_bytes()), restart_string); - let _ = hash_dictionary.insert(Hash::hash(restarted_string.as_bytes()), restarted_string); - for i in 0..50 { - let node_string = format!("node_{i}"); - let _ = hash_dictionary.insert(Hash::hash(node_string.as_bytes()), node_string); - } - - for spend_dag_entry in self.spends.values() { - if let DagEntry::Spend(signed_spend, _) = spend_dag_entry { - if let Some(sender_hash) = signed_spend.spend.reason.decrypt_discord_cypher(sk) { - let sender = if let Some(readable_sender) = hash_dictionary.get(&sender_hash) { - readable_sender.clone() - } else { - format!("{sender_hash:?}") - }; - let holders = statistics.entry(sender).or_default(); - holders.push(signed_spend.spend.amount()); - } - } - } - - let mut content = "Sender, Times, Amount".to_string(); - for (sender, payments) in statistics.iter() { - let total_amount: u64 = payments - .iter() - .map(|nano_tokens| nano_tokens.as_nano()) - .sum(); - content = format!("{content}\n{sender}, {}, {total_amount}", payments.len()); - } - content - } - - /// Merges the given dag into ours, optionally recomputing the faults after merge - /// If verify is set to false, the faults will not be computed, this can be useful when batching merges to avoid re-verifying - /// be sure to manually verify afterwards - pub fn merge(&mut self, sub_dag: SpendDag, verify: bool) -> Result<(), DagError> { - let source = self.source(); - info!( - "Merging sub DAG starting at {:?} into our DAG with source {:?}", - sub_dag.source(), - source - ); - for (addr, spends) in sub_dag.spends { - // only add spends to the dag, ignoring utxos and not yet gathered relatives - // utxos will be added automatically as their ancestors are 
added - // edges are updated by the insert method - match spends { - DagEntry::NotGatheredYet(_) => continue, - DagEntry::DoubleSpend(spends) => { - for (spend, _) in spends { - self.insert(addr, spend); - } - } - DagEntry::Spend(spend, _) => { - self.insert(addr, *spend); - } - } - } - - // recompute faults - if verify { - self.record_faults(&source)?; - } - - Ok(()) - } - - /// Get the spend at a given address - pub fn get_spend(&self, addr: &SpendAddress) -> SpendDagGet { - match self.spends.get(addr) { - None => SpendDagGet::SpendNotFound, - Some(DagEntry::NotGatheredYet(_)) => SpendDagGet::Utxo, - Some(DagEntry::DoubleSpend(spends)) => { - SpendDagGet::DoubleSpend(spends.iter().map(|(s, _)| s.clone()).collect()) - } - Some(DagEntry::Spend(spend, _)) => SpendDagGet::Spend(spend.clone()), - } - } - - /// Get the recorded faults if any for a given spend address - pub fn get_spend_faults(&self, addr: &SpendAddress) -> BTreeSet { - self.faults.get(addr).cloned().unwrap_or_default() - } - - /// Helper to get underlying index of spend entry in the DAG - /// This unstable API is used to access the underlying graph for testing purposes - /// An empty vec is returned if the spend is not in the DAG - pub fn get_spend_indexes(&self, addr: &SpendAddress) -> Vec { - self.spends - .get(addr) - .map(|spends| spends.indexes()) - .unwrap_or_default() - } - - /// Get all spends from the DAG - pub fn all_spends(&self) -> Vec<&SignedSpend> { - self.spends - .values() - .flat_map(|entry| entry.spends()) - .collect() - } - - /// Get the faults recorded in the DAG - pub fn faults(&self) -> &BTreeMap> { - &self.faults - } - - /// Get all royalties from the DAG - pub fn all_royalties(&self) -> crate::Result> { - let spends = self.all_spends(); - let mut royalties_by_unique_pk: BTreeMap< - UniquePubkey, - Vec<(DerivationIndex, SpendAddress)>, - > = BTreeMap::new(); - for s in spends { - let parent_spend_addr = SpendAddress::from_unique_pubkey(&s.spend.unique_pubkey); - for (roy_pk, _, 
derivation_idx) in s.spend.network_royalties() { - royalties_by_unique_pk - .entry(roy_pk) - .and_modify(|v| v.push((derivation_idx, parent_spend_addr))) - .or_insert(vec![(derivation_idx, parent_spend_addr)]); - } - } - - // assemble those and check - let mut royalties = vec![]; - for (unique_pk, vec) in royalties_by_unique_pk.into_iter() { - let parents_spend_addrs = vec.iter().map(|(_di, spend_addr)| *spend_addr).collect(); - let derivation_idx_uniq: BTreeSet<_> = - vec.iter().map(|(di, _spend_addr)| *di).collect(); - let idx_vec: Vec<_> = derivation_idx_uniq.into_iter().collect(); - let derivation_index = match idx_vec.as_slice() { - [one_unique] => *one_unique, - _ => { - warn!("DerivationIndex in single royalty output for {unique_pk:?} should have been unique, found parents and reported derivation index {vec:?}"); - continue; - } - }; - royalties.push(CashNoteRedemption::new( - derivation_index, - parents_spend_addrs, - )) - } - Ok(royalties) - } - - /// Remove all edges from a Node in the DAG - fn remove_all_edges(&mut self, node: NodeIndex) { - let incoming: Vec<_> = self - .dag - .edges_directed(node, petgraph::Direction::Incoming) - .map(|e| e.id()) - .collect(); - let outgoing: Vec<_> = self - .dag - .edges_directed(node, petgraph::Direction::Outgoing) - .map(|e| e.id()) - .collect(); - for edge in incoming.into_iter().chain(outgoing.into_iter()) { - self.dag.remove_edge(edge); - } - } - - /// helper that returns the direct ancestors of a given spend - /// along with any faults detected - /// On error returns the address of the missing ancestor - fn get_direct_ancestors( - &self, - spend: &SignedSpend, - ) -> Result<(BTreeSet, BTreeSet), SpendAddress> { - let addr = spend.address(); - let mut ancestors = BTreeSet::new(); - let mut faults = BTreeSet::new(); - for ancestor in spend.spend.ancestors.iter() { - let ancestor_addr = SpendAddress::from_unique_pubkey(ancestor); - match self.spends.get(&ancestor_addr) { - Some(DagEntry::Spend(ancestor_spend, _)) 
=> { - ancestors.insert(*ancestor_spend.clone()); - } - Some(DagEntry::NotGatheredYet(_)) | None => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is missing"); - return Err(ancestor_addr); - } - Some(DagEntry::DoubleSpend(multiple_ancestors)) => { - debug!("Direct ancestor for spend {spend:?} at {ancestor_addr:?} is a double spend"); - faults.insert(SpendFault::DoubleSpentAncestor { - addr, - ancestor: ancestor_addr, - }); - let actual_ancestor: Vec<_> = multiple_ancestors - .iter() - .filter(|(s, _)| s.spend.descendants.contains_key(spend.unique_pubkey())) - .map(|(s, _)| s.clone()) - .collect(); - match actual_ancestor.as_slice() { - [ancestor_spend] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend but one of those match our parent_tx hash, using it for verification"); - ancestors.insert(ancestor_spend.clone()); - } - [ancestor1, _ancestor2, ..] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and mutliple match our parent_tx hash, using the first one for verification"); - ancestors.insert(ancestor1.clone()); - } - [] => { - warn!("Direct ancestor of {spend:?} at {ancestor_addr:?} is a double spend and none of them match the spend parent_tx, which means the parent for this spend is missing!"); - return Err(ancestor_addr); - } - } - } - } - } - Ok((ancestors, faults)) - } - - /// helper that returns all the descendants (recursively all the way to UTXOs) of a given spend - fn all_descendants(&self, addr: &SpendAddress) -> Result, DagError> { - let mut descendants = BTreeSet::new(); - let mut to_traverse = BTreeSet::from_iter(vec![addr]); - while let Some(current_addr) = to_traverse.pop_first() { - // get the spend at this address - let dag_entry = match self.spends.get(current_addr) { - Some(entry) => entry, - None => { - warn!("Incoherent DAG, missing descendant spend when expecting one at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("Missing 
descendant spend in DAG at: {current_addr:?}"), - )); - } - }; - let (spends, indexes) = (dag_entry.spends(), dag_entry.indexes()); - - // get descendants via spend - let descendants_via_spend: BTreeSet = spends - .into_iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - - // get descendants via DAG - let descendants_via_dag: BTreeSet<&SpendAddress> = indexes - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), petgraph::Direction::Outgoing) - .map(|i| &self.dag[i]) - }) - .collect(); - - // report inconsistencies - if descendants_via_dag != descendants_via_spend.iter().collect() { - if matches!(dag_entry, DagEntry::NotGatheredYet(_)) { - debug!("Spend at {current_addr:?} was not gathered yet and has children refering to it, continuing traversal through those children..."); - } else { - warn!("Incoherent DAG at: {current_addr:?}"); - return Err(DagError::IncoherentDag( - *current_addr, - format!("descendants via DAG: {descendants_via_dag:?} do not match descendants via spend: {descendants_via_spend:?}") - )); - } - } - - // continue traversal - let not_transversed = descendants_via_dag.difference(&descendants); - to_traverse.extend(not_transversed); - descendants.extend(descendants_via_dag.iter().cloned()); - } - Ok(descendants) - } - - /// find all the orphans in the DAG and record them as OrphanSpend - /// returns the list of OrphanSpend and other errors encountered in the way - fn find_orphans(&self, source: &SpendAddress) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let all_addresses: BTreeSet<&SpendAddress> = self.spends.keys().collect(); - let all_descendants = self.all_descendants(source)?; - let parents: BTreeSet<_> = self - .get_spend_indexes(source) - .into_iter() - .flat_map(|idx| { - self.dag - .neighbors_directed(NodeIndex::new(idx), petgraph::Direction::Incoming) - }) - .map(|parent_idx| &self.dag[parent_idx]) - .collect(); - let 
non_orphans = - BTreeSet::from_iter(all_descendants.into_iter().chain(parents).chain([source])); - - // orphans are those that are neither descandants nor source's parents nor source itself - let orphans: BTreeSet<&SpendAddress> = - all_addresses.difference(&non_orphans).cloned().collect(); - for orphan in orphans { - let src = *source; - let addr = *orphan; - debug!("Found orphan: {orphan:?} of {src:?}"); - recorded_faults.insert(SpendFault::OrphanSpend { addr, src }); - } - - Ok(recorded_faults) - } - - /// Checks if a double spend has multiple living descendant branches that fork - fn double_spend_has_forking_descendant_branches(&self, spends: &Vec<&SignedSpend>) -> bool { - // gather all living descendants for each branch - let mut set_of_living_descendants: BTreeSet> = BTreeSet::new(); - for spend in spends { - let gathered_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .filter_map(|a| self.spends.get(&a)) - .filter_map(|s| { - if matches!(s, DagEntry::NotGatheredYet(_)) { - None - } else { - Some(s.spends()) - } - }) - .flatten() - .collect::>(); - set_of_living_descendants.insert(gathered_descendants); - } - - // make sure there is no fork - for set1 in set_of_living_descendants.iter() { - for set2 in set_of_living_descendants.iter() { - if set1.is_subset(set2) || set2.is_subset(set1) { - continue; - } else { - return true; - } - } - } - - false - } - - /// Verify the DAG and record faults in the DAG - /// If the DAG is invalid, return an error immediately, without mutating the DAG - pub fn record_faults(&mut self, source: &SpendAddress) -> Result<(), DagError> { - let faults = self.verify(source)?; - - self.faults.clear(); - for f in faults { - self.faults.entry(f.spend_address()).or_default().insert(f); - } - Ok(()) - } - - /// Verify the DAG and return faults detected in the DAG - /// If the DAG itself is invalid, return an error immediately - pub fn verify(&self, source: &SpendAddress) -> Result, 
DagError> { - info!("Verifying DAG starting off: {source:?}"); - let mut recorded_faults = BTreeSet::new(); - - // verify the DAG is acyclic - if petgraph::algo::is_cyclic_directed(&self.dag) { - warn!("DAG is cyclic"); - return Err(DagError::DagContainsCycle(*source)); - } - - // verify DAG source exists in the DAG (Genesis in case of a complete DAG) - debug!("Verifying DAG source: {source:?}"); - match self.spends.get(source) { - None => { - debug!("DAG does not contain its source: {source:?}"); - return Err(DagError::MissingSource(*source)); - } - Some(DagEntry::DoubleSpend(_)) => { - debug!("DAG source is a double spend: {source:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*source)); - } - _ => (), - } - - // identify orphans (spends that don't come from the source) - debug!("Looking for orphans of {source:?}"); - recorded_faults.extend(self.find_orphans(source)?); - - // check all transactions - for (addr, _) in self.spends.iter() { - debug!("Verifying transaction at: {addr:?}"); - // get the spend at this address - let spends = self - .spends - .get(addr) - .map(|s| s.spends()) - .unwrap_or_default(); - - // record double spends - if spends.len() > 1 { - debug!("Found a double spend entry in DAG at {addr:?}"); - recorded_faults.insert(SpendFault::DoubleSpend(*addr)); - let direct_descendants: BTreeSet = spends - .iter() - .flat_map(|s| s.spend.descendants.keys()) - .map(SpendAddress::from_unique_pubkey) - .collect(); - debug!("Making the direct descendants of the double spend at {addr:?} as faulty: {direct_descendants:?}"); - for a in direct_descendants.iter() { - recorded_faults.insert(SpendFault::DoubleSpentAncestor { - addr: *a, - ancestor: *addr, - }); - } - if self.double_spend_has_forking_descendant_branches(&spends) { - debug!("Double spend at {addr:?} has multiple living descendant branches, poisoning them..."); - let poison = format!( - "spend is on one of multiple branches of a double spent ancestor: {addr:?}" - ); - let 
direct_living_descendant_spends: BTreeSet<_> = direct_descendants - .iter() - .filter_map(|a| self.spends.get(a)) - .flat_map(|s| s.spends()) - .collect(); - for s in direct_living_descendant_spends { - recorded_faults.extend(self.poison_all_descendants(s, poison.clone())?); - } - } - continue; - } - - // skip parent verification for source as we don't know its ancestors - if addr == source { - debug!("Skip parent verification for source at: {addr:?}"); - continue; - } - - // verify parents - for s in spends { - recorded_faults.extend(self.verify_spend_parents(s)?); - } - } - - info!( - "Found {} faults: {recorded_faults:#?}", - recorded_faults.len() - ); - Ok(recorded_faults) - } - - /// Verifies a single spend and returns resulting errors and DAG poisoning spread - fn verify_spend_parents(&self, spend: &SignedSpend) -> Result, DagError> { - let addr = spend.address(); - let mut recorded_faults = BTreeSet::new(); - debug!("Verifying spend: {spend:?}"); - - // skip if spend matches genesis - if is_genesis_spend(spend) { - debug!("Skip transaction verification for Genesis: {spend:?}"); - return Ok(recorded_faults); - } - - // get the ancestors of this spend - let (ancestor_spends, faults) = match self.get_direct_ancestors(spend) { - Ok(a) => a, - Err(missing_ancestor) => { - debug!("Failed to get ancestor spends of {spend:?} as ancestor at {missing_ancestor:?} is missing"); - recorded_faults.insert(SpendFault::MissingAncestry { - addr, - ancestor: missing_ancestor, - }); - - let poison = format!("missing ancestor at: {missing_ancestor:?}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - return Ok(recorded_faults); - } - }; - recorded_faults.extend(faults); - - // verify the parents - if let Err(e) = spend.verify_parent_spends(&ancestor_spends) { - warn!("Parent verfication failed for spend at: {spend:?}: {e}"); - recorded_faults.insert(SpendFault::InvalidTransaction(addr, format!("{e}"))); - 
let poison = format!("ancestor transaction was poisoned at: {spend:?}: {e}"); - let descendants_faults = self.poison_all_descendants(spend, poison)?; - recorded_faults.extend(descendants_faults); - } - - Ok(recorded_faults) - } - - /// Poison all descendants of a spend with given the poison message - fn poison_all_descendants( - &self, - spend: &SignedSpend, - poison: String, - ) -> Result, DagError> { - let mut recorded_faults = BTreeSet::new(); - let direct_descendants = spend - .spend - .descendants - .keys() - .map(SpendAddress::from_unique_pubkey) - .collect::>(); - let mut all_descendants = direct_descendants - .iter() - .map(|addr| self.all_descendants(addr)) - .collect::, _>>()? - .into_iter() - .flatten() - .collect::>(); - all_descendants.extend(direct_descendants.iter()); - - for d in all_descendants { - recorded_faults.insert(SpendFault::PoisonedAncestry(*d, poison.clone())); - } - - Ok(recorded_faults) - } -} - -#[cfg(test)] -mod tests { - use xor_name::XorName; - - use super::*; - - #[test] - fn test_spend_dag_serialisation() { - let mut rng = rand::thread_rng(); - let dummy_source = SpendAddress::new(XorName::random(&mut rng)); - let dag = SpendDag::new(dummy_source); - let serialized_data = rmp_serde::to_vec(&dag).expect("Serialization failed"); - let deserialized_instance: SpendDag = - rmp_serde::from_slice(&serialized_data).expect("Deserialization failed"); - let reserialized_data = - rmp_serde::to_vec(&deserialized_instance).expect("Serialization failed"); - assert_eq!(reserialized_data, serialized_data); - } -} diff --git a/sn_client/src/audit/tests/mod.rs b/sn_client/src/audit/tests/mod.rs deleted file mode 100644 index d00e4b1055..0000000000 --- a/sn_client/src/audit/tests/mod.rs +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod setup; - -use std::collections::BTreeSet; - -use setup::MockNetwork; - -use eyre::Result; -use sn_transfers::SpendAddress; - -use crate::{SpendDag, SpendFault}; - -#[test] -fn test_spend_dag_verify_valid_simple() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - net.send(&owner3, &owner4, 100)?; - net.send(&owner4, &owner5, 100)?; - net.send(&owner5, &owner6, 100)?; - - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_verify_valid_simple")?; - - assert_eq!(dag.verify(&genesis), Ok(BTreeSet::new())); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_poisonning() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner_cheat = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, 
&owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend1 = net.send(&owner2, &owner3, 100)?; - let spend_ko3 = net.send(&owner3, &owner4, 100)?; - let spend_ok4 = net.send(&owner4, &owner5, 100)?; - let spend_ok5 = net.send(&owner5, &owner6, 100)?; - - // reuse that cashnote to perform a double spend far back in history - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2 = net.send(&owner2, &owner_cheat, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_poisonning")?; - - // make sure double spend is detected - assert_eq!(spend1, spend2, "both spends should be at the same address"); - let double_spent = spend1.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are unspendable - let upk = net - .wallets - .get(&owner_cheat) - .expect("owner_cheat wallet to exist") - .cn - .first() - .expect("owner_cheat wallet to have 1 cashnote") - .unique_pubkey(); - let utxo = SpendAddress::from_unique_pubkey(&upk); - let got = dag.get_spend_faults(&utxo); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "UTXO of double spend should be unspendable"); - let s3 = spend_ko3.first().expect("spend_ko3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, 
"spend_ko3 should be unspendable"); - - // make sure this didn't poison the rest of the DAG - let s4 = spend_ok4.first().expect("spend_ok4 to be unique"); - let s5 = spend_ok5.first().expect("spend_ok5 to be unique"); - let unaffected = BTreeSet::new(); - - assert_eq!(dag.get_spend_faults(s4), unaffected); - assert_eq!(dag.get_spend_faults(s5), unaffected); - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_branches() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - let owner3a = net.new_pk_with_balance(0)?; - let owner4a = net.new_pk_with_balance(0)?; - let owner5a = net.new_pk_with_balance(0)?; - - // spend normaly and save a cashnote to reuse later - net.send(&owner1, &owner2, 100)?; - let cn_to_reuse_later = net - .wallets - .get(&owner2) - .expect("owner2 wallet to exist") - .cn - .clone(); - let spend2 = net.send(&owner2, &owner3, 100)?; - let spend3 = net.send(&owner3, &owner4, 100)?; - let spend4 = net.send(&owner4, &owner5, 100)?; - let spend5 = net.send(&owner5, &owner6, 100)?; - - // reuse that cashnote to perform a double spend and create a branch - net.wallets - .get_mut(&owner2) - .expect("owner2 wallet to still exist") - .cn = cn_to_reuse_later; - let spend2a = net.send(&owner2, &owner3a, 100)?; - let spend3a = net.send(&owner3a, &owner4a, 100)?; - let spend4a = net.send(&owner4a, &owner5a, 100)?; - - // create dag - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - println!("Adding into dag with spend {spend:?}"); - dag.insert(spend.address(), spend.clone()); - } - - assert_eq!(dag.record_faults(&genesis), Ok(())); - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_branches")?; - - // make sure double spend is 
detected - assert_eq!(spend2, spend2a, "both spends should be at the same address"); - let double_spent = spend2.first().expect("spend1 to have an element"); - let got = dag.get_spend_faults(double_spent); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!(got, expected, "DAG should have detected double spend"); - - // make sure the double spend's direct descendants are marked as double spent - let s3 = spend3.first().expect("spend3 to have an element"); - let got = dag.get_spend_faults(s3); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3 should be unspendable"); - let s3a = spend3a.first().expect("spend3a to have an element"); - let got = dag.get_spend_faults(s3a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: *s3a, - ancestor: *double_spent, - }]); - assert_eq!(got, expected, "spend3a should be unspendable"); - - // make sure all the descendants further down the branch are poisoned due to a double spent ancestor - let utxo_of_5a = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner5a) - .expect("owner5a wallet to exist") - .cn - .first() - .expect("owner5a wallet to have 1 cashnote") - .unique_pubkey(), - ); - let utxo_of_6 = SpendAddress::from_unique_pubkey( - &net.wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(), - ); - let all_descendants = [spend4, spend5, vec![utxo_of_6], spend4a, vec![utxo_of_5a]]; - for d in all_descendants.iter() { - let got = dag.get_spend_faults(d.first().expect("descendant spend to have an element")); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - *d.first().expect("d to have an element"), - format!( - "spend is on one of multiple branches of a double spent ancestor: {double_spent:?}" - ), - )]); - assert_eq!(got, expected, "all 
descendants should be marked as bad"); - } - Ok(()) -} - -#[test] -fn test_spend_dag_double_spend_detection() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2a = net.new_pk_with_balance(0)?; - let owner2b = net.new_pk_with_balance(0)?; - - // perform double spend - let cn_to_reuse = net - .wallets - .get(&owner1) - .expect("owner1 wallet to exist") - .cn - .clone(); - let spend1_addr = net.send(&owner1, &owner2a, 100)?; - net.wallets - .get_mut(&owner1) - .expect("owner1 wallet to still exist") - .cn = cn_to_reuse; - let spend2_addr = net.send(&owner1, &owner2b, 100)?; - - // get the UTXOs of the two spends - let upk_of_2a = net - .wallets - .get(&owner2a) - .expect("owner2a wallet to exist") - .cn - .first() - .expect("owner2a wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2a = SpendAddress::from_unique_pubkey(&upk_of_2a); - let upk_of_2b = net - .wallets - .get(&owner2b) - .expect("owner2b wallet to exist") - .cn - .first() - .expect("owner2b wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_of_2b = SpendAddress::from_unique_pubkey(&upk_of_2b); - - // make DAG - let mut dag = SpendDag::new(genesis); - for spend in net.spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_double_spend_detection")?; - - // make sure the double spend is detected - assert_eq!( - spend1_addr, spend2_addr, - "both spends should be at the same address" - ); - assert_eq!(spend1_addr.len(), 1, "there should only be one spend"); - let double_spent = spend1_addr.first().expect("spend1_addr to have an element"); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpend(*double_spent)]); - assert_eq!( - dag.get_spend_faults(double_spent), - expected, - "DAG should have detected double spend" - ); - - // make sure the UTXOs of the double spend are unspendable - let got = 
dag.get_spend_faults(&utxo_of_2a); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2a, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO a of double spend should be unspendable" - ); - - let got = dag.get_spend_faults(&utxo_of_2b); - let expected = BTreeSet::from_iter([SpendFault::DoubleSpentAncestor { - addr: utxo_of_2b, - ancestor: *double_spent, - }]); - assert_eq!( - got, expected, - "UTXO b of double spend should be unspendable" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_missing_ancestry() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let spent_after2 = net - .send(&owner5, &owner6, 100)? 
- .first() - .expect("spent_after2 should have 1 element") - .to_owned(); - let utxo_after3 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after3); - - // create dag with one missing spend - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_missing_ancestry")?; - - // make sure the missing spend makes its descendants invalid - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing, - }]); - assert_eq!(got, expected, "DAG should have detected missing ancestry"); - - let got = dag.get_spend_faults(&spent_after2); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - spent_after2, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error to descendants" - ); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = BTreeSet::from_iter([SpendFault::PoisonedAncestry( - utxo_addr, - format!("missing ancestor at: {spend_missing:?}"), - )]); - assert_eq!( - got, expected, - "DAG should have propagated the error all the way to descendant utxos" - ); - Ok(()) -} - -#[test] -fn test_spend_dag_orphans() -> Result<()> { - let mut net = MockNetwork::genesis()?; - let genesis = net.genesis_spend; - - let owner1 = net.new_pk_with_balance(100)?; - let owner2 = net.new_pk_with_balance(0)?; - let owner3 = net.new_pk_with_balance(0)?; - let owner4 = net.new_pk_with_balance(0)?; - let owner5 = net.new_pk_with_balance(0)?; - let owner6 = net.new_pk_with_balance(0)?; - - net.send(&owner1, &owner2, 
100)?; - net.send(&owner2, &owner3, 100)?; - let spend_missing1 = net - .send(&owner3, &owner4, 100)? - .first() - .expect("spend_missing should have 1 element") - .to_owned(); - let spend_missing2 = net - .send(&owner4, &owner5, 100)? - .first() - .expect("spend_missing2 should have 1 element") - .to_owned(); - let spent_after1 = net - .send(&owner5, &owner6, 100)? - .first() - .expect("spent_after1 should have 1 element") - .to_owned(); - let utxo_after2 = net - .wallets - .get(&owner6) - .expect("owner6 wallet to exist") - .cn - .first() - .expect("owner6 wallet to have 1 cashnote") - .unique_pubkey(); - let utxo_addr = SpendAddress::from_unique_pubkey(&utxo_after2); - - // create dag with two missing spends in the chain - let net_spends = net - .spends - .into_iter() - .filter(|s| spend_missing1 != s.address() && spend_missing2 != s.address()); - let mut dag = SpendDag::new(genesis); - for spend in net_spends { - dag.insert(spend.address(), spend.clone()); - } - dag.record_faults(&genesis)?; - // dag.dump_to_file("/tmp/test_spend_dag_orphans")?; - - // make sure the spends after the two missing spends are orphans - let got = dag.get_spend_faults(&spent_after1); - let expected = BTreeSet::from_iter([ - SpendFault::OrphanSpend { - addr: spent_after1, - src: dag.source(), - }, - SpendFault::MissingAncestry { - addr: spent_after1, - ancestor: spend_missing2, - }, - ]); - assert_eq!(got, expected, "DAG should have detected orphan spend"); - - let got = dag.get_spend_faults(&utxo_addr); - let expected = SpendFault::OrphanSpend { - addr: utxo_addr, - src: dag.source(), - }; - assert!( - got.contains(&expected), - "Utxo of orphan spend should also be an orphan" - ); - Ok(()) -} diff --git a/sn_client/src/audit/tests/setup.rs b/sn_client/src/audit/tests/setup.rs deleted file mode 100644 index 4fa777ff22..0000000000 --- a/sn_client/src/audit/tests/setup.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use std::collections::{BTreeMap, BTreeSet}; - -use bls::SecretKey; -use eyre::{eyre, Result}; -use sn_transfers::{ - get_genesis_sk, CashNote, DerivationIndex, MainPubkey, MainSecretKey, NanoTokens, SignedSpend, - SignedTransaction, SpendAddress, SpendReason, GENESIS_CASHNOTE, -}; - -pub struct MockWallet { - pub sk: MainSecretKey, - pub cn: Vec, -} - -pub struct MockNetwork { - pub genesis_spend: SpendAddress, - pub spends: BTreeSet, - pub wallets: BTreeMap, -} - -impl MockNetwork { - pub fn genesis() -> Result { - let mut net = MockNetwork { - genesis_spend: SpendAddress::from_unique_pubkey(&GENESIS_CASHNOTE.unique_pubkey()), - spends: BTreeSet::new(), - wallets: BTreeMap::new(), - }; - - // create genesis wallet - let genesis_cn = GENESIS_CASHNOTE.clone(); - let genesis_pk = *GENESIS_CASHNOTE.main_pubkey(); - net.wallets.insert( - genesis_pk, - MockWallet { - sk: get_genesis_sk(), - cn: vec![genesis_cn], - }, - ); - - // spend genesis - let everything = GENESIS_CASHNOTE.value().as_nano(); - let spent_addrs = net - .send(&genesis_pk, &genesis_pk, everything) - .map_err(|e| eyre!("failed to send genesis: {e}"))?; - net.genesis_spend = match spent_addrs.as_slice() { - [one] => *one, - _ => { - return Err(eyre!( - "Expected Genesis spend to be unique but got {spent_addrs:?}" - )) - } - }; - - Ok(net) - } - - pub fn new_pk_with_balance(&mut self, balance: u64) -> Result { - let owner = MainSecretKey::new(SecretKey::random()); - let owner_pk = owner.main_pubkey(); - self.wallets.insert( - 
owner_pk, - MockWallet { - sk: owner, - cn: Vec::new(), - }, - ); - - if balance > 0 { - let genesis_pk = GENESIS_CASHNOTE.main_pubkey(); - println!("Sending {balance} from genesis {genesis_pk:?} to {owner_pk:?}"); - self.send(genesis_pk, &owner_pk, balance) - .map_err(|e| eyre!("failed to get money from genesis: {e}"))?; - } - Ok(owner_pk) - } - - pub fn send( - &mut self, - from: &MainPubkey, - to: &MainPubkey, - amount: u64, - ) -> Result> { - let mut rng = rand::thread_rng(); - let from_wallet = self - .wallets - .get(from) - .ok_or_else(|| eyre!("from wallet not found: {from:?}"))?; - let to_wallet = self - .wallets - .get(to) - .ok_or_else(|| eyre!("to wallet not found: {to:?}"))?; - - // perform offline transfer - let derivation_index = DerivationIndex::random(&mut rng); - let recipient = vec![( - NanoTokens::from(amount), - to_wallet.sk.main_pubkey(), - derivation_index, - false, - )]; - let tx = SignedTransaction::new( - from_wallet.cn.clone(), - recipient, - from_wallet.sk.main_pubkey(), - SpendReason::default(), - &from_wallet.sk, - ) - .map_err(|e| eyre!("failed to create transfer: {}", e))?; - let spends = tx.spends; - - // update wallets - let mut updated_from_wallet_cns = from_wallet.cn.clone(); - updated_from_wallet_cns.retain(|cn| { - !spends - .iter() - .any(|s| s.unique_pubkey() == &cn.unique_pubkey()) - }); - if let Some(ref change_cn) = tx.change_cashnote { - if !updated_from_wallet_cns - .iter() - .any(|cn| cn.unique_pubkey() == change_cn.unique_pubkey()) - { - updated_from_wallet_cns.extend(tx.change_cashnote); - } - } - - self.wallets - .entry(*from) - .and_modify(|w| w.cn = updated_from_wallet_cns); - self.wallets - .entry(*to) - .and_modify(|w| w.cn.extend(tx.output_cashnotes)); - - // update network spends - let spent_addrs = spends.iter().map(|s| s.address()).collect(); - self.spends.extend(spends); - Ok(spent_addrs) - } -} diff --git a/sn_client/src/chunks.rs b/sn_client/src/chunks.rs deleted file mode 100644 index 
7dbcaef92b..0000000000 --- a/sn_client/src/chunks.rs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod error; -mod pac_man; - -pub(crate) use self::error::{Error, Result}; -pub(crate) use pac_man::{encrypt_large, DataMapLevel}; diff --git a/sn_client/src/chunks/error.rs b/sn_client/src/chunks/error.rs deleted file mode 100644 index 6f9c83474e..0000000000 --- a/sn_client/src/chunks/error.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use self_encryption::MIN_ENCRYPTABLE_BYTES; -use sn_protocol::PrettyPrintRecordKey; -use std::io; -use thiserror::Error; -use xor_name::XorName; - -pub(crate) type Result = std::result::Result; - -/// Internal error. 
-#[derive(Debug, Error)] -pub enum Error { - #[error("Failed to get find payment for record: {0:?}")] - NoPaymentForRecord(PrettyPrintRecordKey<'static>), - - #[error("Failed to get chunk permit")] - CouldNotGetChunkPermit, - - #[error(transparent)] - SelfEncryption(#[from] self_encryption::Error), - - #[error(transparent)] - Io(#[from] io::Error), - - #[error(transparent)] - Serialisation(#[from] rmp_serde::encode::Error), - - #[error(transparent)] - Deserialisation(#[from] rmp_serde::decode::Error), - - #[error("Cannot store empty file.")] - EmptyFileProvided, - - #[error("File is too small to be encrypted, it is less than {MIN_ENCRYPTABLE_BYTES} bytes")] - FileTooSmall, - - #[error( - "The provided bytes ({size}) is too large to store as a `SmallFile` which maximum can be \ - {maximum}. Store as a LargeFile instead." - )] - TooLargeAsSmallFile { - /// Number of bytes - size: usize, - /// Maximum number of bytes for a `SmallFile` - maximum: usize, - }, - - #[error("Not all chunks were retrieved, expected {expected}, retrieved {retrieved}, missing {missing_chunks:?}.")] - NotEnoughChunksRetrieved { - /// Number of Chunks expected to be retrieved - expected: usize, - /// Number of Chunks retrieved - retrieved: usize, - /// Missing chunks - missing_chunks: Vec, - }, - - #[error("Chunk could not be retrieved from the network: {0:?}")] - ChunkMissing(XorName), - - #[error("Not all data was chunked, expected {expected}, but we have {chunked}.)")] - NotAllDataWasChunked { - /// Number of Chunks expected to be generated - expected: usize, - /// Number of Chunks generated - chunked: usize, - }, -} diff --git a/sn_client/src/chunks/pac_man.rs b/sn_client/src/chunks/pac_man.rs deleted file mode 100644 index 3cd368e320..0000000000 --- a/sn_client/src/chunks/pac_man.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use super::Result; -use bytes::{BufMut, Bytes, BytesMut}; -use rayon::prelude::*; -use self_encryption::{DataMap, StreamSelfEncryptor, MAX_CHUNK_SIZE}; -use serde::{Deserialize, Serialize}; -use sn_protocol::storage::Chunk; -use std::{ - fs::File, - io::Write, - path::{Path, PathBuf}, -}; -use xor_name::XorName; - -#[derive(Serialize, Deserialize)] -pub(crate) enum DataMapLevel { - // Holds the data map to the source data. - First(DataMap), - // Holds the data map of an _additional_ level of chunks - // resulting from chunking up a previous level data map. - // This happens when that previous level data map was too big to fit in a chunk itself. - Additional(DataMap), -} - -#[expect(unused)] -pub(crate) fn encrypt_from_path(path: &Path, output_dir: &Path) -> Result<(Chunk, Vec)> { - let (data_map, mut encrypted_chunks) = self_encryption::encrypt_from_file(path, output_dir)?; - - let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - - for chunk in additional_chunks.iter() { - encrypted_chunks.push(*chunk.name()); - let file_path = output_dir.join(hex::encode(chunk.name())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn encrypt_large( - file_path: &Path, - output_dir: &Path, -) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { - let mut encryptor = StreamSelfEncryptor::encrypt_from_file( - file_path.to_path_buf(), - Some(output_dir.to_path_buf()), - )?; - - let data_map; - loop { - match encryptor.next_encryption()? 
{ - (None, Some(m)) => { - // Returning a data_map means file encryption is completed. - data_map = m; - break; - } - _ => continue, - } - } - let mut encrypted_chunks: Vec<_> = data_map - .infos() - .iter() - .map(|chunk_info| { - let chunk_file_path = output_dir.join(hex::encode(chunk_info.dst_hash)); - (chunk_info.dst_hash, chunk_file_path) - }) - .collect(); - - // Pack the datamap into chunks that under the same output folder as well. - let (data_map_chunk, additional_chunks) = pack_data_map(data_map)?; - for chunk in additional_chunks.iter() { - let file_path = output_dir.join(hex::encode(chunk.name())); - encrypted_chunks.push((*chunk.name(), file_path.to_path_buf())); - let mut output_file = File::create(file_path)?; - output_file.write_all(&chunk.value)?; - } - - Ok((data_map_chunk, encrypted_chunks)) -} - -pub(crate) fn to_chunk(chunk_content: Bytes) -> Chunk { - Chunk::new(chunk_content) -} - -// Produces a chunk out of the first `DataMap`, which is validated for its size. -// If the chunk is too big, it is self-encrypted and the resulting (additional level) `DataMap` is put into a chunk. -// The above step is repeated as many times as required until the chunk size is valid. -// In other words: If the chunk content is too big, it will be -// self encrypted into additional chunks, and now we have a new `DataMap` -// which points to all of those additional chunks.. and so on. -fn pack_data_map(data_map: DataMap) -> Result<(Chunk, Vec)> { - let mut chunks = vec![]; - let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?; - debug!("Max chunk size: {} bytes", *MAX_CHUNK_SIZE); - - let (data_map_chunk, additional_chunks) = loop { - let chunk = to_chunk(chunk_content); - // If datamap chunk is less than or equal to MAX_CHUNK_SIZE return it so it can be directly sent to the network. - if chunk.serialised_size() <= *MAX_CHUNK_SIZE { - chunks.reverse(); - // Returns the last datamap, and all the chunks produced. 
- break (chunk, chunks); - } else { - let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE).writer(); - let mut serialiser = rmp_serde::Serializer::new(&mut bytes); - chunk.serialize(&mut serialiser)?; - let serialized_chunk = bytes.into_inner().freeze(); - - let (data_map, next_encrypted_chunks) = self_encryption::encrypt(serialized_chunk)?; - chunks = next_encrypted_chunks - .par_iter() - .map(|c| to_chunk(c.content.clone())) // no need to encrypt what is self-encrypted - .chain(chunks) - .collect(); - chunk_content = wrap_data_map(&DataMapLevel::Additional(data_map))?; - } - }; - - Ok((data_map_chunk, additional_chunks)) -} - -fn wrap_data_map(data_map: &DataMapLevel) -> Result { - // we use an initial/starting size of 300 bytes as that's roughly the current size of a DataMapLevel instance. - let mut bytes = BytesMut::with_capacity(300).writer(); - let mut serialiser = rmp_serde::Serializer::new(&mut bytes); - data_map.serialize(&mut serialiser)?; - Ok(bytes.into_inner().freeze()) -} diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs deleted file mode 100644 index d19ce4d58d..0000000000 --- a/sn_client/src/error.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -pub(crate) type Result = std::result::Result; - -use crate::UploadSummary; - -use super::ClientEvent; -use sn_protocol::NetworkAddress; -use sn_registers::{Entry, EntryHash}; -use sn_transfers::SpendAddress; -use std::collections::BTreeSet; -use thiserror::Error; -use tokio::time::Duration; -use xor_name::XorName; - -/// Internal error. -#[derive(Debug, Error)] -pub enum Error { - #[error("Genesis disbursement failed")] - GenesisDisbursement, - #[error("Faucet disbursement has already occured")] - FaucetDisbursement, - - #[error("Genesis error {0}")] - GenesisError(#[from] sn_transfers::GenesisError), - - #[error("Wallet Error {0}.")] - Wallet(#[from] sn_transfers::WalletError), - - #[error("Transfer Error {0}.")] - Transfer(#[from] sn_transfers::TransferError), - - #[error("Network Error {0}.")] - Network(#[from] sn_networking::NetworkError), - - #[error("Protocol error {0}.")] - Protocol(#[from] sn_protocol::error::Error), - - #[error("Register error {0}.")] - Register(#[from] sn_registers::Error), - - #[error("Chunks error {0}.")] - Chunks(#[from] super::chunks::Error), - - #[error("No cashnote found at {0:?}.")] - NoCashNoteFound(SpendAddress), - - #[error("Decrypting a Folder's item failed: {0}")] - FolderEntryDecryption(EntryHash), - - #[error("SelfEncryption Error {0}.")] - SelfEncryptionIO(#[from] self_encryption::Error), - - #[error("System IO Error {0}.")] - SystemIO(#[from] std::io::Error), - - #[error("Events receiver error {0}.")] - EventsReceiver(#[from] tokio::sync::broadcast::error::RecvError), - - #[error("Events sender error {0}.")] - EventsSender(#[from] tokio::sync::broadcast::error::SendError), - - #[error(transparent)] - JoinError(#[from] tokio::task::JoinError), - - #[error("Invalid DAG")] - InvalidDag, - #[error("Serialization error: {0:?}")] - Serialization(#[from] rmp_serde::encode::Error), - #[error("Deserialization error: {0:?}")] - Deserialization(#[from] rmp_serde::decode::Error), - - #[error( - "Content branches detected in the 
Register which need to be merged/resolved by user. \ - Entries hashes of branches are: {0:?}" - )] - ContentBranchDetected(BTreeSet<(EntryHash, Entry)>), - - #[error("The provided amount contains zero nanos")] - AmountIsZero, - - #[error("The payee for the address {0:?} was not found.")] - PayeeNotFound(NetworkAddress), - - /// CashNote add would overflow - #[error("Total price exceed possible token amount")] - TotalPriceTooHigh, - - #[error("Could not connect to the network in {0:?}")] - ConnectionTimeout(Duration), - - #[error("Could not send files event")] - CouldNotSendFilesEvent, - - #[error("Incorrect Download Option")] - IncorrectDownloadOption, - - #[error("The provided data map is empty")] - EmptyDataMap, - - #[error("Error occurred while assembling the downloaded chunks")] - FailedToAssembleDownloadedChunks, - - #[error("Task completion notification channel is done")] - FailedToReadFromNotificationChannel, - - #[error("Could not find register after batch sync: {0:?}")] - RegisterNotFoundAfterUpload(XorName), - - #[error("Could not connect due to incompatible network protocols. 
Our protocol: {0} Network protocol: {1}")] - UnsupportedProtocol(String, String), - - // ------ Upload Errors -------- - #[error("Overflow occurred while adding values")] - NumericOverflow, - - #[error("Uploadable item not found: {0:?}")] - UploadableItemNotFound(XorName), - - #[error("Invalid upload item found")] - InvalidUploadItemFound, - - #[error("The state tracked by the uploader is empty")] - UploadStateTrackerIsEmpty, - - #[error("Internal task channel dropped")] - InternalTaskChannelDropped, - - #[error("Multiple consecutive network errors reported during upload")] - SequentialNetworkErrors, - - #[error("Too many sequential payment errors reported during upload")] - SequentialUploadPaymentError, - - #[error("The maximum specified repayments has been reached for a single item: {0:?}")] - MaximumRepaymentsReached(XorName), - - #[error("The upload failed with maximum repayments reached for multiple items: {items:?} Summary: {summary:?}")] - UploadFailedWithMaximumRepaymentsReached { - items: Vec, - summary: UploadSummary, - }, - - #[error("Error occurred when access wallet file")] - FailedToAccessWallet, - - #[error("Error parsing entropy for mnemonic phrase")] - FailedToParseEntropy, - - #[error("Error parsing mnemonic phrase")] - FailedToParseMnemonic, - - #[error("Invalid mnemonic seed phrase")] - InvalidMnemonicSeedPhrase, - - #[error("SecretKey could not be created from the provided bytes")] - InvalidKeyBytes, -} diff --git a/sn_client/src/event.rs b/sn_client/src/event.rs deleted file mode 100644 index 14ba654d0f..0000000000 --- a/sn_client/src/event.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use serde::Serialize; -use tokio::sync::broadcast::{self, error::RecvError}; - -// Channel where events will be broadcasted by the client. -#[derive(Clone, Debug)] -pub struct ClientEventsBroadcaster(broadcast::Sender); - -impl Default for ClientEventsBroadcaster { - fn default() -> Self { - Self(broadcast::channel(100).0) - } -} - -impl ClientEventsBroadcaster { - /// Returns a new receiver to listen to the channel. - /// Multiple receivers can be actively listening. - pub fn subscribe(&self) -> ClientEventsReceiver { - ClientEventsReceiver(self.0.subscribe()) - } - - // Broadcast a new event, meant to be a helper only used by the client's internals. - pub(crate) fn broadcast(&self, event: ClientEvent) { - if let Err(err) = self.0.send(event) { - if self.0.receiver_count() == 0 { - return; - } - trace!("Could not broadcast ClientEvent, though we do have listeners: {err:?}"); - } - } -} - -/// Type of events broadcasted by the client to the public API. -#[derive(Clone, custom_debug::Debug, Serialize)] -pub enum ClientEvent { - /// A peer has been added to the Routing table. - /// Also contains the max number of peers to connect to before we receive ClientEvent::ConnectedToNetwork - PeerAdded { max_peers_to_connect: usize }, - /// We've encountered a Peer with an unsupported protocol. 
- PeerWithUnsupportedProtocol { - our_protocol: String, - their_protocol: String, - }, - /// The client has been connected to the network - ConnectedToNetwork, - /// No network activity has been received for a given duration - /// we should error out - InactiveClient(tokio::time::Duration), -} - -/// Receiver Channel where users of the public API can listen to events broadcasted by the client. -#[derive(Debug)] -pub struct ClientEventsReceiver(pub(super) broadcast::Receiver); - -impl ClientEventsReceiver { - /// Receive a new event, meant to be used by the user of the public API. - pub async fn recv(&mut self) -> std::result::Result { - self.0.recv().await - } -} diff --git a/sn_client/src/faucet.rs b/sn_client/src/faucet.rs deleted file mode 100644 index b3ccaace78..0000000000 --- a/sn_client/src/faucet.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::send, Client, Error, Result}; -use sn_transfers::{load_genesis_wallet, HotWallet, NanoTokens, FOUNDATION_PK}; - -const INITIAL_FAUCET_BALANCE: NanoTokens = NanoTokens::from(900000000000000000); - -/// Use the client to load the faucet wallet from the genesis Wallet. -/// With all balance transferred from the genesis_wallet to the faucet_wallet. 
-pub async fn fund_faucet_from_genesis_wallet( - client: &Client, - faucet_wallet: &mut HotWallet, -) -> Result<()> { - faucet_wallet.try_load_cash_notes()?; - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - println!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - debug!( - "Faucet wallet existing balance: {}", - faucet_wallet.balance() - ); - - return Ok(()); - } - - info!("funding faucet from genesis..."); - - // Confirm Genesis not used yet - if client.is_genesis_spend_present().await { - warn!("Faucet can't get funded from genesis, genesis is already spent!"); - println!("Faucet can't get funded from genesis, genesis is already spent!"); - // Try loading cash notes up to 100 times, waiting 1 second between attempts - for attempt in 1..=100 { - println!("Attempt {attempt} to load cash notes"); - debug!("Attempt {attempt} to load cash notes"); - faucet_wallet.try_load_cash_notes()?; - if !faucet_wallet.balance().is_zero() { - println!("Successfully loaded cash notes on attempt {attempt}"); - debug!("Successfully loaded cash notes on attempt {attempt}"); - return Ok(()); - } - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - } - - // If we've tried 100 times and still have zero balance, return an error - return Err(Error::FaucetDisbursement); - } - - println!("Initiating genesis..."); - debug!("Initiating genesis..."); - let genesis_wallet = load_genesis_wallet()?; - let genesis_balance = genesis_wallet.balance(); - - let (foundation_cashnote, faucet_cashnote) = { - println!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - debug!("Sending {INITIAL_FAUCET_BALANCE} from genesis to faucet wallet.."); - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet wallet balance: {}", faucet_wallet.balance()); - let faucet_cashnote = send( - genesis_wallet, - INITIAL_FAUCET_BALANCE, - faucet_wallet.address(), - client, - true, - ) - .await?; - - 
faucet_wallet - .deposit_and_store_to_disk(&vec![faucet_cashnote.clone()]) - .expect("Faucet wallet shall be stored successfully."); - - // now send the money to the foundation - let foundation_balance = genesis_balance - .checked_sub(INITIAL_FAUCET_BALANCE) - .ok_or(Error::GenesisDisbursement)?; - - println!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - debug!("Sending {foundation_balance:?} from genesis to foundation wallet.."); - - let genesis_wallet = load_genesis_wallet()?; - - let foundation_cashnote = send( - genesis_wallet, - foundation_balance, - *FOUNDATION_PK, - client, - true, - ) - .await?; - - (foundation_cashnote, faucet_cashnote) - }; - - println!("Faucet wallet balance: {}", faucet_wallet.balance()); - debug!("Faucet wallet balance: {}", faucet_wallet.balance()); - - println!("Verifying the transfer from genesis..."); - debug!("Verifying the transfer from genesis..."); - if let Err(error) = client.verify_cashnote(&foundation_cashnote).await { - error!("Could not verify the transfer from genesis to foundation: {error}. Panicking."); - panic!("Could not verify the transfer from genesis to foundation: {error}"); - } else { - println!( - "Successfully verified the transfer from genesis to foundation on the second try." 
- ); - - #[cfg(not(target_arch = "wasm32"))] - { - // write the foundation cashnote to disk - let root_dir = faucet_wallet.api().wallet_dir(); - - let foundation_transfer_path = root_dir.join("foundation_disbursement.transfer"); - - debug!("Writing cash note to: {foundation_transfer_path:?}"); - - let transfer = - sn_transfers::Transfer::transfer_from_cash_note(&foundation_cashnote)?.to_hex()?; - - if let Err(error) = std::fs::write(foundation_transfer_path, transfer) { - error!("Could not write the foundation transfer to disk: {error}."); - return Err(Error::from(error)); - } - } - - info!("Successfully verified the transfer from genesis to foundation on the second try."); - } - - if let Err(error) = client.verify_cashnote(&faucet_cashnote).await { - error!("Could not verify the transfer from genesis to faucet: {error}. Panicking."); - panic!("Could not verify the transfer from genesis to faucet: {error}"); - } else { - println!("Successfully verified the transfer from genesis to faucet on the second try."); - info!("Successfully verified the transfer from genesis to faucet on the second try."); - } - - Ok(()) -} diff --git a/sn_client/src/files.rs b/sn_client/src/files.rs deleted file mode 100644 index 8643b71961..0000000000 --- a/sn_client/src/files.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -pub(crate) mod download; - -use crate::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, chunks::Error as ChunksError, - error::Result, wallet::StoragePaymentResult, Client, Error, WalletClient, -}; -use bytes::Bytes; -use self_encryption::{self, MIN_ENCRYPTABLE_BYTES}; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RetryStrategy}, - NetworkAddress, -}; - -use std::{ - fs::{self, create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, -}; -use tempfile::tempdir; -use tracing::trace; -use xor_name::XorName; - -/// `BATCH_SIZE` determines the number of chunks that are processed in parallel during the payment and upload process. -pub const BATCH_SIZE: usize = 16; - -/// File APIs. -#[derive(Clone)] -pub struct FilesApi { - pub(crate) client: Client, - pub(crate) wallet_dir: PathBuf, -} - -/// This is the (file xorname, datamap_data, filesize, and chunks) -/// If the DataMapChunk exists and is not stored on the network, then it will not be accessible at this address of ChunkAddress(XorName) . -type ChunkFileResult = Result<(ChunkAddress, Chunk, u64, Vec<(XorName, PathBuf)>)>; - -impl FilesApi { - /// Create file apis instance. - pub fn new(client: Client, wallet_dir: PathBuf) -> Self { - Self { client, wallet_dir } - } - pub fn build(client: Client, wallet_dir: PathBuf) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(&wallet_dir, None)?; - - if wallet.balance().is_zero() { - Err(Error::AmountIsZero) - } else { - Ok(FilesApi::new(client, wallet_dir)) - } - } - - /// Return the client instance - pub fn client(&self) -> &Client { - &self.client - } - - /// Create a new WalletClient for a given root directory. 
- pub fn wallet(&self) -> Result { - let path = self.wallet_dir.as_path(); - - let wallet = load_account_wallet_or_create_with_mnemonic(path, None)?; - - Ok(WalletClient::new(self.client.clone(), wallet)) - } - - /// Tries to chunk the file, returning `(head_address, data_map_chunk, file_size, chunk_names)` - /// and writes encrypted chunks to disk. - pub fn chunk_file( - file_path: &Path, - chunk_dir: &Path, - include_data_map_in_chunks: bool, - ) -> ChunkFileResult { - let file = File::open(file_path)?; - let metadata = file.metadata()?; - let file_size = metadata.len(); - - let (head_address, data_map_chunk, mut chunks_paths) = - if file_size < MIN_ENCRYPTABLE_BYTES as u64 { - Err(ChunksError::FileTooSmall)? - } else { - let (data_map_chunk, chunks) = encrypt_large(file_path, chunk_dir)?; - (*data_map_chunk.name(), data_map_chunk, chunks) - }; - - debug!("include_data_map_in_chunks {include_data_map_in_chunks:?}"); - - if include_data_map_in_chunks { - info!("Data_map_chunk to be written!"); - let data_map_path = chunk_dir.join(hex::encode(*data_map_chunk.name())); - - trace!("Data_map_chunk being written to {data_map_path:?}"); - let mut output_file = File::create(data_map_path.clone())?; - output_file.write_all(&data_map_chunk.value)?; - - chunks_paths.push((*data_map_chunk.name(), data_map_path)) - } - - Ok(( - ChunkAddress::new(head_address), - data_map_chunk, - file_size, - chunks_paths, - )) - } - - /// Directly writes Chunks to the network in the - /// form of immutable self encrypted chunks. 
- /// - /// * 'retry_strategy' - [Option]<[RetryStrategy]> : Uses Quick by default - pub async fn get_local_payment_and_upload_chunk( - &self, - chunk: Chunk, - verify_store: bool, - retry_strategy: Option, - ) -> Result<()> { - let chunk_addr = chunk.network_address(); - trace!("Client upload started for chunk: {chunk_addr:?}"); - - let wallet_client = self.wallet()?; - let (payment, payee) = wallet_client.get_recent_payment_for_addr(&chunk_addr)?; - - debug!("Payments for chunk: {chunk_addr:?} to {payee:?}: {payment:?}"); - - self.client - .store_chunk(chunk, payee, payment, verify_store, retry_strategy) - .await?; - - wallet_client.remove_payment_for_addr(&chunk_addr)?; - - trace!("Client upload completed for chunk: {chunk_addr:?}"); - Ok(()) - } - - /// Pay for a given set of chunks. - /// - /// Returns the cost and the resulting new balance of the local wallet. - pub async fn pay_for_chunks(&self, chunks: Vec) -> Result { - let mut wallet_client = self.wallet()?; - info!("Paying for and uploading {:?} chunks", chunks.len()); - - let res = wallet_client - .pay_for_storage( - chunks - .iter() - .map(|name| NetworkAddress::ChunkAddress(ChunkAddress::new(*name))), - ) - .await?; - - wallet_client.store_local_wallet()?; - Ok(res) - } - - // -------------------------------------------- - // ---------- Private helpers ----------------- - // -------------------------------------------- - - /// Used for testing - pub async fn upload_test_bytes(&self, bytes: Bytes, verify: bool) -> Result { - let temp_dir = tempdir()?; - let file_path = temp_dir.path().join("tempfile"); - let mut file = File::create(&file_path)?; - file.write_all(&bytes)?; - - let chunk_path = temp_dir.path().join("chunk_path"); - create_dir_all(chunk_path.clone())?; - - let (head_address, _data_map, _file_size, chunks_paths) = - Self::chunk_file(&file_path, &chunk_path, true)?; - - for (_chunk_name, chunk_path) in chunks_paths { - let chunk = Chunk::new(Bytes::from(fs::read(chunk_path)?)); - 
self.get_local_payment_and_upload_chunk(chunk, verify, None) - .await?; - } - - Ok(NetworkAddress::ChunkAddress(head_address)) - } -} - -/// Encrypts a [`LargeFile`] and returns the resulting address and all chunk names. -/// Correspondent encrypted chunks are written in the specified output folder. -/// Does not store anything to the network. -/// -/// Returns data map as a chunk, and the resulting chunks -fn encrypt_large(file_path: &Path, output_dir: &Path) -> Result<(Chunk, Vec<(XorName, PathBuf)>)> { - Ok(crate::chunks::encrypt_large(file_path, output_dir)?) -} diff --git a/sn_client/src/files/download.rs b/sn_client/src/files/download.rs deleted file mode 100644 index 4444fab023..0000000000 --- a/sn_client/src/files/download.rs +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - chunks::{DataMapLevel, Error as ChunksError}, - error::{Error as ClientError, Result}, - Client, FilesApi, BATCH_SIZE, -}; -use bytes::Bytes; -use futures::StreamExt; -use itertools::Itertools; -use self_encryption::{decrypt_full_set, DataMap, EncryptedChunk, StreamSelfDecryptor}; -use sn_networking::target_arch::Instant; -use sn_protocol::storage::{Chunk, ChunkAddress, RetryStrategy}; - -use std::{collections::HashMap, fs, path::PathBuf}; -use tokio::sync::mpsc::{self}; -use xor_name::XorName; - -/// The events emitted from the download process. 
-pub enum FilesDownloadEvent { - /// Downloaded a Chunk from the network - Downloaded(ChunkAddress), - /// The total number of chunks we are about to download. - /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk. - ChunksCount(usize), - /// The total number of data map chunks that we are about to download. This happens if the datamap file is. - /// very large. - /// Note: This count currently is not accurate. It does not take into account how we fetch the initial head chunk. - DatamapCount(usize), - /// The download process has terminated with an error. - Error, -} - -// Internally used to differentiate between the various ways that the downloaded chunks are returned. -enum DownloadReturnType { - EncryptedChunks(Vec), - DecryptedBytes(Bytes), - WrittenToFileSystem, -} - -/// `FilesDownload` provides functionality for downloading chunks with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). -pub struct FilesDownload { - // Configurations - batch_size: usize, - show_holders: bool, - retry_strategy: RetryStrategy, - // API - api: FilesApi, - // Events - event_sender: Option>, - logged_event_sender_absence: bool, -} - -impl FilesDownload { - /// Creates a new instance of `FilesDownload` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - pub fn new(files_api: FilesApi) -> Self { - Self { - batch_size: BATCH_SIZE, - show_holders: false, - retry_strategy: RetryStrategy::Quick, - api: files_api, - event_sender: None, - logged_event_sender_absence: false, - } - } - - /// Sets the default batch size that determines the number of chunks that are downloaded in parallel - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 64`. 
- pub fn set_batch_size(mut self, batch_size: usize) -> Self { - self.batch_size = batch_size; - self - } - - /// Sets the option to display the holders that are expected to be holding a chunk during verification. - /// - /// By default, this option is set to false. - pub fn set_show_holders(mut self, show_holders: bool) -> Self { - self.show_holders = show_holders; - self - } - - /// Sets the RetryStrategy to increase the re-try on failure attempts. - /// - /// By default, this option is set to RetryStrategy::Quick - pub fn set_retry_strategy(mut self, retry_strategy: RetryStrategy) -> Self { - self.retry_strategy = retry_strategy; - self - } - - /// Returns a receiver for file download events. - /// This method is optional and the download process can be performed without it. - pub fn get_events(&mut self) -> mpsc::Receiver { - let (event_sender, event_receiver) = mpsc::channel(10); - // should we return error if an sender is already set? - self.event_sender = Some(event_sender); - - event_receiver - } - - /// Download bytes from the network. The contents are spread across - /// multiple chunks in the network. This function invokes the self-encryptor and returns - /// the data that was initially stored. - /// - /// Takes `position` and `length` arguments which specify the start position - /// and the length of bytes to be read. - /// Passing `0` to position reads the data from the beginning, - /// and the `length` is just an upper limit. - pub async fn download_from( - &mut self, - address: ChunkAddress, - position: usize, - length: usize, - ) -> Result { - // clean up the trackers/stats - self.logged_event_sender_absence = false; - - let result = self.download_from_inner(address, position, length).await; - - // send an event indicating that the download process completed with an error - if result.is_err() { - self.send_event(FilesDownloadEvent::Error).await?; - } - - // drop the sender to close the channel. 
- let sender = self.event_sender.take(); - drop(sender); - - result - } - - pub async fn download_from_inner( - &mut self, - address: ChunkAddress, - position: usize, - length: usize, - ) -> Result { - debug!("Reading {length} bytes at: {address:?}, starting from position: {position}"); - let chunk = self - .api - .client - .get_chunk(address, false, Some(self.retry_strategy)) - .await?; - - // First try to deserialize a LargeFile, if it works, we go and seek it. - // If an error occurs, we consider it to be a SmallFile. - if let Ok(data_map) = self.unpack_chunk(chunk.clone()).await { - let info = self_encryption::seek_info(data_map.file_size(), position, length); - let range = &info.index_range; - let all_infos = data_map.infos(); - - let to_download = (range.start..range.end + 1) - .clone() - .map(|i| all_infos[i].clone()) - .collect_vec(); - let to_download = DataMap::new(to_download); - - // not written to file and return the encrypted chunks - if let DownloadReturnType::EncryptedChunks(encrypted_chunks) = - self.read(to_download, None, true, false).await? - { - let bytes = self_encryption::decrypt_range( - &data_map, - &encrypted_chunks, - info.relative_pos, - length, - ) - .map_err(ChunksError::SelfEncryption)?; - return Ok(bytes); - } else { - error!("IncorrectDownloadOption: expected to get the encrypted chunks back"); - return Err(ClientError::IncorrectDownloadOption); - } - } - - // The error above is ignored to avoid leaking the storage format detail of SmallFiles and LargeFiles. - // The basic idea is that we're trying to deserialize as one, and then the other. - // The cost of it is that some errors will not be seen without a refactor. - let mut bytes = chunk.value().clone(); - - let _ = bytes.split_to(position); - bytes.truncate(length); - - Ok(bytes) - } - - /// Download a file from the network and get the decrypted bytes. - /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address. 
- pub async fn download_file( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - ) -> Result { - if let Some(bytes) = self - .download_entire_file(address, data_map_chunk, None) - .await? - { - Ok(bytes) - } else { - error!("IncorrectDownloadOption: expected to get decrypted bytes, but we got None"); - Err(ClientError::IncorrectDownloadOption) - } - } - - /// Download a file from the network and write it to the provided path. - /// If the data_map_chunk is not provided, the DataMap is fetched from the network using the provided address. - pub async fn download_file_to_path( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - path: PathBuf, - ) -> Result<()> { - if self - .download_entire_file(address, data_map_chunk, Some(path)) - .await? - .is_none() - { - Ok(()) - } else { - error!( - "IncorrectDownloadOption: expected to not get any decrypted bytes, but got Some" - ); - Err(ClientError::IncorrectDownloadOption) - } - } - - /// Download a file from the network. - /// If you want to track the download progress, use the `get_events` method. - async fn download_entire_file( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - downloaded_file_path: Option, - ) -> Result> { - // clean up the trackers/stats - self.logged_event_sender_absence = false; - - let result = self - .download_entire_file_inner(address, data_map_chunk, downloaded_file_path) - .await; - - // send an event indicating that the download process completed with an error - if result.is_err() { - self.send_event(FilesDownloadEvent::Error).await?; - } - - // drop the sender to close the channel. 
- let sender = self.event_sender.take(); - drop(sender); - - result - } - - async fn download_entire_file_inner( - &mut self, - address: ChunkAddress, - data_map_chunk: Option, - downloaded_file_path: Option, - ) -> Result> { - let head_chunk = if let Some(chunk) = data_map_chunk { - info!("Downloading via supplied local datamap"); - chunk - } else { - match self - .api - .client - .get_chunk(address, self.show_holders, Some(self.retry_strategy)) - .await - { - Ok(chunk) => chunk, - Err(err) => { - error!("Failed to fetch head chunk {address:?}"); - return Err(err); - } - } - }; - - // first try to deserialize a LargeFile, if it works, we go and seek it - match self.unpack_chunk(head_chunk.clone()).await { - Ok(data_map) => { - // read_all emits - match self - .read(data_map, downloaded_file_path, false, false) - .await? - { - DownloadReturnType::EncryptedChunks(_) => { - error!("IncorrectDownloadOption: we should not be getting the encrypted chunks back as it is set to false."); - Err(ClientError::IncorrectDownloadOption) - } - DownloadReturnType::DecryptedBytes(bytes) => Ok(Some(bytes)), - DownloadReturnType::WrittenToFileSystem => Ok(None), - } - } - Err(ClientError::Chunks(ChunksError::Deserialisation(_))) => { - // Only in case of a deserialisation error, - // shall consider the head chunk to be a SmallFile. - // With the min-size now set to 3 Bytes, such case shall be rare. - // Hence raise a warning for it. 
- warn!("Consider head chunk {address:?} as an SmallFile"); - println!("Consider head chunk {address:?} as an SmallFile"); - - self.send_event(FilesDownloadEvent::ChunksCount(1)).await?; - self.send_event(FilesDownloadEvent::Downloaded(address)) - .await?; - if let Some(path) = downloaded_file_path { - fs::write(path, head_chunk.value().clone())?; - Ok(None) - } else { - Ok(Some(head_chunk.value().clone())) - } - } - Err(err) => { - // For large data_map that consists of multiple chunks, - // `unpack_chunk` function will try to fetch those chunks from network. - // During the process, any chunk could be failed to download, - // hence trigger an error to be raised. - error!("Encounter error when unpack head_chunk {address:?} : {err:?}"); - println!("Encounter error when unpack head_chunk {address:?} : {err:?}"); - Err(err) - } - } - } - - /// The internal logic to download the provided chunks inside the datamap. - /// If the decrypted_file_path is provided, we return DownloadReturnType::WrittenToFileSystem - /// If return_encrypted_chunks is true, we return DownloadReturnType::EncryptedChunks - /// Else we return DownloadReturnType::DecryptedBytes - /// - /// Set we_are_downloading_a_datamap if we want to emit the DatamapCount else we emit ChunksCount - async fn read( - &mut self, - data_map: DataMap, - decrypted_file_path: Option, - return_encrypted_chunks: bool, - we_are_downloading_a_datamap: bool, - ) -> Result { - // used internally - enum DownloadKind { - FileSystem(StreamSelfDecryptor), - Memory(Vec), - } - - let mut download_kind = { - if let Some(path) = decrypted_file_path { - DownloadKind::FileSystem(StreamSelfDecryptor::decrypt_to_file(path, &data_map)?) 
- } else { - DownloadKind::Memory(Vec::new()) - } - }; - let chunk_infos = data_map.infos(); - let expected_count = chunk_infos.len(); - - if we_are_downloading_a_datamap { - self.send_event(FilesDownloadEvent::ChunksCount(expected_count)) - .await?; - } else { - // we're downloading the chunks related to a huge datamap - self.send_event(FilesDownloadEvent::DatamapCount(expected_count)) - .await?; - } - - let now = Instant::now(); - - let client_clone = self.api.client.clone(); - let show_holders = self.show_holders; - let retry_strategy = self.retry_strategy; - // the initial index is not always 0 as we might seek a range of bytes. So fetch the first index - let mut current_index = chunk_infos - .first() - .ok_or_else(|| ClientError::EmptyDataMap)? - .index; - let mut stream = futures::stream::iter(chunk_infos.into_iter()) - .map(|chunk_info| { - Self::get_chunk( - client_clone.clone(), - chunk_info.dst_hash, - chunk_info.index, - show_holders, - retry_strategy, - ) - }) - .buffer_unordered(self.batch_size); - - let mut chunk_download_cache = HashMap::new(); - - while let Some(result) = stream.next().await { - let (chunk_address, index, encrypted_chunk) = result?; - // notify about the download - self.send_event(FilesDownloadEvent::Downloaded(chunk_address)) - .await?; - info!("Downloaded chunk of index {index:?}. We are at current_index {current_index:?}"); - - // check if current_index is present in the cache before comparing the fetched index. - // try to keep removing from the cache until we run out of sequential chunks to insert. - while let Some(encrypted_chunk) = chunk_download_cache.remove(¤t_index) { - debug!("Got current_index {current_index:?} from the download cache. 
Incrementing current index"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } - // now check if we can process the fetched index, else cache it. - if index == current_index { - debug!("The downloaded chunk's index {index:?} matches the current index {current_index}. Processing it"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } else { - // since we download the chunks concurrently without order, we cache the results for an index that - // finished earlier - debug!("The downloaded chunk's index {index:?} does not match with the current_index {current_index}. Inserting into cache"); - let _ = chunk_download_cache.insert(index, encrypted_chunk); - } - } - - // finally empty out the cache. - debug!("Finally emptying out the download cache"); - while let Some(encrypted_chunk) = chunk_download_cache.remove(¤t_index) { - debug!("Got current_index {current_index:?} from the download cache. Incrementing current index"); - match &mut download_kind { - DownloadKind::FileSystem(decryptor) => { - let _ = decryptor.next_encrypted(encrypted_chunk)?; - } - DownloadKind::Memory(collector) => collector.push(encrypted_chunk), - } - current_index += 1; - } - if !chunk_download_cache.is_empty() { - error!( - "The chunk download cache is not empty. Current index {current_index:?}. 
The indices inside the cache: {:?}", - chunk_download_cache.keys() - ); - return Err(ClientError::FailedToAssembleDownloadedChunks); - } - - let elapsed = now.elapsed(); - info!("Client downloaded file in {elapsed:?}"); - - match download_kind { - DownloadKind::FileSystem(_) => Ok(DownloadReturnType::WrittenToFileSystem), - DownloadKind::Memory(collector) => { - let result = if return_encrypted_chunks { - DownloadReturnType::EncryptedChunks(collector) - } else { - let bytes = decrypt_full_set(&data_map, &collector) - .map_err(ChunksError::SelfEncryption)?; - DownloadReturnType::DecryptedBytes(bytes) - }; - - Ok(result) - } - } - } - - /// Extracts a file DataMapLevel from a chunk. - /// If the DataMapLevel is not the first level mapping directly to the user's contents, - /// the process repeats itself until it obtains the first level DataMapLevel. - pub async fn unpack_chunk(&mut self, mut chunk: Chunk) -> Result { - loop { - match rmp_serde::from_slice(chunk.value()).map_err(ChunksError::Deserialisation)? { - DataMapLevel::First(data_map) => { - return Ok(data_map); - } - DataMapLevel::Additional(data_map) => { - if let DownloadReturnType::DecryptedBytes(serialized_chunk) = - self.read(data_map, None, false, true).await? - { - chunk = rmp_serde::from_slice(&serialized_chunk) - .map_err(ChunksError::Deserialisation)?; - } else { - error!("IncorrectDownloadOption: we should be getting the decrypted bytes back."); - return Err(ClientError::IncorrectDownloadOption); - } - } - } - } - } - - async fn send_event(&mut self, event: FilesDownloadEvent) -> Result<()> { - if let Some(sender) = self.event_sender.as_ref() { - sender.send(event).await.map_err(|err| { - error!("Could not send files download event due to {err:?}"); - ClientError::CouldNotSendFilesEvent - })?; - } else if !self.logged_event_sender_absence { - info!("Files download event sender is not set. 
Use get_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - Ok(()) - } - - async fn get_chunk( - client: Client, - address: XorName, - index: usize, - show_holders: bool, - retry_strategy: RetryStrategy, - ) -> std::result::Result<(ChunkAddress, usize, EncryptedChunk), ChunksError> { - let chunk = client - .get_chunk( - ChunkAddress::new(address), - show_holders, - Some(retry_strategy), - ) - .await - .map_err(|err| { - error!("Chunk missing {address:?} with {err:?}",); - ChunksError::ChunkMissing(address) - })?; - let encrypted_chunk = EncryptedChunk { - index, - content: chunk.value, - }; - Ok((chunk.address, index, encrypted_chunk)) - } -} diff --git a/sn_client/src/folders.rs b/sn_client/src/folders.rs deleted file mode 100644 index e2c94ef929..0000000000 --- a/sn_client/src/folders.rs +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{error::Result, Client, ClientRegister, WalletClient}; -use crate::{acc_packet::load_account_wallet_or_create_with_mnemonic, Error, FilesApi, UploadCfg}; -use bls::{Ciphertext, PublicKey}; -use bytes::{BufMut, BytesMut}; -use self_encryption::MAX_CHUNK_SIZE; -use serde::{Deserialize, Serialize}; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RegisterAddress}, - NetworkAddress, -}; -use sn_registers::{Entry, EntryHash}; - -use std::{ - collections::{BTreeMap, BTreeSet}, - ffi::OsString, - path::{Path, PathBuf}, -}; -use xor_name::{XorName, XOR_NAME_LEN}; - -/// Folder Entry representing either a file or subfolder. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum FolderEntry { - File(Chunk), - Folder(RegisterAddress), -} - -/// Metadata to be stored on a Chunk, linked from and belonging to Registers' entries. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Metadata { - pub name: String, - pub content: FolderEntry, -} - -// This is the entry value used in Folders to mark a removed file/folder. -const REMOVED_ENTRY_MARK: XorName = XorName([0; XOR_NAME_LEN]); - -/// Folders APIs. -#[derive(Clone)] -pub struct FoldersApi { - client: Client, - wallet_dir: PathBuf, - register: ClientRegister, - files_api: FilesApi, - // Cache of metadata chunks. We keep the Chunk itself till we upload it to the network. - metadata: BTreeMap)>, -} - -impl FoldersApi { - /// Create FoldersApi instance. - pub fn new( - client: Client, - wallet_dir: &Path, - address: Option, - ) -> Result { - let register = if let Some(addr) = address { - ClientRegister::create_with_addr(client.clone(), addr) - } else { - let mut rng = rand::thread_rng(); - ClientRegister::create(client.clone(), XorName::random(&mut rng)) - }; - - Self::create(client, wallet_dir, register) - } - - /// Clones the register instance. Any change made to one instance will not be reflected on the other register. 
- pub fn register(&self) -> ClientRegister { - self.register.clone() - } - - /// Return the address of the Folder (Register address) on the network - pub fn address(&self) -> &RegisterAddress { - self.register.address() - } - - /// Return the address of the Folder (Register address) as a NetworkAddress - pub fn as_net_addr(&self) -> NetworkAddress { - NetworkAddress::RegisterAddress(*self.address()) - } - - /// Return the list of metadata chunks addresses that need to be payed for in order to be - /// able to then store all data on the network upon calling `sync` method. - pub fn meta_addrs_to_pay(&self) -> BTreeSet { - self.metadata - .iter() - .filter_map(|(meta_xorname, (_, chunk))| { - chunk - .as_ref() - .map(|_| NetworkAddress::ChunkAddress(ChunkAddress::new(*meta_xorname))) - }) - .collect() - } - - /// Return the list of metadata chunks. - pub fn meta_chunks(&self) -> BTreeSet { - self.metadata - .iter() - .filter_map(|(_, (_, chunk))| chunk.clone()) - .collect() - } - - /// Create a new WalletClient from the directory set. - pub fn wallet(&self) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(&self.wallet_dir, None)?; - Ok(WalletClient::new(self.client.clone(), wallet)) - } - - /// Add provided file as entry of this Folder (locally). - /// The new file's metadata chunk will be encrypted if a key has been provided. - pub fn add_file( - &mut self, - file_name: OsString, - data_map_chunk: Chunk, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: file_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::File(data_map_chunk), - }; - - self.add_entry(metadata, &BTreeSet::default(), encryption_pk) - } - - /// Add subfolder as entry of this Folder (locally). - /// The new folder's metadata chunk will be encrypted if a key has been provided. 
- pub fn add_folder( - &mut self, - folder_name: OsString, - address: RegisterAddress, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: folder_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::Folder(address), - }; - - self.add_entry(metadata, &BTreeSet::default(), encryption_pk) - } - - /// Replace an existing file with the provided one (locally). - /// The new file's metadata chunk will be encrypted if a key has been provided. - pub fn replace_file( - &mut self, - existing_entry: EntryHash, - file_name: OsString, - data_map_chunk: Chunk, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - // create metadata Chunk for this entry - let metadata = Metadata { - name: file_name.to_str().unwrap_or("unknown").to_string(), - content: FolderEntry::File(data_map_chunk), - }; - - self.add_entry( - metadata, - &vec![existing_entry].into_iter().collect(), - encryption_pk, - ) - } - - /// Remove a file/folder item from this Folder (locally). - pub fn remove_item(&mut self, existing_entry: EntryHash) -> Result<()> { - let _ = self.register.write_atop( - &REMOVED_ENTRY_MARK, - &vec![existing_entry].into_iter().collect(), - )?; - Ok(()) - } - - /// Sync local Folder with the network. 
- pub async fn sync(&mut self, upload_cfg: UploadCfg) -> Result<()> { - let mut wallet_client = self.wallet()?; - - // First upload any newly created metadata chunk - for (_, meta_chunk) in self.metadata.values_mut() { - if let Some(chunk) = meta_chunk.take() { - self.files_api - .get_local_payment_and_upload_chunk( - chunk.clone(), - upload_cfg.verify_store, - Some(upload_cfg.retry_strategy), - ) - .await?; - } - } - - let payment_info = wallet_client.get_recent_payment_for_addr(&self.as_net_addr())?; - - self.register - .sync( - &mut wallet_client, - upload_cfg.verify_store, - Some(payment_info), - ) - .await?; - - Ok(()) - } - - /// Download a copy of the Folder from the network. - pub async fn retrieve( - client: Client, - wallet_dir: &Path, - address: RegisterAddress, - ) -> Result { - let register = ClientRegister::retrieve(client.clone(), address).await?; - Self::create(client, wallet_dir, register) - } - - /// Returns true if there is a file/folder which matches the given entry hash - pub fn contains(&self, entry_hash: &EntryHash) -> bool { - self.register - .read() - .iter() - .any(|(hash, _)| hash == entry_hash) - } - - /// Find file/folder in this Folder by its name, returning metadata chunk xorname and metadata itself. - pub fn find_by_name(&self, name: &str) -> Option<(&XorName, &Metadata)> { - // let's get the list of metadata xornames of non-removed entries - let non_removed_items: BTreeSet = self - .register - .read() - .iter() - .map(|(_, meta_xorname_entry)| xorname_from_entry(meta_xorname_entry)) - .collect(); - - self.metadata - .iter() - .find_map(|(meta_xorname, (metadata, _))| { - if metadata.name == name && non_removed_items.contains(meta_xorname) { - Some((meta_xorname, metadata)) - } else { - None - } - }) - } - - /// Returns the list of entries of this Folder, including their entry hash, - /// metadata chunk xorname, and metadata itself. 
- pub async fn entries(&mut self) -> Result> { - let mut entries = BTreeMap::new(); - for (entry_hash, entry) in self.register.read() { - let meta_xorname = xorname_from_entry(&entry); - if meta_xorname == REMOVED_ENTRY_MARK { - continue; - } - - let metadata = match self.metadata.get(&meta_xorname) { - Some((metadata, _)) => metadata.clone(), - None => { - // retrieve metadata Chunk from network - let chunk = self - .client - .get_chunk(ChunkAddress::new(meta_xorname), false, None) - .await?; - - // let's first assume it's unencrypted - let metadata: Metadata = match rmp_serde::from_slice(chunk.value()) { - Ok(metadata) => metadata, - Err(err) => { - // let's try to decrypt it then - let cipher = Ciphertext::from_bytes(chunk.value()).map_err(|_| err)?; - let data = self - .client - .signer() - .decrypt(&cipher) - .ok_or(Error::FolderEntryDecryption(entry_hash))?; - - // if this fails, it's either the wrong key or unexpected data - rmp_serde::from_slice(&data) - .map_err(|_| Error::FolderEntryDecryption(entry_hash))? - } - }; - self.metadata.insert(meta_xorname, (metadata.clone(), None)); - metadata - } - }; - entries.insert(entry_hash, (meta_xorname, metadata)); - } - Ok(entries) - } - - // Private helpers - - // Create a new FoldersApi instance with given register. - fn create(client: Client, wallet_dir: &Path, register: ClientRegister) -> Result { - let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); - - Ok(Self { - client, - wallet_dir: wallet_dir.to_path_buf(), - register, - files_api, - metadata: BTreeMap::new(), - }) - } - - // Add the given entry to the underlying Register as well as creating the metadata Chunk. - // If an encryption key is given, the metadata chunk will be encrpyted with it. 
- fn add_entry( - &mut self, - metadata: Metadata, - children: &BTreeSet, - encryption_pk: Option, - ) -> Result<(EntryHash, XorName, Metadata)> { - let mut bytes = BytesMut::with_capacity(*MAX_CHUNK_SIZE); - let serialised_metadata = rmp_serde::to_vec(&metadata)?; - if let Some(pk) = encryption_pk { - bytes.put( - pk.encrypt(serialised_metadata.as_slice()) - .to_bytes() - .as_slice(), - ); - } else { - bytes.put(serialised_metadata.as_slice()); - } - let meta_chunk = Chunk::new(bytes.freeze()); - let meta_xorname = *meta_chunk.name(); - - self.metadata - .insert(meta_xorname, (metadata.clone(), Some(meta_chunk))); - let entry_hash = self.register.write_atop(&meta_xorname, children)?; - - Ok((entry_hash, meta_xorname, metadata)) - } -} - -// Helper to convert a Register/Folder entry into a XorName -fn xorname_from_entry(entry: &Entry) -> XorName { - let mut xorname = [0; XOR_NAME_LEN]; - xorname.copy_from_slice(entry); - XorName(xorname) -} diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs deleted file mode 100644 index 27594bfa4a..0000000000 --- a/sn_client/src/lib.rs +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -//! > **Core functionalities for interacting with the SAFE Network** -//! -//! The `sn_client` crate is a part of the [Safe Network](https://safenetwork.tech/) (SN), -//! and plays a crucial role in this ecosystem by serving as the client library that allows -//! 
applications and users to interact with the Safe Network, and build applications that -//! leverage the Safe Network's capabilities, providing a high-level API that simplifies the development process. -//! -//! Here are the key functionalities provided by this crate: -//! -//! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to -//! send and receive messages from the decentralized nodes that make up the network. -//! -//! 2. **Data Storage and Retrieval**: to store and retrieve data on the Safe Network. -//! This includes both private and public data, ensuring privacy and security. -//! -//! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and -//! managing access to data, ensuring that only authorized users can access sensitive information. -//! -//! 4. **File Management**: The crate supports operations related to file management, such as uploading, -//! downloading, and managing files and directories on the Safe Network. -//! -//! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be -//! used for various purposes within the network, including paying for storage and services. -//! -//! ## Quick links -//! - [Crates.io](https://crates.io/crates/sn_client) -//! - [Forum](https://forum.autonomi.community/) -//! - [Issues on GitHub](https://github.com/maidsafe/safe_network/issues) -//! 
- -#[macro_use] -extern crate tracing; - -pub mod acc_packet; -pub mod api; -mod audit; -mod chunks; -mod error; -mod event; -mod faucet; -mod files; -mod folders; -mod register; -mod uploader; -mod wallet; - -/// Test utils -#[cfg(feature = "test-utils")] -pub mod test_utils; - -// re-export used crates to make them available to app builders -// this ensures the version of the crates used by the app builders are the same as the ones used by the client -// so they don't run into issues with incompatible types due to different versions of the same crate -pub use sn_networking as networking; -pub use sn_protocol as protocol; -pub use sn_registers as registers; -pub use sn_transfers as transfers; - -const MAX_CONCURRENT_TASKS: usize = 4096; - -pub use self::{ - audit::{DagError, SpendDag, SpendDagGet, SpendFault}, - error::Error, - event::{ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver}, - faucet::fund_faucet_from_genesis_wallet, - files::{ - download::{FilesDownload, FilesDownloadEvent}, - FilesApi, BATCH_SIZE, - }, - folders::{FolderEntry, FoldersApi, Metadata}, - register::ClientRegister, - uploader::{UploadCfg, UploadEvent, UploadSummary, Uploader}, - wallet::{send, StoragePaymentResult, WalletClient}, -}; -pub(crate) use error::Result; - -use sn_networking::Network; -use std::sync::Arc; - -#[cfg(target_arch = "wasm32")] -use console_error_panic_hook; -#[cfg(target_arch = "wasm32")] -use wasm_bindgen::prelude::*; -#[cfg(target_arch = "wasm32")] -use web_sys::console; - -// This is like the `main` function, except for JavaScript. -#[cfg(target_arch = "wasm32")] -#[wasm_bindgen(start)] -pub async fn main_js() -> std::result::Result<(), JsValue> { - // This provides better error messages in debug mode. - // It's disabled in release mode so it doesn't bloat up the file size. 
- // #[cfg(debug_assertions)] - console_error_panic_hook::set_once(); - - console::log_1(&JsValue::from_str("Hello safe world!")); - - // Tracing - // TODO: dont log _everything_ - // right now it logs all libp2p entirely. - tracing_wasm::set_as_global_default(); - - Ok(()) -} - -/// A quick client that only takes some peers to connect to -#[wasm_bindgen] -#[cfg(target_arch = "wasm32")] -pub async fn get_data(peer: &str, data_address: &str) -> std::result::Result<(), JsError> { - let bytes = hex::decode(&data_address).expect("Input address is not a hex string"); - let xor_name = xor_name::XorName( - bytes - .try_into() - .expect("Failed to parse XorName from hex string"), - ); - - use sn_protocol::storage::ChunkAddress; - console::log_1(&JsValue::from_str(peer)); - - let the_peer = sn_peers_acquisition::parse_peer_addr(peer)?; - - console::log_1(&JsValue::from_str(&format!( - "Provided Peer was {the_peer:?}" - ))); - - // TODO: We need to tidy this up, the client loops forever in the browser, and eventually crashes - // it does _do things_ but errors surface, and even after getting data, it continues... - let client = Client::quick_start(Some(vec![the_peer])) - .await - .map_err(|e| JsError::new(&format!("Client could not start: {e:?}")))?; - - console::log_1(&JsValue::from_str("Client started {chunk:?}")); - - let chunk = client - .get_chunk(ChunkAddress::new(xor_name), false, None) - .await - .map_err(|e| JsError::new(&format!("Client get data failed: {e:?}")))?; - - console::log_1(&JsValue::from_str(&format!("Data found {chunk:?}"))); - - Ok(()) -} - -/// Client API implementation to store and get data. -#[derive(Clone, Debug)] -pub struct Client { - network: Network, - events_broadcaster: ClientEventsBroadcaster, - signer: Arc, -} diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs deleted file mode 100644 index f657898bf6..0000000000 --- a/sn_client/src/register.rs +++ /dev/null @@ -1,833 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{wallet::StoragePaymentResult, Client, Error, Result, WalletClient}; -use bls::PublicKey; -use crdts::merkle_reg::MerkleReg; -use libp2p::{ - kad::{Quorum, Record}, - PeerId, -}; -use sn_networking::{GetRecordCfg, PutRecordCfg, VerificationKind}; -use sn_protocol::{ - storage::{try_serialize_record, RecordKind, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{ - Entry, EntryHash, Error as RegisterError, Permissions, Register, RegisterAddress, RegisterCrdt, - RegisterOp, SignedRegister, -}; -use sn_transfers::{NanoTokens, Payment}; -use std::collections::{BTreeSet, HashSet}; -use xor_name::XorName; - -/// Cached operations made to an offline RegisterCrdt instance are applied locally only, -/// and accumulated until the user explicitly calls 'sync'. The user can -/// switch back to sync with the network for every op by invoking `online` API. -#[derive(Clone, custom_debug::Debug)] -pub struct ClientRegister { - #[debug(skip)] - client: Client, - register: Register, - /// CRDT data of the Register - crdt: RegisterCrdt, - /// Cached operations. - ops: BTreeSet, -} - -impl ClientRegister { - /// Create with specified meta and permission - pub fn create_register(client: Client, meta: XorName, perms: Permissions) -> Self { - let register = Register::new(client.signer_pk(), meta, perms); - let crdt = RegisterCrdt::new(*register.address()); - Self { - client, - register, - crdt, - ops: BTreeSet::new(), - } - } - - /// Create a new Register Locally. 
- /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create(client: Client, meta: XorName) -> Self { - Self::create_register(client, meta, Permissions::default()) - } - - /// Create a new Register locally with a specific address. - /// # Arguments - /// * 'client' - [Client] - /// * 'addr' - [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use sn_protocol::storage::RegisterAddress; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = RegisterAddress::new(XorName::random(&mut rng), client.signer_pk()); - /// // Here we create a ClientRegister - /// let register = ClientRegister::create_with_addr(client.clone(), address); - /// # Ok(()) - /// # } - /// ``` - pub fn create_with_addr(client: Client, addr: RegisterAddress) -> Self { - let register = Register::new(addr.owner(), addr.meta(), Permissions::default()); - let crdt = RegisterCrdt::new(addr); - Self { - client, - register, - crdt, - ops: BTreeSet::new(), - } - } - - /// Create a new Register and send it to the Network. 
- /// - /// # Arguments - /// * 'client' - [Client] - /// * 'meta' - [XorName] - /// * 'wallet_client' - A borrowed mutable [WalletClient] - /// * `verify_store` - A boolean to verify store. Set this to true for mandatory verification. - /// * 'perms' - [Permissions] - /// - /// Return type: Result<(Self, [NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a new Register replica from a predefined address. - /// // The create_online function runs a [sync](ClientRegister::sync) internally. 
- /// let (client_register, mut total_cost, mut total_royalties) = ClientRegister::create_online( - /// client, - /// address, - /// &mut wallet_client, - /// false, - /// permissions, - /// ).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn create_online( - client: Client, - meta: XorName, - wallet_client: &mut WalletClient, - verify_store: bool, - perms: Permissions, - ) -> Result<(Self, NanoTokens, NanoTokens)> { - let mut reg = Self::create_register(client, meta, perms); - let (storage_cost, royalties_fees) = reg.sync(wallet_client, verify_store, None).await?; - Ok((reg, storage_cost, royalties_fees)) - } - - /// Retrieve a Register from the network to work on it offline. - pub(super) async fn retrieve(client: Client, address: RegisterAddress) -> Result { - let signed_register = Self::get_register_from_network(&client, address).await?; - - let mut register = Self::create_with_addr(client, address); - register.merge(&signed_register); - - Ok(register) - } - - /// Return type: [RegisterAddress] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. 
with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the address. In this example, we print it out: - /// println!("REGISTER_ADDRESS={}", client_register.address().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn address(&self) -> &RegisterAddress { - self.register.address() - } - - /// Returns the Owner of the Register. - /// - /// Return type: [PublicKey] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the owner. In this example, we print it out: - /// println!("REGISTER_OWNER={}", client_register.owner().to_hex()); - /// # Ok(()) - /// # } - /// ``` - pub fn owner(&self) -> PublicKey { - self.register.owner() - } - - /// Returns the Permissions of the Register. 
- /// - /// Return type: [Permissions] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can use the permissions. In this example, we print it out: - /// let permissions = client_register.permissions(); - /// println!("REGISTER_PERMS={:?}",permissions); - /// # Ok(()) - /// # } - /// ``` - pub fn permissions(&self) -> &Permissions { - self.register.permissions() - } - - /// Return the number of items held in the register. 
- /// - /// Return type: u64 - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_registers::Permissions; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let address = XorName::random(&mut rng); - /// # let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// # let permissions = Permissions::default(); - /// // Instantiate a ClientRegister (i.e. with create_online) - /// let (client_register, mut cost, mut royalties) = ClientRegister::create_online//(...) - /// # (client,address,&mut wallet_client,false,permissions,).await?; - /// // From there we can see the size. In this example, we print it out: - /// println!("REGISTER_SIZE={}", client_register.size()); - /// # Ok(()) - /// # } - /// ``` - pub fn size(&self) -> u64 { - self.crdt.size() - } - - /// Return a value corresponding to the provided 'hash', if present. - // No usages found in All Places - pub fn get(&self, hash: EntryHash) -> Result<&Entry> { - if let Some(entry) = self.crdt.get(hash) { - Ok(entry) - } else { - Err(RegisterError::NoSuchEntry(hash).into()) - } - } - - /// Read the last entry, or entries when there are branches, if the register is not empty. 
- /// - /// Return type: [BTreeSet]<([EntryHash], [Entry])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// // Read as bytes into the ClientRegister instance - /// let register = ClientRegister::create(client.clone(), address).read(); - /// # Ok(()) - /// # } - /// ``` - pub fn read(&self) -> BTreeSet<(EntryHash, Entry)> { - self.crdt.read() - } - - /// Write a new value onto the Register atop latest value. - /// It returns an error if it finds branches in the content/entries; if it is - /// required to merge/resolve the branches, invoke the `write_merging_branches` API. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "Register entry"; - /// // Write as bytes into the ClientRegister instance - /// let mut register = ClientRegister::create(client.clone(), address).write(entry.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write(&mut self, entry: &[u8]) -> Result { - let children = self.crdt.read(); - if children.len() > 1 { - return Err(Error::ContentBranchDetected(children)); - } - - self.write_atop(entry, &children.into_iter().map(|(hash, _)| hash).collect()) - } - - /// Write a new value onto the Register atop of the latest value. 
- /// If there are any branches of content or entries, it automatically merges them. - /// Leaving the new value as a single latest value on the Register. - /// Note you can use the `write` API if you need to handle - /// content/entries branches in a different way. - /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let entry = "entry_input_here"; - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let message = "Register entry"; - /// let register = mutable_register.write_merging_branches(message.as_bytes()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_merging_branches(&mut self, entry: &[u8]) -> Result { - let children: BTreeSet = - self.crdt.read().into_iter().map(|(hash, _)| hash).collect(); - - self.write_atop(entry, &children) - } - - /// Write a new value onto the Register atop the set of branches/entries - /// referenced by the provided list of their corresponding entry hash. - /// Note you can use `write_merging_branches` API instead if you - /// want to write atop all exiting branches/entries. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'children' - [BTreeSet]<[EntryHash]> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// let mut rng = rand::thread_rng(); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let mut mutable_register = ClientRegister::create(client.clone(), address); - /// let meta = "Register entry".as_bytes(); - /// let register = mutable_register.write_atop(meta, &BTreeSet::default()); - /// # Ok(()) - /// # } - /// ``` - pub fn write_atop( - &mut self, - entry: &[u8], - children: &BTreeSet, - ) -> Result { - // check permissions first - let public_key = self.client.signer_pk(); - self.register.check_user_permissions(public_key)?; - - let (hash, address, crdt_op) = self.crdt.write(entry.to_vec(), children)?; - - let op = RegisterOp::new(address, crdt_op, self.client.signer()); - - let _ = self.ops.insert(op); - - Ok(hash) - } - - // ********* Online methods ********* - - /// Sync this Register with the replicas on the network. - /// This will optionally verify the stored Register on the network is the same as the local one. - /// If payment info is provided it won't try to make the payment. 
- /// - /// # Arguments - /// * 'wallet_client' - WalletClient - /// * 'verify_store' - Boolean - /// - /// Return type: - /// Result<([NanoTokens], [NanoTokens])> - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeSet; - /// # use tempfile::TempDir; - /// # use sn_client::WalletClient; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// # let temporary_path = TempDir::new()?.path().to_owned(); - /// # let main_secret_key = Some(MainSecretKey::new(SecretKey::random())); - /// # let mut wallet = HotWallet::load_from_path(&temporary_path,main_secret_key)?; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// // Run sync of a Client Register instance - /// let mut register = - /// ClientRegister::create(client, address).sync(&mut wallet_client, true, None).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn sync( - &mut self, - wallet_client: &mut WalletClient, - verify_store: bool, - mut payment_info: Option<(Payment, PeerId)>, - ) -> Result<(NanoTokens, NanoTokens)> { - let addr = *self.address(); - debug!("Syncing Register at {addr:?}!"); - let mut storage_cost = NanoTokens::zero(); - let mut royalties_fees = NanoTokens::zero(); - let reg_result = if verify_store { - debug!("VERIFYING REGISTER STORED {:?}", self.address()); - if payment_info.is_some() { - // we expect this to be a _fresh_ register. - // It still could have been PUT previously, but we'll do a quick verification - // instead of thorough one. 
- self.client - .quickly_check_if_register_stored(*self.address()) - .await - } else { - self.client.verify_register_stored(*self.address()).await - } - } else { - Self::get_register_from_network(&self.client, addr).await - }; - - match reg_result { - Ok(remote_replica) => { - self.merge(&remote_replica); - self.push(verify_store).await?; - } - // any error here will result in a repayment of the register - // TODO: be smart about this and only pay for storage if we need to - Err(err) => { - debug!("Failed to get register: {err:?}"); - debug!("Creating Register as it doesn't exist at {addr:?}!"); - - // Let's check if the user has already paid for this address first - if payment_info.is_none() { - let net_addr = NetworkAddress::RegisterAddress(addr); - let payment_result = self.make_payment(wallet_client, &net_addr).await?; - storage_cost = payment_result.storage_cost; - royalties_fees = payment_result.royalty_fees; - - // Get payment proofs needed to publish the Register - let (payment, payee) = wallet_client.get_recent_payment_for_addr(&net_addr)?; - debug!("payments found: {payment:?}"); - payment_info = Some((payment, payee)); - } - - // The `creation register` has to come with `payment`. - // Hence it needs to be `published` to network separately. - self.publish_register(payment_info, verify_store).await?; - } - } - - Ok((storage_cost, royalties_fees)) - } - - /// Push all operations made locally to the replicas of this Register on the network. - /// This optionally verifies that the stored Register is the same as our local register. 
- /// - /// # Arguments - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// // Pass the boolean value to the Client Register instance via .Push() - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.push(false); - /// # Ok(()) - /// # } - /// ``` - pub async fn push(&mut self, verify_store: bool) -> Result<()> { - let ops_len = self.ops.len(); - let address = *self.address(); - if ops_len > 0 { - if let Err(err) = self.publish_register(None, verify_store).await { - warn!("Failed to push register {address:?} to network!: {err}"); - return Err(err); - } - - debug!("Successfully pushed register {address:?} to network!"); - } - - Ok(()) - } - - /// Write a new value onto the Register atop of the latest value. - /// It returns an error if it finds branches in the content / entries. If so, then it's - /// required to merge or resolve the branches. In that case, invoke the `write_merging_branches` API. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Register entry".as_bytes(); - /// // Use of the 'write_online' example: - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_online(&mut self, entry: &[u8], verify_store: bool) -> Result<()> { - self.write(entry)?; - self.push(verify_store).await - } - - /// Write a new value onto the Register atop of the latest value. - /// If there are branches of content/entries, it will automatically merge them. - /// This will leave a single new value as the latest entry into the Register. - /// Note that you can use the `write` API if you need to handle content/entries branches in a different way. 
- /// - /// # Arguments - /// * 'entry' - u8 (i.e .as_bytes) - /// * 'verify_store' - Boolean - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, ClientRegister, Error}; - /// # use bls::SecretKey; - /// # use xor_name::XorName; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let mut rng = rand::thread_rng(); - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let address = XorName::random(&mut rng); - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let meta = "Entry".as_bytes(); - /// // Use of the 'write_merging_branches_online': - /// let mut binding = ClientRegister::create(client, address); - /// let register = binding.write_merging_branches_online(meta,false); - /// # Ok(()) - /// # } - /// ``` - pub async fn write_merging_branches_online( - &mut self, - entry: &[u8], - verify_store: bool, - ) -> Result<()> { - self.write_merging_branches(entry)?; - self.push(verify_store).await - } - - /// Access the underlying MerkleReg (e.g. 
for access to history) - /// NOTE: This API is unstable and may be removed in the future - pub fn merkle_reg(&self) -> &MerkleReg { - self.crdt.merkle_reg() - } - - /// Returns the local ops list - pub fn ops_list(&self) -> &BTreeSet { - &self.ops - } - - /// Log the crdt DAG in tree structured view - pub fn log_update_history(&self) -> String { - self.crdt.log_update_history() - } - - // ********* Private helpers ********* - - // Make a storage payment for the provided network address - async fn make_payment( - &self, - wallet_client: &mut WalletClient, - net_addr: &NetworkAddress, - ) -> Result { - // Let's make the storage payment - let payment_result = wallet_client - .pay_for_storage(std::iter::once(net_addr.clone())) - .await?; - let cost = payment_result - .storage_cost - .checked_add(payment_result.royalty_fees) - .ok_or(Error::TotalPriceTooHigh)?; - - println!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - info!("Successfully made payment of {cost} for a Register (At a cost per record of {cost:?}.)"); - - if let Err(err) = wallet_client.store_local_wallet() { - warn!("Failed to store wallet with cached payment proofs: {err:?}"); - println!("Failed to store wallet with cached payment proofs: {err:?}"); - } else { - println!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - info!( - "Successfully stored wallet with cached payment proofs, and new balance {}.", - wallet_client.balance() - ); - } - - Ok(payment_result) - } - - /// Publish a `Register` command on the network. - /// If `verify_store` is true, it will verify the Register was stored on the network. - /// Optionally contains the Payment and the PeerId that we paid to. 
- pub async fn publish_register( - &self, - payment: Option<(Payment, PeerId)>, - verify_store: bool, - ) -> Result<()> { - let client = self.client.clone(); - let signed_reg = self.get_signed_reg()?; - - let network_address = NetworkAddress::from_register_address(*self.register.address()); - let key = network_address.to_record_key(); - let (record, payee) = match payment { - Some((payment, payee)) => { - let record = Record { - key: key.clone(), - value: try_serialize_record( - &(payment, &signed_reg), - RecordKind::RegisterWithPayment, - )? - .to_vec(), - publisher: None, - expires: None, - }; - (record, Some(vec![payee])) - } - None => { - let record = Record { - key: key.clone(), - value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }; - (record, None) - } - }; - - let (record_to_verify, expected_holders) = if verify_store { - let expected_holders: HashSet<_> = client - .network - .get_closest_peers(&network_address, true) - .await? - .iter() - .cloned() - .collect(); - ( - Some(Record { - key, - value: try_serialize_record(&signed_reg, RecordKind::Register)?.to_vec(), - publisher: None, - expires: None, - }), - expected_holders, - ) - } else { - (None, Default::default()) - }; - - let verification_cfg = GetRecordCfg { - get_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Quick), - target_record: record_to_verify, - expected_holders, - is_register: true, - }; - let put_cfg = PutRecordCfg { - put_quorum: Quorum::All, - retry_strategy: Some(RetryStrategy::Balanced), - use_put_record_to: payee, - verification: Some((VerificationKind::Network, verification_cfg)), - }; - - // Register edits might exist, so we cannot be sure that just because we get a record back that this should fail - Ok(client.network.put_record(record, &put_cfg).await?) - } - - /// Retrieve a `Register` from the Network. 
- pub async fn get_register_from_network( - client: &Client, - address: RegisterAddress, - ) -> Result { - debug!("Retrieving Register from: {address}"); - let signed_reg = client.get_signed_register_from_network(address).await?; - signed_reg.verify_with_address(address)?; - Ok(signed_reg) - } - - /// Merge a network fetched copy with the local one. - /// Note the `get_register_from_network` already verified - /// * the fetched register is the same (address) as to the local one - /// * the ops of the fetched copy are all signed by the owner - pub fn merge(&mut self, signed_reg: &SignedRegister) { - debug!("Merging Register of: {:?}", self.register.address()); - - // Take out the difference between local ops and fetched ops - // note the `difference` functions gives entry that: in a but not in b - let diff: Vec<_> = signed_reg.ops().difference(&self.ops).cloned().collect(); - - // Apply the new ops to local - for op in diff { - // in case of deploying error, record then continue to next - if let Err(err) = self.crdt.apply_op(op.clone()) { - error!( - "Apply op to local Register {:?} failed with {err:?}", - self.register.address() - ); - } else { - let _ = self.ops.insert(op); - } - } - } - - /// Generate SignedRegister from local copy, so that can be published to network - fn get_signed_reg(&self) -> Result { - let signature = self.client.sign(self.register.bytes()?); - Ok(SignedRegister::new( - self.register.clone(), - signature, - self.ops.clone(), - )) - } -} diff --git a/sn_client/src/test_utils.rs b/sn_client/src/test_utils.rs deleted file mode 100644 index 5e0485e543..0000000000 --- a/sn_client/src/test_utils.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, - send, Client, WalletClient, -}; -use sn_peers_acquisition::parse_peer_addr; -use sn_protocol::{storage::Chunk, NetworkAddress}; -use sn_transfers::{HotWallet, NanoTokens}; - -use bls::SecretKey; -use bytes::Bytes; -use eyre::{bail, Result}; -use rand::distributions::{Distribution, Standard}; -use std::path::Path; -use tokio::{ - sync::Mutex, - time::{Duration, Instant}, -}; -use tracing::{info, warn}; - -/// 100 SNT is the amount `get_funded_wallet` funds the created wallet with. -pub const AMOUNT_TO_FUND_WALLETS: u64 = 100 * 1_000_000_000; - -// The number of times to try to load the faucet wallet -const LOAD_FAUCET_WALLET_RETRIES: usize = 10; - -// mutex to restrict access to faucet wallet from concurrent tests -static FAUCET_WALLET_MUTEX: Mutex<()> = Mutex::const_new(()); - -/// Get a new Client for testing -pub async fn get_new_client(owner_sk: SecretKey) -> Result { - let bootstrap_peers = if cfg!(feature = "local") { - None - } else { - match std::env::var("SAFE_PEERS") { - Ok(str) => match parse_peer_addr(&str) { - Ok(peer) => Some(vec![peer]), - Err(err) => bail!("Can't parse SAFE_PEERS {str:?} with error {err:?}"), - }, - Err(err) => bail!("Can't get env var SAFE_PEERS with error {err:?}"), - } - }; - - println!("Client bootstrap with peer {bootstrap_peers:?}"); - let client = Client::new(owner_sk, bootstrap_peers, None, None).await?; - Ok(client) -} - -/// Generate a Chunk with random bytes -pub fn random_file_chunk() -> Chunk { - let mut rng = rand::thread_rng(); - let 
random_content: Vec = >::sample_iter(Standard, &mut rng) - .take(100) - .collect(); - Chunk::new(Bytes::from(random_content)) -} - -/// Creates and funds a new hot-wallet at the provided path -pub async fn get_funded_wallet(client: &Client, wallet_dir: &Path) -> Result { - let wallet_balance = NanoTokens::from(AMOUNT_TO_FUND_WALLETS); - let _guard = FAUCET_WALLET_MUTEX.lock().await; - let from_faucet_wallet = load_faucet_wallet().await?; - - let mut local_wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None) - .expect("Wallet shall be successfully created."); - - println!("Getting {wallet_balance} tokens from the faucet..."); - info!("Getting {wallet_balance} tokens from the faucet..."); - let tokens = send( - from_faucet_wallet, - wallet_balance, - local_wallet.address(), - client, - true, - ) - .await?; - - println!("Verifying the transfer from faucet..."); - info!("Verifying the transfer from faucet..."); - client.verify_cashnote(&tokens).await?; - local_wallet.deposit_and_store_to_disk(&vec![tokens])?; - assert_eq!(local_wallet.balance(), wallet_balance); - println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); - - Ok(local_wallet) -} - -/// Pay the network for the provided list of storage addresses. 
-pub async fn pay_for_storage( - client: &Client, - wallet_dir: &Path, - addrs2pay: Vec, -) -> Result<()> { - let wallet = load_account_wallet_or_create_with_mnemonic(wallet_dir, None)?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - let _ = wallet_client.pay_for_storage(addrs2pay.into_iter()).await?; - Ok(()) -} - -async fn load_faucet_wallet() -> Result { - info!("Loading faucet wallet..."); - let now = Instant::now(); - for attempt in 1..LOAD_FAUCET_WALLET_RETRIES + 1 { - let faucet_wallet = create_faucet_account_and_wallet(); - - let faucet_balance = faucet_wallet.balance(); - if !faucet_balance.is_zero() { - info!("Loaded faucet wallet after {:?}", now.elapsed()); - return Ok(faucet_wallet); - } - tokio::time::sleep(Duration::from_secs(1)).await; - warn!("The faucet wallet is empty. Attempts: {attempt}/{LOAD_FAUCET_WALLET_RETRIES}") - } - bail!("The faucet wallet is empty even after {LOAD_FAUCET_WALLET_RETRIES} retries. Bailing after {:?}. Check the faucet_server logs.", now.elapsed()); -} diff --git a/sn_client/src/uploader/mod.rs b/sn_client/src/uploader/mod.rs deleted file mode 100644 index c3495b99ab..0000000000 --- a/sn_client/src/uploader/mod.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#[cfg(test)] -mod tests; -mod upload; - -use self::upload::{start_upload, InnerUploader, MAX_REPAYMENTS_PER_FAILED_ITEM}; -use crate::{Client, ClientRegister, Error, Result, BATCH_SIZE}; -use itertools::Either; -use sn_networking::PayeeQuote; -use sn_protocol::{ - storage::{Chunk, ChunkAddress, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{RegisterAddress, SignedRegister}; -use sn_transfers::{NanoTokens, WalletApi}; -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt::Debug, - path::PathBuf, -}; -use tokio::sync::mpsc; -use xor_name::XorName; - -/// The set of options to pass into the `Uploader` -#[derive(Debug, Clone, Copy)] -pub struct UploadCfg { - pub batch_size: usize, - pub verify_store: bool, - pub show_holders: bool, - pub retry_strategy: RetryStrategy, - pub max_repayments_for_failed_data: usize, // we want people to specify an explicit limit here. - pub collect_registers: bool, -} - -impl Default for UploadCfg { - fn default() -> Self { - Self { - batch_size: BATCH_SIZE, - verify_store: true, - show_holders: false, - retry_strategy: RetryStrategy::Balanced, - max_repayments_for_failed_data: MAX_REPAYMENTS_PER_FAILED_ITEM, - collect_registers: false, - } - } -} - -/// The result of a successful upload. -#[derive(Debug, Clone)] -pub struct UploadSummary { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub final_balance: NanoTokens, - pub uploaded_addresses: BTreeSet, - pub uploaded_registers: BTreeMap, - pub uploaded_count: usize, - pub skipped_count: usize, -} - -impl UploadSummary { - /// Merge two UploadSummary together. 
- pub fn merge(mut self, other: Self) -> Result { - self.uploaded_addresses.extend(other.uploaded_addresses); - self.uploaded_registers.extend(other.uploaded_registers); - - let summary = Self { - storage_cost: self - .storage_cost - .checked_add(other.storage_cost) - .ok_or(Error::NumericOverflow)?, - royalty_fees: self - .royalty_fees - .checked_add(other.royalty_fees) - .ok_or(Error::NumericOverflow)?, - final_balance: self - .final_balance - .checked_add(other.final_balance) - .ok_or(Error::NumericOverflow)?, - uploaded_addresses: self.uploaded_addresses, - uploaded_registers: self.uploaded_registers, - uploaded_count: self.uploaded_count + other.uploaded_count, - skipped_count: self.skipped_count + other.skipped_count, - }; - Ok(summary) - } -} - -#[derive(Debug, Clone)] -/// The events emitted from the upload process. -pub enum UploadEvent { - /// Uploaded a record to the network. - ChunkUploaded(ChunkAddress), - /// Uploaded a Register to the network. - /// The returned register is just the passed in register. - RegisterUploaded(ClientRegister), - /// - /// The Chunk already exists in the network. No payments were made. - ChunkAlreadyExistsInNetwork(ChunkAddress), - /// The Register already exists in the network. The locally register changes were pushed to the network. - /// No payments were made. - /// The returned register contains the remote replica merged with the passed in register. - RegisterUpdated(ClientRegister), - /// Payment for a batch of records has been made. - PaymentMade { - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - /// The upload process has terminated with an error. - // Note: We cannot send the Error enum as it does not implement Clone. So we cannot even do Result if - // we also want to return this error from the function. - Error, -} - -pub struct Uploader { - // Has to be stored as an Option as we have to take ownership of inner during the upload. 
- inner: Option, -} - -impl Uploader { - /// Start the upload process. - pub async fn start_upload(mut self) -> Result { - let event_sender = self - .inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .event_sender - .clone(); - match start_upload(Box::new(self)).await { - Err(err) => { - if let Some(event_sender) = event_sender { - if let Err(err) = event_sender.send(UploadEvent::Error).await { - error!("Error while emitting event: {err:?}"); - } - } - Err(err) - } - Ok(summary) => Ok(summary), - } - } - - /// Creates a new instance of `Uploader` with the default configuration. - /// To modify the configuration, use the provided setter methods (`set_...` functions). - // NOTE: Self has to be constructed only using this method. We expect `Self::inner` is present everywhere. - pub fn new(client: Client, root_dir: PathBuf) -> Self { - Self { - inner: Some(InnerUploader::new(client, root_dir)), - } - } - - /// Update all the configurations by passing the `UploadCfg` struct - pub fn set_upload_cfg(&mut self, cfg: UploadCfg) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_cfg(cfg); - } - - /// Sets the default batch size that determines the number of data that are processed in parallel. - /// - /// By default, this option is set to the constant `BATCH_SIZE: usize = 16`. - pub fn set_batch_size(&mut self, batch_size: usize) { - // Self can only be constructed with new(), which will set inner to InnerUploader always. - // So it is okay to call unwrap here. - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_batch_size(batch_size); - } - - /// Sets the option to verify the data after they have been uploaded. - /// - /// By default, this option is set to true. 
- pub fn set_verify_store(&mut self, verify_store: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_verify_store(verify_store); - } - - /// Sets the option to display the holders that are expected to be holding the data during verification. - /// - /// By default, this option is set to false. - pub fn set_show_holders(&mut self, show_holders: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_show_holders(show_holders); - } - - /// Sets the RetryStrategy to increase the re-try during the GetStoreCost & Upload tasks. - /// This does not affect the retries during the Payment task. Use `set_max_repayments_for_failed_data` to - /// configure the re-payment attempts. - /// - /// By default, this option is set to RetryStrategy::Quick - pub fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_retry_strategy(retry_strategy); - } - - /// Sets the maximum number of repayments to perform if the initial payment failed. - /// NOTE: This creates an extra Spend and uses the wallet funds. - /// - /// By default, this option is set to 1 retry. - pub fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_max_repayments_for_failed_data(retries); - } - - /// Enables the uploader to return all the registers that were Uploaded or Updated. - /// The registers are emitted through the event channel whenever they're completed, but this returns them - /// through the UploadSummary when the whole upload process completes. 
- /// - /// By default, this option is set to False - pub fn set_collect_registers(&mut self, collect_registers: bool) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .set_collect_registers(collect_registers); - } - - /// Returns a receiver for UploadEvent. - /// This method is optional and the upload process can be performed without it. - pub fn get_event_receiver(&mut self) -> mpsc::Receiver { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .get_event_receiver() - } - - /// Insert a list of chunk paths to upload to upload. - pub fn insert_chunk_paths(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunk_paths(chunks); - } - - /// Insert a list of chunks to upload to upload. - pub fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_chunks(chunks); - } - - /// Insert a list of registers to upload. - pub fn insert_register(&mut self, registers: impl IntoIterator) { - self.inner - .as_mut() - .expect("Uploader::new makes sure inner is present") - .insert_register(registers); - } -} - -// ======= Private ======== - -/// An interface to make the testing easier by not interacting with the network. -trait UploaderInterface: Send + Sync { - fn take_inner_uploader(&mut self) -> InnerUploader; - - // Mutable reference is used in tests. 
- fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ); - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ); - - #[expect(clippy::too_many_arguments)] - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ); - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ); - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ); -} - -// Configuration functions are used in tests. So these are defined here and re-used inside `Uploader` -impl InnerUploader { - pub(super) fn set_cfg(&mut self, cfg: UploadCfg) { - self.cfg = cfg; - } - - pub(super) fn set_batch_size(&mut self, batch_size: usize) { - self.cfg.batch_size = batch_size; - } - - pub(super) fn set_verify_store(&mut self, verify_store: bool) { - self.cfg.verify_store = verify_store; - } - - pub(super) fn set_show_holders(&mut self, show_holders: bool) { - self.cfg.show_holders = show_holders; - } - - pub(super) fn set_retry_strategy(&mut self, retry_strategy: RetryStrategy) { - self.cfg.retry_strategy = retry_strategy; - } - - pub(super) fn set_max_repayments_for_failed_data(&mut self, retries: usize) { - self.cfg.max_repayments_for_failed_data = retries; - } - - pub(super) fn set_collect_registers(&mut self, collect_registers: bool) { - self.cfg.collect_registers = collect_registers; - } - - pub(super) fn get_event_receiver(&mut self) -> mpsc::Receiver { - let (tx, rx) = mpsc::channel(100); - self.event_sender = Some(tx); - rx - } - 
- pub(super) fn insert_chunk_paths( - &mut self, - chunks: impl IntoIterator, - ) { - self.all_upload_items - .extend(chunks.into_iter().map(|(xorname, path)| { - let item = UploadItem::Chunk { - address: ChunkAddress::new(xorname), - chunk: Either::Right(path), - }; - (xorname, item) - })); - } - - pub(super) fn insert_chunks(&mut self, chunks: impl IntoIterator) { - self.all_upload_items - .extend(chunks.into_iter().map(|chunk| { - let xorname = *chunk.name(); - let item = UploadItem::Chunk { - address: *chunk.address(), - chunk: Either::Left(chunk), - }; - (xorname, item) - })); - } - - pub(super) fn insert_register(&mut self, registers: impl IntoIterator) { - self.all_upload_items - .extend(registers.into_iter().map(|reg| { - let address = *reg.address(); - let item = UploadItem::Register { address, reg }; - (address.xorname(), item) - })); - } -} - -#[derive(Debug, Clone)] -enum UploadItem { - Chunk { - address: ChunkAddress, - // Either the actual chunk or the path to the chunk. - chunk: Either, - }, - Register { - address: RegisterAddress, - reg: ClientRegister, - }, -} - -impl UploadItem { - fn address(&self) -> NetworkAddress { - match self { - Self::Chunk { address, .. } => NetworkAddress::from_chunk_address(*address), - Self::Register { address, .. } => NetworkAddress::from_register_address(*address), - } - } - - fn xorname(&self) -> XorName { - match self { - UploadItem::Chunk { address, .. } => *address.xorname(), - UploadItem::Register { address, .. 
} => address.xorname(), - } - } -} - -#[derive(Debug)] -enum TaskResult { - GetRegisterFromNetworkOk { - remote_register: SignedRegister, - }, - GetRegisterFromNetworkErr(XorName), - PushRegisterOk { - updated_register: ClientRegister, - }, - PushRegisterErr(XorName), - GetStoreCostOk { - xorname: XorName, - quote: Box, - }, - GetStoreCostErr { - xorname: XorName, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_reached: bool, - }, - MakePaymentsOk { - paid_xornames: Vec, - storage_cost: NanoTokens, - royalty_fees: NanoTokens, - new_balance: NanoTokens, - }, - MakePaymentsErr { - failed_xornames: Vec<(XorName, Box)>, - insufficient_balance: Option<(NanoTokens, NanoTokens)>, - }, - UploadOk(XorName), - UploadErr { - xorname: XorName, - }, -} - -#[derive(Debug, Clone)] -enum GetStoreCostStrategy { - /// Selects the PeerId with the lowest quote - Cheapest, - /// Selects the cheapest PeerId that we have not made payment to. - SelectDifferentPayee, -} diff --git a/sn_client/src/uploader/tests/mod.rs b/sn_client/src/uploader/tests/mod.rs deleted file mode 100644 index 75916bbb97..0000000000 --- a/sn_client/src/uploader/tests/mod.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -mod setup; - -use crate::{ - uploader::tests::setup::{ - get_dummy_chunk_paths, get_dummy_registers, get_inner_uploader, start_uploading_with_steps, - TestSteps, - }, - Error as ClientError, UploadEvent, -}; -use assert_matches::assert_matches; -use eyre::Result; -use sn_logging::LogBuilder; -use std::collections::VecDeque; -use tempfile::tempdir; - -// ===== HAPPY PATH ======= - -/// 1. Chunk: if cost =0, then chunk is present in the network. -#[tokio::test] -async fn chunk_that_already_exists_in_the_network_should_return_zero_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![TestSteps::GetStoreCostOk { - trigger_zero_cost: true, - assert_select_different_payee: false, - }]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::ChunkAlreadyExistsInNetwork(_)); - Ok(()) -} - -/// 2. Chunk: if cost !=0, then make payment upload to the network. 
-#[tokio::test] -async fn chunk_should_be_paid_for_and_uploaded_if_cost_is_not_zero() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 3. Register: if GET register = ok, then merge and push the register. -#[tokio::test] -async fn register_should_be_merged_and_pushed_if_it_already_exists_in_the_network() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![TestSteps::GetRegisterOk, TestSteps::PushRegisterOk]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 1); - assert_matches!(events[0], UploadEvent::RegisterUpdated { .. }); - Ok(()) -} - -/// 4. 
Register: if Get register = err, then get store cost and upload. -#[tokio::test] -async fn register_should_be_paid_and_uploaded_if_it_does_not_exists() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - // todo: what if cost = 0 even after GetRegister returns error. check that - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== REPAYMENTS ====== - -/// 1. Chunks: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. 
-#[tokio::test] -async fn chunks_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::ChunkUploaded(..)); - Ok(()) -} - -/// 2. Register: if upload task fails > threshold, then get store cost should be triggered with SelectDifferentStrategy -/// and then uploaded. 
-#[tokio::test] -async fn registers_should_perform_repayment_if_the_upload_fails_multiple_times() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemOk, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - let _stats = upload_handle.await??; - let events = events_handle.await?; - - assert_eq!(events.len(), 3); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - assert_matches!(events[2], UploadEvent::RegisterUploaded(..)); - Ok(()) -} - -// ===== ERRORS ======= -/// 1. 
Registers: Multiple PushRegisterErr should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_upload_should_error_out_if_there_are_multiple_push_failures() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterOk, - TestSteps::PushRegisterErr, - TestSteps::PushRegisterErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 2. 
Chunk: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 3. 
Register: Multiple errors during get store cost should result in Error::SequentialNetworkErrors -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_get_store_cost() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - TestSteps::GetStoreCostErr { - assert_select_different_payee: false, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialNetworkErrors) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 4. 
Chunk: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn chunk_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -/// 5. 
Register: Multiple errors during make payment should result in Error::SequentialUploadPaymentError -#[tokio::test] -async fn register_should_error_out_if_there_are_multiple_errors_during_make_payment() -> Result<()> -{ - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_register(get_dummy_registers(1, inner_uploader.client.clone())); - - // the path to test - let steps = vec![ - TestSteps::GetRegisterErr, - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentErr, - TestSteps::MakePaymentErr, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::SequentialUploadPaymentError) - ); - let events = events_handle.await?; - - // UploadEvent::Error is performed by the caller of start_upload, so we can't check that one here. - assert_eq!(events.len(), 0); - Ok(()) -} - -// 6: Chunks + Registers: if the number of repayments exceed a threshold, it should return MaximumRepaymentsReached error. 
-#[tokio::test] -async fn maximum_repayment_error_should_be_triggered_during_get_store_cost() -> Result<()> { - let _log_guards = LogBuilder::init_single_threaded_tokio_test("uploader", true); - let temp_dir = tempdir()?; - let (mut inner_uploader, task_result_rx) = get_inner_uploader(temp_dir.path().to_path_buf())?; - - // cfg - inner_uploader.set_batch_size(1); - inner_uploader.insert_chunk_paths(get_dummy_chunk_paths(1, temp_dir.path().to_path_buf())); - - // the path to test - let steps = vec![ - // initial payment done - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: false, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // first repayment - TestSteps::GetStoreCostOk { - trigger_zero_cost: false, - assert_select_different_payee: true, - }, - TestSteps::MakePaymentOk, - TestSteps::UploadItemErr, - TestSteps::UploadItemErr, - // thus after reaching max repayments, we should error out during get store cost. - TestSteps::GetStoreCostErr { - assert_select_different_payee: true, - }, - ]; - - let (upload_handle, events_handle) = - start_uploading_with_steps(inner_uploader, VecDeque::from(steps), task_result_rx); - - assert_matches!( - upload_handle.await?, - Err(ClientError::UploadFailedWithMaximumRepaymentsReached { .. }) - ); - let events = events_handle.await?; - - assert_eq!(events.len(), 2); - assert_matches!(events[0], UploadEvent::PaymentMade { .. }); - assert_matches!(events[1], UploadEvent::PaymentMade { .. }); - Ok(()) -} diff --git a/sn_client/src/uploader/tests/setup.rs b/sn_client/src/uploader/tests/setup.rs deleted file mode 100644 index 59f9005c4a..0000000000 --- a/sn_client/src/uploader/tests/setup.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::{ - uploader::{ - upload::{start_upload, InnerUploader}, - GetStoreCostStrategy, TaskResult, UploadItem, UploaderInterface, - }, - ClientRegister, UploadEvent, -}; -use crate::{Client, Result as ClientResult, UploadSummary}; -use assert_matches::assert_matches; -use bls::SecretKey; -use eyre::Result; -use libp2p::PeerId; -use libp2p_identity::Keypair; -use rand::thread_rng; -use sn_networking::{NetworkBuilder, PayeeQuote}; -use sn_protocol::{storage::RetryStrategy, NetworkAddress}; -use sn_registers::{Permissions, RegisterAddress, SignedRegister}; -use sn_transfers::{MainSecretKey, NanoTokens, PaymentQuote, WalletApi}; -use std::{ - collections::{BTreeMap, VecDeque}, - path::PathBuf, - sync::Arc, -}; -use tokio::{runtime::Handle, sync::mpsc, task::JoinHandle}; -use xor_name::XorName; - -struct TestUploader { - inner: Option, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, - - // test states - make_payment_collector: Vec<(XorName, Box)>, - payments_made_per_xorname: BTreeMap, - batch_size: usize, -} - -impl UploaderInterface for TestUploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner.take().unwrap() - } - - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - _task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_register called for: {xorname:?}. 
Step to execute: {step:?}"); - info!("TEST: spawn_get_register called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::GetRegisterOk => { - handle.spawn(async move { - let remote_register = - SignedRegister::test_new_from_address(reg_addr, client.signer()); - task_result_sender - .send(TaskResult::GetRegisterFromNetworkOk { remote_register }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetRegisterFromNetworkErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetRegister step. Got: {con:?}"), - } - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - _verify_store: bool, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a PushRegister step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_push_register called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::PushRegisterOk => { - handle.spawn(async move { - let updated_register = match upload_item { - UploadItem::Register { reg, .. } => reg, - _ => panic!("Expected UploadItem::Register"), - }; - task_result_sender - .send(TaskResult::PushRegisterOk { - // this register is just used for returning. - updated_register, - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::PushRegisterErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::PushRegisterErr(xorname)) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected PushRegister step. 
Got: {con:?}"), - } - } - - fn submit_get_store_cost_task( - &mut self, - _client: Client, - _wallet_api: WalletApi, - xorname: XorName, - _address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - _task_result_sender: mpsc::Sender, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a GetStoreCost step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_get_store_cost called for: {xorname:?}. Step to execute: {step:?}"); - - let has_max_payments_reached_closure = - |get_store_cost_strategy: &GetStoreCostStrategy| -> bool { - match get_store_cost_strategy { - GetStoreCostStrategy::SelectDifferentPayee => { - if let Some(n_payments) = self.payments_made_per_xorname.get(&xorname) { - InnerUploader::have_we_reached_max_repayments( - *n_payments, - max_repayments_for_failed_data, - ) - } else { - false - } - } - _ => false, - } - }; - - // if select different payee, then it can possibly error out if max_repayments have been reached. - // then the step should've been a GetStoreCostErr. - if has_max_payments_reached_closure(&get_store_cost_strategy) { - assert_matches!(step, TestSteps::GetStoreCostErr { .. }, "Max repayments have been reached, so we expect a GetStoreCostErr, not GetStoreCostOk"); - } - - match step { - TestSteps::GetStoreCostOk { - trigger_zero_cost, - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. - assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. 
} => - assert_select_different_payee, - }); - - let mut quote = PaymentQuote::zero(); - if !trigger_zero_cost { - quote.cost = NanoTokens::from(10); - } - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(( - PeerId::random(), - MainSecretKey::random().main_pubkey(), - quote, - )), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::GetStoreCostErr { - assert_select_different_payee, - } => { - // Make sure that the received strategy is the one defined in the step. - assert!(match get_store_cost_strategy { - // match here to not miss out on any new strategies. - GetStoreCostStrategy::Cheapest => !assert_select_different_payee, - GetStoreCostStrategy::SelectDifferentPayee { .. } => - assert_select_different_payee, - }); - let max_repayments_reached = - has_max_payments_reached_closure(&get_store_cost_strategy); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected GetStoreCost step. Got: {con:?}"), - } - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - _make_payment_sender: mpsc::Sender)>>, - ) { - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a MakePayment step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - match &to_send { - Some((upload_item, quote)) => { - let xorname = upload_item.xorname(); - println!("spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}"); - info!( - "TEST: spawn_make_payment called for: {xorname:?}. Step to execute: {step:?}" - ); - - self.make_payment_collector - .push((upload_item.xorname(), quote.clone())); - } - None => { - println!( - "spawn_make_payment called with force make payment. 
Step to execute: {step:?}" - ); - info!("TEST: spawn_make_payment called with force make payment. Step to execute: {step:?}"); - } - } - - // gotta collect batch size before sending task result. - let _make_payment = self.make_payment_collector.len() >= self.batch_size - || (to_send.is_none() && !self.make_payment_collector.is_empty()); - - match step { - // TestSteps::MakePaymentJustCollectItem => { - // // The test expected for us to just collect item, but if the logic wants us to make payment, then it as - // // error - // assert!(!make_payment); - // } - TestSteps::MakePaymentOk => { - let paid_xornames = std::mem::take(&mut self.make_payment_collector) - .into_iter() - .map(|(xorname, _)| xorname) - .collect::>(); - // track the payments per xorname - for xorname in paid_xornames.iter() { - let entry = self.payments_made_per_xorname.entry(*xorname).or_insert(0); - *entry += 1; - } - let batch_size = self.batch_size; - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost: NanoTokens::from(batch_size as u64 * 10), - royalty_fees: NanoTokens::from(batch_size as u64 * 3), - new_balance: NanoTokens::from(batch_size as u64 * 1000), - }) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::MakePaymentErr => { - let failed_xornames = std::mem::take(&mut self.make_payment_collector); - - handle.spawn(async move { - task_result_sender - .send(TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: None, - }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected MakePayment step. 
Got: {con:?}"), - } - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - _client: Client, - _wallet_api: WalletApi, - _verify_store: bool, - _retry_strategy: RetryStrategy, - _task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - let step = self - .test_steps - .pop_front() - .expect("TestSteps are empty. Expected a UploadItem step."); - let handle = Handle::current(); - let task_result_sender = self.task_result_sender.clone(); - - println!("spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - info!("TEST: spawn_upload_item called for: {xorname:?}. Step to execute: {step:?}"); - match step { - TestSteps::UploadItemOk => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadOk(xorname)) - .await - .expect("Failed to send task result"); - }); - } - TestSteps::UploadItemErr => { - handle.spawn(async move { - task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await - .expect("Failed to send task result"); - }); - } - con => panic!("Test failed: Expected UploadItem step. Got: {con:?}"), - } - } -} - -#[derive(Debug, Clone)] -pub enum TestSteps { - GetRegisterOk, - GetRegisterErr, - PushRegisterOk, - PushRegisterErr, - GetStoreCostOk { - trigger_zero_cost: bool, - assert_select_different_payee: bool, - }, - GetStoreCostErr { - assert_select_different_payee: bool, - }, - // MakePaymentJustCollectItem, - MakePaymentOk, - MakePaymentErr, - UploadItemOk, - UploadItemErr, -} - -pub fn get_inner_uploader(root_dir: PathBuf) -> Result<(InnerUploader, mpsc::Sender)> { - let client = build_unconnected_client(root_dir.clone())?; - - let mut inner = InnerUploader::new(client, root_dir); - let (task_result_sender, task_result_receiver) = mpsc::channel(100); - inner.testing_task_channels = Some((task_result_sender.clone(), task_result_receiver)); - - Ok((inner, task_result_sender)) -} - -// Spawns two tasks. 
One is the actual upload task that will return an UploadStat when completed. -// The other is a one to collect all the UploadEvent emitted by the previous task. -pub fn start_uploading_with_steps( - mut inner_uploader: InnerUploader, - test_steps: VecDeque, - task_result_sender: mpsc::Sender, -) -> ( - JoinHandle>, - JoinHandle>, -) { - let batch_size = inner_uploader.cfg.batch_size; - let mut upload_event_rx = inner_uploader.get_event_receiver(); - - let upload_handle = tokio::spawn(start_upload(Box::new(TestUploader { - inner: Some(inner_uploader), - test_steps, - task_result_sender, - make_payment_collector: Default::default(), - payments_made_per_xorname: Default::default(), - batch_size, - }))); - - let event_handle = tokio::spawn(async move { - let mut events = vec![]; - while let Some(event) = upload_event_rx.recv().await { - events.push(event); - } - events - }); - - (upload_handle, event_handle) -} - -// Collect all the upload events into a list - -// Build a very simple client struct for testing. This does not connect to any network. -// The UploaderInterface eliminates the need for direct networking in tests. -pub fn build_unconnected_client(root_dir: PathBuf) -> Result { - let network_builder = NetworkBuilder::new(Keypair::generate_ed25519(), true, root_dir); - let (network, ..) = network_builder.build_client()?; - let client = Client { - network, - events_broadcaster: Default::default(), - signer: Arc::new(SecretKey::random()), - }; - Ok(client) -} - -// We don't perform any networking, so the paths can be dummy ones. 
-pub fn get_dummy_chunk_paths(num: usize, temp_dir: PathBuf) -> Vec<(XorName, PathBuf)> { - let mut rng = thread_rng(); - let mut chunks = Vec::with_capacity(num); - for _ in 0..num { - chunks.push((XorName::random(&mut rng), temp_dir.clone())); - } - chunks -} - -pub fn get_dummy_registers(num: usize, client: Client) -> Vec { - let mut rng = thread_rng(); - let mut registers = Vec::with_capacity(num); - for _ in 0..num { - // test_new_from_address that is used during get_register, - // uses AnyoneCanWrite permission, so use the same here - let client_reg = ClientRegister::create_register( - client.clone(), - XorName::random(&mut rng), - Permissions::AnyoneCanWrite, - ); - - registers.push(client_reg); - } - registers -} diff --git a/sn_client/src/uploader/upload.rs b/sn_client/src/uploader/upload.rs deleted file mode 100644 index 857c9fc31c..0000000000 --- a/sn_client/src/uploader/upload.rs +++ /dev/null @@ -1,1084 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use super::{ - GetStoreCostStrategy, TaskResult, UploadCfg, UploadEvent, UploadItem, UploadSummary, - UploaderInterface, -}; -use crate::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, - transfers::{TransferError, WalletError}, - Client, ClientRegister, Error as ClientError, Result, Uploader, WalletClient, -}; -use bytes::Bytes; -use itertools::Either; -use libp2p::PeerId; -use sn_networking::PayeeQuote; -use sn_protocol::{ - storage::{Chunk, RetryStrategy}, - NetworkAddress, -}; -use sn_registers::{RegisterAddress, SignedRegister}; -use sn_transfers::{NanoTokens, WalletApi}; -use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - path::{Path, PathBuf}, -}; -use tiny_keccak::{Hasher, Sha3}; -use tokio::sync::mpsc; -use xor_name::XorName; - -/// The number of repayments to attempt for a failed item before returning an error. -/// If value = 1, we do an initial payment & 1 repayment. Thus we make a max 2 payments per data item. -#[cfg(not(test))] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 3; -#[cfg(test)] -pub(super) const MAX_REPAYMENTS_PER_FAILED_ITEM: usize = 1; - -/// The maximum number of sequential payment failures before aborting the upload process. -#[cfg(not(test))] -const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 3; -#[cfg(test)] -const MAX_SEQUENTIAL_PAYMENT_FAILS: usize = 1; - -/// The maximum number of sequential network failures before aborting the upload process. -// todo: use uploader.retry_strategy.get_count() instead. -#[cfg(not(test))] -const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 32; -#[cfg(test)] -const MAX_SEQUENTIAL_NETWORK_ERRORS: usize = 1; - -/// The number of upload failures for a single data item before -#[cfg(not(test))] -const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 3; -#[cfg(test)] -const UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE: usize = 1; - -// TODO: -// 1. log whenever we insert/remove items. 
i.e., don't ignore values with `let _` - -/// The main loop that performs the upload process. -/// An interface is passed here for easy testing. -pub(super) async fn start_upload( - mut interface: Box, -) -> Result { - let mut uploader = interface.take_inner_uploader(); - // Take out the testing task senders if any. This is only set for tests. - let (task_result_sender, mut task_result_receiver) = - if let Some(channels) = uploader.testing_task_channels.take() { - channels - } else { - // 6 because of the 6 pipelines, 1 for redundancy. - mpsc::channel(uploader.cfg.batch_size * 6 + 1) - }; - let (make_payment_sender, make_payment_receiver) = mpsc::channel(uploader.cfg.batch_size); - - uploader.start_make_payment_processing_loop( - make_payment_receiver, - task_result_sender.clone(), - uploader.cfg.batch_size, - )?; - - // chunks can be pushed to pending_get_store_cost directly - uploader.pending_to_get_store_cost = uploader - .all_upload_items - .iter() - .filter_map(|(xorname, item)| { - if let UploadItem::Chunk { .. } = item { - Some((*xorname, GetStoreCostStrategy::Cheapest)) - } else { - None - } - }) - .collect(); - - // registers have to be verified + merged with remote replica, so we have to fetch it first. - uploader.pending_to_get_register = uploader - .all_upload_items - .iter() - .filter_map(|(_xorname, item)| { - if let UploadItem::Register { address, .. } = item { - Some(*address) - } else { - None - } - }) - .collect(); - - loop { - // Break if we have uploaded all the items. - // The loop also breaks if we fail to get_store_cost / make payment / upload for n consecutive times. - if uploader.all_upload_items.is_empty() { - debug!("Upload items are empty, exiting main upload loop."); - // To avoid empty final_balance when all items are skipped. - uploader.upload_final_balance = - InnerUploader::load_wallet_client(uploader.client.clone(), &uploader.root_dir)? 
- .balance(); - #[cfg(test)] - trace!("UPLOADER STATE: finished uploading all items {uploader:?}"); - let summary = UploadSummary { - storage_cost: uploader.upload_storage_cost, - royalty_fees: uploader.upload_royalty_fees, - final_balance: uploader.upload_final_balance, - uploaded_addresses: uploader.uploaded_addresses, - uploaded_count: uploader.uploaded_count, - skipped_count: uploader.skipped_count, - uploaded_registers: uploader.uploaded_registers, - }; - - if !uploader.max_repayments_reached.is_empty() { - error!( - "The maximum repayments were reached for these addresses: {:?}", - uploader.max_repayments_reached - ); - return Err(ClientError::UploadFailedWithMaximumRepaymentsReached { - items: uploader.max_repayments_reached.into_iter().collect(), - summary, - }); - } - - return Ok(summary); - } - - // try to GET register if we have enough buffer. - // The results of the get & push register steps are used to fill up `pending_to_get_store` cost - // Since the get store cost list is the init state, we don't have to check if it is not full. - while !uploader.pending_to_get_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - if let Some(reg_addr) = uploader.pending_to_get_register.pop() { - trace!("Conditions met for GET registers {:?}", reg_addr.xorname()); - let _ = uploader.on_going_get_register.insert(reg_addr.xorname()); - interface.submit_get_register_task( - uploader.client.clone(), - reg_addr, - task_result_sender.clone(), - ); - } - } - - // try to push register if we have enough buffer. - // No other checks for the same reason as the above step. 
- while !uploader.pending_to_push_register.is_empty() - && uploader.on_going_get_register.len() < uploader.cfg.batch_size - { - let upload_item = uploader.pop_item_for_push_register()?; - trace!( - "Conditions met for push registers {:?}", - upload_item.xorname() - ); - let _ = uploader - .on_going_push_register - .insert(upload_item.xorname()); - interface.submit_push_register_task( - upload_item, - uploader.cfg.verify_store, - task_result_sender.clone(), - ); - } - - // try to get store cost for an item if pending_to_pay needs items & if we have enough buffer. - while !uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.len() < uploader.cfg.batch_size - && uploader.pending_to_pay.len() < uploader.cfg.batch_size - { - let (xorname, address, get_store_cost_strategy) = - uploader.pop_item_for_get_store_cost()?; - trace!("Conditions met for get store cost. {xorname:?} {get_store_cost_strategy:?}",); - - let _ = uploader.on_going_get_cost.insert(xorname); - interface.submit_get_store_cost_task( - uploader.client.clone(), - uploader.wallet_api.clone(), - xorname, - address, - get_store_cost_strategy, - uploader.cfg.max_repayments_for_failed_data, - task_result_sender.clone(), - ); - } - - // try to make payment for an item if pending_to_upload needs items & if we have enough buffer. - while !uploader.pending_to_pay.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - && uploader.pending_to_upload.len() < uploader.cfg.batch_size - { - let (upload_item, quote) = uploader.pop_item_for_make_payment()?; - trace!( - "Conditions met for making payments. {:?} {quote:?}", - upload_item.xorname() - ); - let _ = uploader.on_going_payments.insert(upload_item.xorname()); - - interface - .submit_make_payment_task(Some((upload_item, quote)), make_payment_sender.clone()); - } - - // try to upload if we have enough buffer to upload. 
- while !uploader.pending_to_upload.is_empty() - && uploader.on_going_uploads.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: upload_item : {uploader:?}"); - let upload_item = uploader.pop_item_for_upload_item()?; - - trace!("Conditions met for uploading. {:?}", upload_item.xorname()); - let _ = uploader.on_going_uploads.insert(upload_item.xorname()); - interface.submit_upload_item_task( - upload_item, - uploader.client.clone(), - uploader.wallet_api.clone(), - uploader.cfg.verify_store, - uploader.cfg.retry_strategy, - task_result_sender.clone(), - ); - } - - // Fire None to trigger a forced round of making leftover payments, if there are not enough store cost tasks - // to fill up the buffer. - if uploader.pending_to_get_store_cost.is_empty() - && uploader.on_going_get_cost.is_empty() - && !uploader.on_going_payments.is_empty() - && uploader.on_going_payments.len() < uploader.cfg.batch_size - { - #[cfg(test)] - trace!("UPLOADER STATE: make_payment (forced): {uploader:?}"); - - debug!("There are not enough on going payments to trigger a batch Payment and no get_store_costs to fill the batch. Triggering forced round of payment"); - interface.submit_make_payment_task(None, make_payment_sender.clone()); - } - - #[cfg(test)] - trace!("UPLOADER STATE: before await task result: {uploader:?}"); - - trace!("Fetching task result"); - let task_result = task_result_receiver - .recv() - .await - .ok_or(ClientError::InternalTaskChannelDropped)?; - trace!("Received task result: {task_result:?}"); - match task_result { - TaskResult::GetRegisterFromNetworkOk { remote_register } => { - // if we got back the register, then merge & PUT it. 
- let xorname = remote_register.address().xorname(); - trace!("TaskResult::GetRegisterFromNetworkOk for remote register: {xorname:?} \n{remote_register:?}"); - let _ = uploader.on_going_get_register.remove(&xorname); - - let reg = uploader - .all_upload_items - .get_mut(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - if let UploadItem::Register { reg, .. } = reg { - reg.merge(&remote_register); - uploader.pending_to_push_register.push(xorname); - } - } - TaskResult::GetRegisterFromNetworkErr(xorname) => { - // then the register is a new one. It can follow the same flow as chunks now. - let _ = uploader.on_going_get_register.remove(&xorname); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::Cheapest)); - } - TaskResult::PushRegisterOk { updated_register } => { - // push modifies the register, so we return this instead of the one from all_upload_items - let xorname = updated_register.address().xorname(); - let _ = uploader.on_going_push_register.remove(&xorname); - uploader.skipped_count += 1; - let _ = uploader - .uploaded_addresses - .insert(NetworkAddress::from_register_address( - *updated_register.address(), - )); - - let _old_register = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*updated_register.address(), updated_register.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUpdated(updated_register)); - } - TaskResult::PushRegisterErr(xorname) => { - // the register failed to be Pushed. Retry until failure. 
- let _ = uploader.on_going_push_register.remove(&xorname); - uploader.pending_to_push_register.push(xorname); - - uploader.push_register_errors += 1; - if uploader.push_register_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { - error!("Max sequential network failures reached during PushRegisterErr."); - return Err(ClientError::SequentialNetworkErrors); - } - } - TaskResult::GetStoreCostOk { xorname, quote } => { - let _ = uploader.on_going_get_cost.remove(&xorname); - uploader.get_store_cost_errors = 0; // reset error if Ok. We only throw error after 'n' sequential errors - - trace!("GetStoreCostOk for {xorname:?}'s store_cost {:?}", quote.2); - - if quote.2.cost != NanoTokens::zero() { - uploader.pending_to_pay.push((xorname, quote)); - } - // if cost is 0, then it already in the network. - else { - // remove the item since we have uploaded it. - let removed_item = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - let _ = uploader.uploaded_addresses.insert(removed_item.address()); - trace!("{xorname:?} has store cost of 0 and it already exists on the network"); - uploader.skipped_count += 1; - - // if during the first try we skip the item, then it is already present in the network. - match removed_item { - UploadItem::Chunk { address, .. } => { - uploader.emit_upload_event(UploadEvent::ChunkAlreadyExistsInNetwork( - address, - )); - } - - UploadItem::Register { reg, .. 
} => { - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*reg.address(), reg.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUpdated(reg)); - } - } - } - } - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } => { - let _ = uploader.on_going_get_cost.remove(&xorname); - trace!("GetStoreCostErr for {xorname:?} , get_store_cost_strategy: {get_store_cost_strategy:?}, max_repayments_reached: {max_repayments_reached:?}"); - - // If max repayments reached, track it separately. Else retry get_store_cost. - if max_repayments_reached { - error!("Max repayments reached for {xorname:?}. Skipping upload for it"); - uploader.max_repayments_reached.insert(xorname); - uploader.all_upload_items.remove(&xorname); - } else { - // use the same strategy. The repay different payee is set only if upload fails. - uploader - .pending_to_get_store_cost - .push((xorname, get_store_cost_strategy.clone())); - } - uploader.get_store_cost_errors += 1; - if uploader.get_store_cost_errors > MAX_SEQUENTIAL_NETWORK_ERRORS { - error!("Max sequential network failures reached during GetStoreCostErr."); - return Err(ClientError::SequentialNetworkErrors); - } - } - TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost, - royalty_fees, - new_balance, - } => { - trace!("MakePaymentsOk for {} items: hash({:?}), with {storage_cost:?} store_cost and {royalty_fees:?} royalty_fees, and new_balance is {new_balance:?}", - paid_xornames.len(), InnerUploader::hash_of_xornames(paid_xornames.iter())); - for xorname in paid_xornames.iter() { - let _ = uploader.on_going_payments.remove(xorname); - } - uploader.pending_to_upload.extend(paid_xornames); - uploader.make_payments_errors = 0; - uploader.upload_final_balance = new_balance; - uploader.upload_storage_cost = uploader - .upload_storage_cost - .checked_add(storage_cost) - .ok_or(ClientError::TotalPriceTooHigh)?; - uploader.upload_royalty_fees = uploader - 
.upload_royalty_fees - .checked_add(royalty_fees) - .ok_or(ClientError::TotalPriceTooHigh)?; - - // reset sequential payment fail error if ok. We throw error if payment fails continuously more than - // MAX_SEQUENTIAL_PAYMENT_FAILS errors. - uploader.emit_upload_event(UploadEvent::PaymentMade { - storage_cost, - royalty_fees, - new_balance, - }); - } - TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance, - } => { - trace!( - "MakePaymentsErr for {:?} items: hash({:?})", - failed_xornames.len(), - InnerUploader::hash_of_xornames(failed_xornames.iter().map(|(name, _)| name)) - ); - if let Some((available, required)) = insufficient_balance { - error!("Wallet does not have enough funds. This error is not recoverable"); - return Err(ClientError::Wallet(WalletError::Transfer( - TransferError::NotEnoughBalance(available, required), - ))); - } - - for (xorname, quote) in failed_xornames { - let _ = uploader.on_going_payments.remove(&xorname); - uploader.pending_to_pay.push((xorname, quote)); - } - uploader.make_payments_errors += 1; - - if uploader.make_payments_errors >= MAX_SEQUENTIAL_PAYMENT_FAILS { - error!("Max sequential upload failures reached during MakePaymentsErr."); - // Too many sequential overall payment failure indicating - // unrecoverable failure of spend tx continuously rejected by network. - // The entire upload process shall be terminated. - return Err(ClientError::SequentialUploadPaymentError); - } - } - TaskResult::UploadOk(xorname) => { - let _ = uploader.on_going_uploads.remove(&xorname); - uploader.uploaded_count += 1; - trace!("UploadOk for {xorname:?}"); - // remove the item since we have uploaded it. - let removed_item = uploader - .all_upload_items - .remove(&xorname) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - let _ = uploader.uploaded_addresses.insert(removed_item.address()); - - match removed_item { - UploadItem::Chunk { address, .. 
} => { - uploader.emit_upload_event(UploadEvent::ChunkUploaded(address)); - } - UploadItem::Register { reg, .. } => { - if uploader.cfg.collect_registers { - let _ = uploader - .uploaded_registers - .insert(*reg.address(), reg.clone()); - } - uploader.emit_upload_event(UploadEvent::RegisterUploaded(reg)); - } - } - } - TaskResult::UploadErr { xorname } => { - let _ = uploader.on_going_uploads.remove(&xorname); - trace!("UploadErr for {xorname:?}"); - - // keep track of the failure - let n_errors = uploader.n_errors_during_uploads.entry(xorname).or_insert(0); - *n_errors += 1; - - // if quote has expired, don't retry the upload again. Instead get the cheapest quote again. - if *n_errors > UPLOAD_FAILURES_BEFORE_SELECTING_DIFFERENT_PAYEE { - // if error > threshold, then select different payee. else retry again - // Also reset n_errors as we want to enable retries for the new payee. - *n_errors = 0; - debug!("Max error during upload reached for {xorname:?}. Selecting a different payee."); - - uploader - .pending_to_get_store_cost - .push((xorname, GetStoreCostStrategy::SelectDifferentPayee)); - } else { - uploader.pending_to_upload.push(xorname); - } - } - } - } -} - -impl UploaderInterface for Uploader { - fn take_inner_uploader(&mut self) -> InnerUploader { - self.inner - .take() - .expect("Uploader::new makes sure inner is present") - } - - fn submit_get_store_cost_task( - &mut self, - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning get_store_cost for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_store_cost( - client, - wallet_api, - xorname, - address, - get_store_cost_strategy.clone(), - max_repayments_for_failed_data, - ) - .await - { - Ok(quote) => { - debug!("StoreCosts retrieved for {xorname:?} quote: {quote:?}"); - 
TaskResult::GetStoreCostOk { - xorname, - quote: Box::new(quote), - } - } - Err(err) => { - error!("Encountered error {err:?} when getting store_cost for {xorname:?}",); - - let max_repayments_reached = - matches!(&err, ClientError::MaximumRepaymentsReached(_)); - - TaskResult::GetStoreCostErr { - xorname, - get_store_cost_strategy, - max_repayments_reached, - } - } - }; - - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_get_register_task( - &mut self, - client: Client, - reg_addr: RegisterAddress, - task_result_sender: mpsc::Sender, - ) { - let xorname = reg_addr.xorname(); - trace!("Spawning get_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::get_register(client, reg_addr).await { - Ok(register) => { - debug!("Register retrieved for {xorname:?}"); - TaskResult::GetRegisterFromNetworkOk { - remote_register: register, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - warn!("Encountered error {err:?} during get_register. The register has to be PUT as it is a new one."); - TaskResult::GetRegisterFromNetworkErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_push_register_task( - &mut self, - upload_item: UploadItem, - verify_store: bool, - task_result_sender: mpsc::Sender, - ) { - let xorname = upload_item.xorname(); - trace!("Spawning push_register for {xorname:?}"); - let _handle = tokio::spawn(async move { - let task_result = match InnerUploader::push_register(upload_item, verify_store).await { - Ok(reg) => { - debug!("Register pushed: {xorname:?}"); - TaskResult::PushRegisterOk { - updated_register: reg, - } - } - Err(err) => { - // todo match on error to only skip if GetRecordError - error!("Encountered error {err:?} during push_register. 
The register might not be present in the network"); - TaskResult::PushRegisterErr(xorname) - } - }; - let _ = task_result_sender.send(task_result).await; - }); - } - - fn submit_make_payment_task( - &mut self, - to_send: Option<(UploadItem, Box)>, - make_payment_sender: mpsc::Sender)>>, - ) { - let _handle = tokio::spawn(async move { - let _ = make_payment_sender.send(to_send).await; - }); - } - - fn submit_upload_item_task( - &mut self, - upload_item: UploadItem, - client: Client, - wallet_api: WalletApi, - verify_store: bool, - retry_strategy: RetryStrategy, - task_result_sender: mpsc::Sender, - ) { - trace!("Spawning upload item task for {:?}", upload_item.xorname()); - - let _handle = tokio::spawn(async move { - let xorname = upload_item.xorname(); - let result = InnerUploader::upload_item( - client, - wallet_api, - upload_item, - verify_store, - retry_strategy, - ) - .await; - - trace!("Upload item {xorname:?} uploaded with result {result:?}"); - match result { - Ok(_) => { - let _ = task_result_sender.send(TaskResult::UploadOk(xorname)).await; - } - Err(_) => { - let _ = task_result_sender - .send(TaskResult::UploadErr { xorname }) - .await; - } - }; - }); - } -} - -/// `Uploader` provides functionality for uploading both Chunks and Registers with support for retries and queuing. -/// This struct is not cloneable. To create a new instance with default configuration, use the `new` function. -/// To modify the configuration, use the provided setter methods (`set_...` functions). 
-#[derive(custom_debug::Debug)] -pub(super) struct InnerUploader { - pub(super) cfg: UploadCfg, - #[debug(skip)] - pub(super) client: Client, - #[debug(skip)] - pub(super) wallet_api: WalletApi, - pub(super) root_dir: PathBuf, - - // states - pub(super) all_upload_items: HashMap, - pub(super) pending_to_get_register: Vec, - pub(super) pending_to_push_register: Vec, - pub(super) pending_to_get_store_cost: Vec<(XorName, GetStoreCostStrategy)>, - pub(super) pending_to_pay: Vec<(XorName, Box)>, - pub(super) pending_to_upload: Vec, - - // trackers - pub(super) on_going_get_register: BTreeSet, - pub(super) on_going_push_register: BTreeSet, - pub(super) on_going_get_cost: BTreeSet, - pub(super) on_going_payments: BTreeSet, - pub(super) on_going_uploads: BTreeSet, - - // error trackers - pub(super) n_errors_during_uploads: BTreeMap, - pub(super) push_register_errors: usize, - pub(super) get_store_cost_errors: usize, - pub(super) make_payments_errors: usize, - - // Upload summary - pub(super) upload_storage_cost: NanoTokens, - pub(super) upload_royalty_fees: NanoTokens, - pub(super) upload_final_balance: NanoTokens, - pub(super) max_repayments_reached: BTreeSet, - pub(super) uploaded_addresses: BTreeSet, - pub(super) uploaded_registers: BTreeMap, - pub(super) uploaded_count: usize, - pub(super) skipped_count: usize, - - // Task channels for testing. Not used in actual code. 
- pub(super) testing_task_channels: - Option<(mpsc::Sender, mpsc::Receiver)>, - - // Public events events - #[debug(skip)] - pub(super) logged_event_sender_absence: bool, - #[debug(skip)] - pub(super) event_sender: Option>, -} - -impl InnerUploader { - pub(super) fn new(client: Client, root_dir: PathBuf) -> Self { - Self { - cfg: Default::default(), - client, - wallet_api: WalletApi::new_from_root_dir(&root_dir), - root_dir, - - all_upload_items: Default::default(), - pending_to_get_register: Default::default(), - pending_to_push_register: Default::default(), - pending_to_get_store_cost: Default::default(), - pending_to_pay: Default::default(), - pending_to_upload: Default::default(), - - on_going_get_register: Default::default(), - on_going_push_register: Default::default(), - on_going_get_cost: Default::default(), - on_going_payments: Default::default(), - on_going_uploads: Default::default(), - - n_errors_during_uploads: Default::default(), - push_register_errors: Default::default(), - get_store_cost_errors: Default::default(), - max_repayments_reached: Default::default(), - make_payments_errors: Default::default(), - - upload_storage_cost: NanoTokens::zero(), - upload_royalty_fees: NanoTokens::zero(), - upload_final_balance: NanoTokens::zero(), - uploaded_addresses: Default::default(), - uploaded_registers: Default::default(), - uploaded_count: Default::default(), - skipped_count: Default::default(), - - testing_task_channels: None, - logged_event_sender_absence: Default::default(), - event_sender: Default::default(), - } - } - - // ====== Pop items ====== - - fn pop_item_for_push_register(&mut self) -> Result { - if let Some(name) = self.pending_to_push_register.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok(upload_item) - } else { - // the caller will be making sure this does not happen. 
- Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - fn pop_item_for_get_store_cost( - &mut self, - ) -> Result<(XorName, NetworkAddress, GetStoreCostStrategy)> { - let (xorname, strategy) = self - .pending_to_get_store_cost - .pop() - .ok_or(ClientError::UploadStateTrackerIsEmpty)?; - let address = self - .all_upload_items - .get(&xorname) - .map(|item| item.address()) - .ok_or(ClientError::UploadableItemNotFound(xorname))?; - Ok((xorname, address, strategy)) - } - - fn pop_item_for_make_payment(&mut self) -> Result<(UploadItem, Box)> { - if let Some((name, quote)) = self.pending_to_pay.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok((upload_item, quote)) - } else { - // the caller will be making sure this does not happen. - Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - fn pop_item_for_upload_item(&mut self) -> Result { - if let Some(name) = self.pending_to_upload.pop() { - let upload_item = self - .all_upload_items - .get(&name) - .cloned() - .ok_or(ClientError::UploadableItemNotFound(name))?; - Ok(upload_item) - } else { - // the caller will be making sure this does not happen. - Err(ClientError::UploadStateTrackerIsEmpty) - } - } - - // ====== Processing Loop ====== - - // This is spawned as a long running task to prevent us from reading the wallet files - // each time we have to make a payment. 
- fn start_make_payment_processing_loop( - &self, - mut make_payment_receiver: mpsc::Receiver)>>, - task_result_sender: mpsc::Sender, - batch_size: usize, - ) -> Result<()> { - let mut wallet_client = Self::load_wallet_client(self.client.clone(), &self.root_dir)?; - - let verify_store = self.cfg.verify_store; - let _handle = tokio::spawn(async move { - debug!("Spawning the long running make payment processing loop."); - - let mut cost_map = BTreeMap::new(); - let mut current_batch = vec![]; - - let mut got_a_previous_force_payment = false; - while let Some(payment) = make_payment_receiver.recv().await { - let make_payments = if let Some((item, quote)) = payment { - let xorname = item.xorname(); - trace!("Inserted {xorname:?} into cost_map"); - - current_batch.push((xorname, quote.clone())); - let _ = cost_map.insert(xorname, (quote.1, quote.2, quote.0.to_bytes())); - cost_map.len() >= batch_size || got_a_previous_force_payment - } else { - // using None to indicate as all paid. - let make_payments = !cost_map.is_empty(); - trace!("Got a forced forced round of make payment."); - // Note: There can be a mismatch of ordering between the main loop and the make payment loop because - // the instructions are sent via a task(channel.send().await). And there is no guarantee for the - // order to come in the same order as they were sent. - // - // We cannot just disobey the instruction inside the child loop, as the mainloop would be expecting - // a result back for a particular instruction. - if !make_payments { - got_a_previous_force_payment = true; - warn!( - "We were told to force make payment, but cost_map is empty, so we can't do that just yet. 
Waiting for a task to insert a quote into cost_map" - ) - } - - make_payments - }; - - if make_payments { - // reset force_make_payment - if got_a_previous_force_payment { - info!("A task inserted a quote into cost_map, so we can now make a forced round of payment!"); - got_a_previous_force_payment = false; - } - - let _ = wallet_client - .resend_pending_transaction_blocking_loop() - .await; - - let mut terminate_process = false; - - let result = match wallet_client.pay_for_records(&cost_map, verify_store).await - { - Ok((storage_cost, royalty_fees)) => { - let paid_xornames = std::mem::take(&mut current_batch); - let paid_xornames = paid_xornames - .into_iter() - .map(|(xorname, _)| xorname) - .collect::>(); - trace!( - "Made payments for {} records: hash({:?})", - cost_map.len(), - Self::hash_of_xornames(paid_xornames.iter()) - ); - TaskResult::MakePaymentsOk { - paid_xornames, - storage_cost, - royalty_fees, - new_balance: wallet_client.balance(), - } - } - Err(err) => { - let failed_xornames = std::mem::take(&mut current_batch); - error!( - "When paying {} data: hash({:?}) got error {err:?}", - failed_xornames.len(), - Self::hash_of_xornames( - failed_xornames.iter().map(|(name, _)| name) - ) - ); - match err { - WalletError::Transfer(TransferError::NotEnoughBalance( - available, - required, - )) => { - terminate_process = true; - TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: Some((available, required)), - } - } - _ => TaskResult::MakePaymentsErr { - failed_xornames, - insufficient_balance: None, - }, - } - } - }; - let pay_for_chunk_sender_clone = task_result_sender.clone(); - let _handle = tokio::spawn(async move { - let _ = pay_for_chunk_sender_clone.send(result).await; - }); - - cost_map = BTreeMap::new(); - - if terminate_process { - // The error will trigger the entire upload process to be terminated. - // Hence here we shall terminate the inner loop first, - // to avoid the wallet going furhter to be potentially got corrupted. 
- warn!( - "Terminating make payment processing loop due to un-recoverable error." - ); - break; - } - } - } - debug!("Make payment processing loop terminated."); - }); - Ok(()) - } - - // ====== Logic ====== - - async fn get_register(client: Client, reg_addr: RegisterAddress) -> Result { - client.verify_register_stored(reg_addr).await - } - - async fn push_register(upload_item: UploadItem, verify_store: bool) -> Result { - let mut reg = if let UploadItem::Register { reg, .. } = upload_item { - reg - } else { - return Err(ClientError::InvalidUploadItemFound); - }; - reg.push(verify_store).await?; - Ok(reg) - } - - async fn get_store_cost( - client: Client, - wallet_api: WalletApi, - xorname: XorName, - address: NetworkAddress, - get_store_cost_strategy: GetStoreCostStrategy, - max_repayments_for_failed_data: usize, - ) -> Result { - let filter_list = match get_store_cost_strategy { - GetStoreCostStrategy::Cheapest => vec![], - GetStoreCostStrategy::SelectDifferentPayee => { - // Check if we have already made payment for the provided xorname. If so filter out those payee - let filter_list = wallet_api - .get_all_payments(&xorname)? - .into_iter() - .map(|details| { - PeerId::from_bytes(&details.peer_id_bytes).map_err(|_| { - ClientError::Wallet(WalletError::NoPaymentForAddress(xorname)) - }) - }) - .collect::>>()?; - - // if we have already made initial + max_repayments, then we should error out. - if Self::have_we_reached_max_repayments( - filter_list.len(), - max_repayments_for_failed_data, - ) { - // error is used by the caller. 
- return Err(ClientError::MaximumRepaymentsReached(xorname)); - } - - debug!("Filtering out payments from {filter_list:?} during get_store_cost for {xorname:?}"); - filter_list - } - }; - let quote = client - .network - .get_store_costs_from_network(address, filter_list) - .await?; - Ok(quote) - } - - async fn upload_item( - client: Client, - wallet_api: WalletApi, - upload_item: UploadItem, - verify_store: bool, - retry_strategy: RetryStrategy, - ) -> Result<()> { - let xorname = upload_item.xorname(); - - let payment_details = wallet_api.get_recent_payment(&xorname)?; - let payment = payment_details.to_payment(); - let payee = PeerId::from_bytes(&payment_details.peer_id_bytes) - .map_err(|_| ClientError::Wallet(WalletError::NoPaymentForAddress(xorname)))?; - - debug!("Payments for upload item: {xorname:?} to {payee:?}: {payment:?}"); - - match upload_item { - UploadItem::Chunk { address: _, chunk } => { - let chunk = match chunk { - Either::Left(chunk) => chunk, - Either::Right(path) => { - let bytes = std::fs::read(path)?; - Chunk::new(Bytes::from(bytes)) - } - }; - - trace!("Client upload started for chunk: {xorname:?}"); - client - .store_chunk(chunk, payee, payment, verify_store, Some(retry_strategy)) - .await?; - trace!("Client upload completed for chunk: {xorname:?}"); - } - UploadItem::Register { address: _, reg } => { - reg.publish_register(Some((payment, payee)), verify_store) - .await?; - trace!("Client upload completed for register: {xorname:?}"); - } - } - // remove the payment if the upload is successful. 
- wallet_api.remove_payment_transaction(&xorname); - - Ok(()) - } - - // ====== Misc ====== - - fn emit_upload_event(&mut self, event: UploadEvent) { - if let Some(sender) = self.event_sender.as_ref() { - let sender_clone = sender.clone(); - let _handle = tokio::spawn(async move { - if let Err(err) = sender_clone.send(event).await { - error!("Error emitting upload event: {err:?}"); - } - }); - } else if !self.logged_event_sender_absence { - info!("FilesUpload upload event sender is not set. Use get_upload_events() if you need to keep track of the progress"); - self.logged_event_sender_absence = true; - } - } - - /// If we have already made initial + max_repayments_allowed, then we should error out. - // separate function as it is used in test. - pub(super) fn have_we_reached_max_repayments( - payments_made: usize, - max_repayments_allowed: usize, - ) -> bool { - // if max_repayments_allowed = 1, then we have reached capacity = true if 2 payments have been made. i.e., - // i.e., 1 initial + 1 repayment. - payments_made > max_repayments_allowed - } - - /// Create a new WalletClient for a given root directory. - fn load_wallet_client(client: Client, root_dir: &Path) -> Result { - let wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - Ok(WalletClient::new(client, wallet)) - } - - // Used to debug a list of xornames. - fn hash_of_xornames<'a>(xornames: impl Iterator) -> String { - let mut output = [0; 32]; - let mut hasher = Sha3::v256(); - for xorname in xornames { - hasher.update(xorname); - } - hasher.finalize(&mut output); - - hex::encode(output) - } -} diff --git a/sn_client/src/wallet.rs b/sn_client/src/wallet.rs deleted file mode 100644 index 9a32382142..0000000000 --- a/sn_client/src/wallet.rs +++ /dev/null @@ -1,1175 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. 
-// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::Error; - -use super::{error::Result, Client}; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::{future::join_all, TryFutureExt}; -use libp2p::PeerId; -use sn_networking::target_arch::Instant; -use sn_networking::{GetRecordError, PayeeQuote}; -use sn_protocol::NetworkAddress; -use sn_transfers::{ - CashNote, HotWallet, MainPubkey, NanoTokens, Payment, PaymentQuote, SignedSpend, SpendAddress, - Transfer, WalletError, WalletResult, -}; -use std::{ - collections::{BTreeMap, BTreeSet}, - iter::Iterator, -}; -use tokio::{ - task::JoinSet, - time::{sleep, Duration}, -}; -use xor_name::XorName; - -const MAX_RESEND_PENDING_TX_ATTEMPTS: usize = 10; - -/// A wallet client can be used to send and receive tokens to and from other wallets. -pub struct WalletClient { - client: Client, - wallet: HotWallet, -} - -/// The result of the payment made for a set of Content Addresses -pub struct StoragePaymentResult { - pub storage_cost: NanoTokens, - pub royalty_fees: NanoTokens, - pub skipped_chunks: Vec, -} - -impl WalletClient { - /// Create a new wallet client. 
- /// - /// # Arguments - /// * `client` - A instance of the struct [`sn_client::Client`](Client) - /// * `wallet` - An instance of the struct [`HotWallet`] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// # Ok(()) - /// # } - /// ``` - pub fn new(client: Client, wallet: HotWallet) -> Self { - Self { client, wallet } - } - - /// Stores the wallet to the local wallet directory. - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// wallet_client.store_local_wallet()?; - /// # Ok(()) - /// # } - pub fn store_local_wallet(&mut self) -> WalletResult<()> { - self.wallet.deposit_and_store_to_disk(&vec![]) - } - - /// Display the wallet balance - /// # Example - /// ```no_run - /// // Display the wallet balance in the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn 
main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// println!("{}" ,wallet_client.balance()); - /// # Ok(()) - /// # } - pub fn balance(&self) -> NanoTokens { - self.wallet.balance() - } - - /// See if any unconfirmed transactions exist. - /// # Example - /// ```no_run - /// // Print unconfirmed spends to the terminal - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// if wallet_client.unconfirmed_spend_requests_exist() {println!("Unconfirmed spends exist!")}; - /// # Ok(()) - /// # } - pub fn unconfirmed_spend_requests_exist(&self) -> bool { - self.wallet.unconfirmed_spend_requests_exist() - } - - /// Returns the most recent cached Payment for a provided NetworkAddress. This function does not check if the - /// quote has expired or not. Use get_non_expired_payment_for_addr if you want to get a non expired one. - /// - /// If multiple payments have been made to the same address, then we pick the last one as it is the most recent. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.get_recent_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_recent_payment_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult<(Payment, PeerId)> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_detail = self.wallet.api().get_recent_payment(&xorname)?; - - let payment = payment_detail.to_payment(); - trace!("Payment retrieved for {xorname:?} from wallet: {payment:?}"); - let peer_id = PeerId::from_bytes(&payment_detail.peer_id_bytes) - .map_err(|_| WalletError::NoPaymentForAddress(xorname))?; - - Ok((payment, peer_id)) - } - - /// Returns the all cached Payment for a provided NetworkAddress. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Getting the payment for an address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payments = wallet_client.get_all_payments_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn get_all_payments_for_addr( - &self, - address: &NetworkAddress, - ) -> WalletResult> { - let xorname = address - .as_xorname() - .ok_or(WalletError::InvalidAddressType)?; - let payment_details = self.wallet.api().get_all_payments(&xorname)?; - - let payments = payment_details - .into_iter() - .map(|details| { - let payment = details.to_payment(); - - match PeerId::from_bytes(&details.peer_id_bytes) { - Ok(peer_id) => Ok((payment, peer_id)), - Err(_) => Err(WalletError::NoPaymentForAddress(xorname)), - } - }) - .collect::>>()?; - - trace!( - "{} Payment retrieved for {xorname:?} from wallet: {payments:?}", - payments.len() - ); - - Ok(payments) - } - - /// Remove the payment for a given network address from disk. - /// - /// # Arguments - /// * `address` - The [`NetworkAddress`]. 
- /// - /// # Example - /// ```no_run - /// // Removing a payment address using a random PeerId - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use libp2p_identity::PeerId; - /// use sn_protocol::NetworkAddress; - /// - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let payment = wallet_client.remove_payment_for_addr(&network_address)?; - /// # Ok(()) - /// # } - /// ``` - pub fn remove_payment_for_addr(&self, address: &NetworkAddress) -> WalletResult<()> { - match &address.as_xorname() { - Some(xorname) => { - self.wallet.api().remove_payment_transaction(xorname); - Ok(()) - } - None => Err(WalletError::InvalidAddressType), - } - } - - /// Send tokens to another wallet. Can also verify the store has been successful. - /// Verification will be attempted via GET request through a Spend on the network. - /// - /// # Arguments - /// * `amount` - [`NanoTokens`]. - /// * `to` - [`MainPubkey`]. - /// * `verify_store` - A boolean to verify store. Set this to true for mandatory verification. 
- /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::io::Bytes; - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// use sn_transfers::NanoTokens; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let nano = NanoTokens::from(10); - /// let main_pub_key = MainSecretKey::random().main_pubkey(); - /// let payment = wallet_client.send_cash_note(nano,main_pub_key, true); - /// # Ok(()) - /// # } - /// ``` - pub async fn send_cash_note( - &mut self, - amount: NanoTokens, - to: MainPubkey, - verify_store: bool, - ) -> WalletResult { - let created_cash_notes = self.wallet.local_send(vec![(amount, to)], None)?; - - // send to network - if let Err(error) = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - { - return Err(WalletError::CouldNotSendMoney(format!( - "The transfer was not successfully registered in the network: {error:?}" - ))); - } else { - // clear unconfirmed txs - self.wallet.clear_confirmed_spend_requests(); - } - - // return the first CashNote (assuming there is only one because we only sent to one recipient) - match &created_cash_notes[..] { - [cashnote] => Ok(cashnote.clone()), - [_multiple, ..] => Err(WalletError::CouldNotSendMoney( - "Multiple CashNotes were returned from the transaction when only one was expected. This is a BUG." 
- .into(), - )), - [] => Err(WalletError::CouldNotSendMoney( - "No CashNotes were returned from the wallet.".into(), - )), - } - } - - /// Get storecost from the network - /// Returns the MainPubkey of the node to pay and the price in NanoTokens - /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use libp2p_identity::PeerId; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// # let mut rng = rand::thread_rng(); - /// # let xor_name = XorName::random(&mut rng); - /// let network_address = NetworkAddress::from_peer(PeerId::random()); - /// let mut wallet_client = WalletClient::new(client, wallet); - /// // Use get_store_cost_at_address(network_address) to get a storecost from the network. - /// let cost = wallet_client.get_store_cost_at_address(network_address).await?.2.cost.as_nano(); - /// # Ok(()) - /// # } - pub async fn get_store_cost_at_address( - &self, - address: NetworkAddress, - ) -> WalletResult { - self.client - .network - .get_store_costs_from_network(address, vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())) - } - - /// Send tokens to nodes closest to the data we want to make storage payment for. Runs mandatory verification. 
- /// - /// # Arguments - /// - content_addrs - [Iterator] - /// - /// # Returns: - /// * [WalletResult]<[StoragePaymentResult]> - /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use xor_name::XorName; - /// use sn_protocol::NetworkAddress; - /// use sn_registers::{Permissions, RegisterAddress}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client.clone(), wallet); - /// let mut rng = rand::thread_rng(); - /// let xor_name = XorName::random(&mut rng); - /// let address = RegisterAddress::new(xor_name, client.signer_pk()); - /// let net_addr = NetworkAddress::from_register_address(address); - /// - /// // Paying for a random Register Address - /// let cost = wallet_client.pay_for_storage(std::iter::once(net_addr)).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_storage( - &mut self, - content_addrs: impl Iterator, - ) -> WalletResult { - let verify_store = true; - let c: Vec<_> = content_addrs.collect(); - // Using default ExponentialBackoff doesn't make sense, - // as it will just fail after the first payment failure. 
- let mut backoff = ExponentialBackoff::default(); - let mut last_err = "No retries".to_string(); - - while let Some(delay) = backoff.next_backoff() { - trace!("Paying for storage (w/backoff retries) for: {:?}", c); - match self - .pay_for_storage_once(c.clone().into_iter(), verify_store) - .await - { - Ok(payment_result) => return Ok(payment_result), - Err(WalletError::CouldNotSendMoney(err)) => { - warn!("Attempt to pay for data failed: {err:?}"); - last_err = err; - sleep(delay).await; - } - Err(err) => return Err(err), - } - } - Err(WalletError::CouldNotSendMoney(last_err)) - } - - /// Existing chunks will have the store cost set to Zero. - /// The payment procedure shall be skipped, and the chunk upload as well. - /// Hence the list of existing chunks will be returned. - async fn pay_for_storage_once( - &mut self, - content_addrs: impl Iterator, - verify_store: bool, - ) -> WalletResult { - // get store cost from network in parallel - let mut tasks = JoinSet::new(); - for content_addr in content_addrs { - let client = self.client.clone(); - tasks.spawn(async move { - let cost = client - .network - .get_store_costs_from_network(content_addr.clone(), vec![]) - .await - .map_err(|error| WalletError::CouldNotSendMoney(error.to_string())); - - debug!("Storecosts retrieved for {content_addr:?} {cost:?}"); - (content_addr, cost) - }); - } - debug!("Pending store cost tasks: {:?}", tasks.len()); - - // collect store costs - let mut cost_map = BTreeMap::default(); - let mut skipped_chunks = vec![]; - while let Some(res) = tasks.join_next().await { - match res { - Ok((content_addr, Ok(cost))) => { - if let Some(xorname) = content_addr.as_xorname() { - if cost.2.cost == NanoTokens::zero() { - skipped_chunks.push(xorname); - debug!("Skipped existing chunk {content_addr:?}"); - } else { - debug!("Storecost inserted into payment map for {content_addr:?}"); - let _ = cost_map.insert(xorname, (cost.1, cost.2, cost.0.to_bytes())); - } - } else { - warn!("Cannot get store cost 
for a content that is not a data type: {content_addr:?}"); - } - } - Ok((content_addr, Err(err))) => { - warn!("Cannot get store cost for {content_addr:?} with error {err:?}"); - return Err(err); - } - Err(e) => { - return Err(WalletError::CouldNotSendMoney(format!( - "Storecost get task failed: {e:?}" - ))); - } - } - } - info!("Storecosts retrieved for all the provided content addrs"); - - // pay for records - let (storage_cost, royalty_fees) = self.pay_for_records(&cost_map, verify_store).await?; - let res = StoragePaymentResult { - storage_cost, - royalty_fees, - skipped_chunks, - }; - Ok(res) - } - - /// Send tokens to nodes closest to the data that we want to make storage payments for. - /// # Returns: - /// - /// * [WalletResult]<([NanoTokens], [NanoTokens])> - /// - /// This return contains the amount paid for storage. Including the network royalties fee paid. - /// - /// # Params: - /// * cost_map - [BTreeMap]([XorName],([MainPubkey], [PaymentQuote])) - /// * verify_store - This optional check can verify if the store has been successful. - /// - /// Verification will be attempted via GET request through a Spend on the network. 
- /// - /// # Example - ///```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # use std::collections::BTreeMap; - /// use xor_name::XorName; - /// use sn_transfers::{MainPubkey, Payment, PaymentQuote}; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let mut cost_map:BTreeMap)> = BTreeMap::new(); - /// wallet_client.pay_for_records(&cost_map,true).await?; - /// # Ok(()) - /// # } - pub async fn pay_for_records( - &mut self, - cost_map: &BTreeMap)>, - verify_store: bool, - ) -> WalletResult<(NanoTokens, NanoTokens)> { - // Before wallet progress, there shall be no `unconfirmed_spend_requests` - self.resend_pending_transaction_until_success(verify_store) - .await?; - let start = Instant::now(); - let total_cost = self.wallet.local_send_storage_payment(cost_map)?; - - trace!( - "local_send_storage_payment of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // send to network - trace!("Sending storage payment transfer to the network"); - let start = Instant::now(); - let spend_attempt_result = self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await; - - trace!( - "send_spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - // Here is bit risky that for the whole bunch of spends to the chunks' store_costs and royalty_fee - // they will get re-paid again for ALL, if any one of the payment failed to be put. 
- let start = Instant::now(); - if let Err(error) = spend_attempt_result { - warn!("The storage payment transfer was not successfully registered in the network: {error:?}. It will be retried later."); - - // if we have a DoubleSpend error, lets remove the CashNote from the wallet - if let WalletError::DoubleSpendAttemptedForCashNotes(spent_cash_notes) = &error { - for cash_note_key in spent_cash_notes { - warn!("Removing double spends CashNote from wallet: {cash_note_key:?}"); - self.wallet.mark_notes_as_spent([cash_note_key]); - self.wallet.clear_specific_spend_request(*cash_note_key); - } - } - - self.wallet.store_unconfirmed_spend_requests()?; - - return Err(WalletError::CouldNotSendMoney(format!( - "The storage payment transfer was not successfully registered in the network: {error:?}" - ))); - } else { - info!("Spend has completed: {:?}", spend_attempt_result); - self.wallet.clear_confirmed_spend_requests(); - } - trace!( - "clear up spends of {} chunks completed in {:?}", - cost_map.len(), - start.elapsed() - ); - - Ok(total_cost) - } - - /// Resend failed transactions. This can optionally verify the store has been successful. - /// This will attempt to GET the cash_note from the network. - async fn resend_pending_transactions(&mut self, verify_store: bool) { - if self - .client - .send_spends( - self.wallet.unconfirmed_spend_requests().iter(), - verify_store, - ) - .await - .is_ok() - { - self.wallet.clear_confirmed_spend_requests(); - } - } - - /// Resend previous confirmed spend. - async fn resend_confirmed_spend(&mut self, spend_addr: &SpendAddress) { - if let Ok(Some(spend)) = self.wallet.get_confirmed_spend(*spend_addr) { - let spend_vec = vec![spend]; - let _ = self.client.send_spends(spend_vec.iter(), true).await; - } else { - warn!("Cann't find confirmed spend of {spend_addr:?}"); - println!("Cann't find confirmed spend of {spend_addr:?}"); - } - } - - /// This is a blocking loop in cas there is pending transaction. 
- /// It will keeps resending the unconfirmed spend infinitely but explictly. - /// Function will only return on success (all unconfirmed spend uploaded), - /// or user chose to manualy, but safely, terminate the procedure. - pub async fn resend_pending_transaction_blocking_loop(&mut self) -> WalletResult<()> { - if !self.wallet.unconfirmed_spend_requests_exist() { - return Ok(()); - } - // Wallet shall be all clear to progress forward. - while self.wallet.unconfirmed_spend_requests_exist() { - info!("Pre-Unconfirmed transactions dected, sending again after 30 seconds..."); - println!("Pre-Unconfirmed transactions exist, sending again after 30 seconds..."); - println!("It's safe to terminate the work, but do remember to retain the unconfirmed_spend file during wallet update."); - println!("Otherwise, you are in risk to make the wallet corrupted."); - // Longer wait as the network will already in heavy duty situation, - // hence try not to give it further burden with short intervaled re-puts. - sleep(Duration::from_secs(30)).await; - - // Before re-sending, take a peek of un-confirmed spends first - // Helping user having a better view of what's happening. 
- let spends_to_check: BTreeMap> = self - .wallet - .unconfirmed_spend_requests() - .iter() - .map(|s| { - info!( - "Unconfirmed spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - info!("====== descendants : {:?} ", s.spend.descendants); - info!("====== ancestors : {:?} ", s.spend.ancestors); - println!( - "Unconfirmed spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - println!("====== descendants : {:?} ", s.spend.descendants); - println!("====== ancestors : {:?} ", s.spend.ancestors); - - let parent_spends: BTreeSet<_> = s - .spend - .ancestors - .iter() - .map(SpendAddress::from_unique_pubkey) - .collect(); - (s.address(), parent_spends) - }) - .collect(); - let unconfirmed_spends_addrs: Vec<_> = spends_to_check.keys().copied().collect(); - - for addr in unconfirmed_spends_addrs { - match self.client.peek_a_spend(addr).await { - Ok(_) => { - info!("Unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - println!( - "Unconfirmed Spend {addr:?} is find at least one copy in the network !" - ); - } - Err(err) => { - info!( - "Unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - println!( - "Unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" 
- ); - // For those that still not even have one copy in network yet - // Check it's parent's status in network - if let Some(parent_spends) = spends_to_check.get(&addr) { - for parent_addr in parent_spends.iter() { - match self.client.peek_a_spend(*parent_addr).await { - Ok(s) => { - info!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - println!("Parent {parent_addr:?} of unconfirmed Spend {addr:?} is find having at least one copy in the network !"); - info!( - "Parent spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - info!("====== descendants : {:?} ", s.spend.descendants); - info!("====== ancestors : {:?} ", s.spend.ancestors); - println!( - "Parent spend {:?} of amount {}", - s.spend.unique_pubkey, - s.spend.amount() - ); - println!("====== descendants : {:?} ", s.spend.descendants); - println!("====== ancestors : {:?} ", s.spend.ancestors); - } - Err(err) => { - warn!( - "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - println!( - "Parent {parent_addr:?} of unconfirmed Spend {addr:?} has no copy in the network yet {err:?} !" - ); - // In theory, it shall be traversed back to re-send all ancestors. - // However, in practical, only track back one generation is enough. - self.resend_confirmed_spend(parent_addr).await; - } - } - } - } - } - } - } - - self.resend_pending_transactions(true).await; - } - info!("Wallet is now all cleared, OK to progress further."); - println!("Wallet is now all cleared, OK to progress further."); - eprintln!("WARNING: Closing the client now could corrupt the wallet !"); - Ok(()) - } - - /// Try resending failed transactions multiple times until it succeeds or until we reach max attempts. - async fn resend_pending_transaction_until_success( - &mut self, - verify_store: bool, - ) -> WalletResult<()> { - let mut did_error = false; - // Wallet shall be all clear to progress forward. 
- let mut attempts = 0; - while self.wallet.unconfirmed_spend_requests_exist() { - info!("Pre-Unconfirmed transactions exist, sending again after 1 second..."); - sleep(Duration::from_secs(1)).await; - self.resend_pending_transactions(verify_store).await; - - if attempts > MAX_RESEND_PENDING_TX_ATTEMPTS { - // save the error state, but break out of the loop so we can save - did_error = true; - break; - } - - attempts += 1; - } - - if did_error { - error!("Wallet has pre-unconfirmed transactions, can't progress further."); - Err(WalletError::UnconfirmedTxAfterRetries) - } else { - Ok(()) - } - } - - /// Returns the wallet: - /// - /// Return type: [HotWallet] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let paying_wallet = wallet_client.into_wallet(); - /// // Display the wallet balance in the terminal - /// println!("{}",paying_wallet.balance()); - /// # Ok(()) - /// # } - pub fn into_wallet(self) -> HotWallet { - self.wallet - } - - /// Returns a mutable wallet instance - /// - /// Return type: [HotWallet] - /// - /// # Example - /// ```no_run - /// # use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// # use bls::SecretKey; - /// # use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// # let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// # let mut 
wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let mut wallet_client = WalletClient::new(client, wallet); - /// let paying_wallet = wallet_client.mut_wallet(); - /// // Display the mutable wallet balance in the terminal - /// println!("{}",paying_wallet.balance()); - /// # Ok(()) - /// # } - pub fn mut_wallet(&mut self) -> &mut HotWallet { - &mut self.wallet - } -} - -impl Client { - /// Send spend requests to the network. - /// This can optionally verify the spends have been correctly stored before returning - /// - /// # Arguments - /// * spend_requests - [Iterator]<[SignedSpend]> - /// * verify_store - Boolean. Set to true for mandatory verification via a GET request through a Spend on the network. - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// // An example of sending storage payment transfers over the network with validation - /// client.send_spends(wallet.unconfirmed_spend_requests().iter(),true).await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn send_spends( - &self, - spend_requests: impl Iterator, - verify_store: bool, - ) -> WalletResult<()> { - let mut tasks = Vec::new(); - - // send spends to the network in parralel - for spend_request in spend_requests { - trace!( - "sending spend request to the network: {:?}: {spend_request:#?}", - spend_request.unique_pubkey() - ); - - let the_task = async move { - let cash_note_key = spend_request.unique_pubkey(); - let result = self - .network_store_spend(spend_request.clone(), verify_store) - 
.await; - - (cash_note_key, result) - }; - tasks.push(the_task); - } - - // wait for all the tasks to complete and gather the errors - let mut errors = Vec::new(); - let mut double_spent_keys = BTreeSet::new(); - for (spend_key, spend_attempt_result) in join_all(tasks).await { - match spend_attempt_result { - Err(Error::Network(sn_networking::NetworkError::GetRecordError( - GetRecordError::RecordDoesNotMatch(_), - ))) - | Err(Error::Network(sn_networking::NetworkError::GetRecordError( - GetRecordError::SplitRecord { .. }, - ))) => { - warn!( - "Double spend detected while trying to spend: {:?}", - spend_key - ); - double_spent_keys.insert(*spend_key); - } - Err(e) => { - warn!("Spend request errored out when sent to the network {spend_key:?}: {e}"); - errors.push((spend_key, e)); - } - Ok(()) => { - trace!("Spend request was successfully sent to the network: {spend_key:?}"); - } - } - } - - // report errors accordingly - // double spend errors in priority as they should be dealt with by the wallet - if !double_spent_keys.is_empty() { - return Err(WalletError::DoubleSpendAttemptedForCashNotes( - double_spent_keys, - )); - } - if !errors.is_empty() { - let mut err_report = "Failed to send spend requests to the network:".to_string(); - for (spend_key, e) in &errors { - warn!("Failed to send spend request to the network: {spend_key:?}: {e}"); - err_report.push_str(&format!("{spend_key:?}: {e}")); - } - return Err(WalletError::CouldNotSendMoney(err_report)); - } - - Ok(()) - } - - /// Receive a Transfer, verify and redeem CashNotes from the Network. 
- /// - /// # Arguments - /// * transfer: &[Transfer] - Borrowed value for [Transfer] - /// * wallet: &[HotWallet] - Borrowed value for [HotWallet] - /// - /// # Return Value - /// * [WalletResult]<[Vec]<[CashNote]>> - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::error; - /// use sn_transfers::Transfer; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let transfer = Transfer::from_hex("13abc").unwrap(); - /// // An example for using client.receive() for cashNotes - /// let cash_notes = match client.receive(&transfer, &wallet).await { - /// Ok(cash_notes) => cash_notes, - /// Err(err) => { - /// println!("Failed to verify and redeem transfer: {err:?}"); - /// error!("Failed to verify and redeem transfer: {err:?}"); - /// return Err(err.into()); - /// } - /// }; - /// # Ok(()) - /// - /// # } - /// ``` - pub async fn receive( - &self, - transfer: &Transfer, - wallet: &HotWallet, - ) -> WalletResult> { - let cashnotes = self - .network - .verify_and_unpack_transfer(transfer, wallet) - .map_err(|e| WalletError::CouldNotReceiveMoney(format!("{e:?}"))) - .await?; - let valuable_cashnotes = self.filter_out_already_spend_cash_notes(cashnotes).await?; - Ok(valuable_cashnotes) - } - - /// Check that the redeemed CashNotes are not already spent - async fn filter_out_already_spend_cash_notes( - &self, - mut cash_notes: Vec, - ) -> WalletResult> { - trace!("Validating CashNotes are not already spent"); - let mut tasks = JoinSet::new(); - for cn in &cash_notes { - let pk = cn.unique_pubkey(); - let addr = SpendAddress::from_unique_pubkey(&pk); - 
let self_clone = self.network.clone(); - let _ = tasks.spawn(async move { self_clone.get_spend(addr).await }); - } - while let Some(result) = tasks.join_next().await { - let res = result.map_err(|e| WalletError::FailedToGetSpend(format!("{e}")))?; - match res { - // if we get a RecordNotFound, it means the CashNote is not spent, which is good - Err(sn_networking::NetworkError::GetRecordError( - GetRecordError::RecordNotFound, - )) => (), - // if we get a spend, it means the CashNote is already spent - Ok(s) => { - warn!( - "CashNoteRedemption contains a CashNote that is already spent, skipping it: {:?}", - s.unique_pubkey() - ); - cash_notes.retain(|c| &c.unique_pubkey() != s.unique_pubkey()); - } - // report all other errors - Err(e) => return Err(WalletError::FailedToGetSpend(format!("{e}"))), - } - } - - if cash_notes.is_empty() { - return Err(WalletError::AllRedeemedCashnotesSpent); - } - - Ok(cash_notes) - } - - /// Verify that the spends referred to (in the CashNote) exist on the network. 
- /// - /// # Arguments - /// * cash_note - [CashNote] - /// - /// # Return value - /// [WalletResult] - /// - /// # Example - /// ```no_run - /// use sn_client::{Client, WalletClient, Error}; - /// # use tempfile::TempDir; - /// use bls::SecretKey; - /// use sn_transfers::{HotWallet, MainSecretKey}; - /// # #[tokio::main] - /// # async fn main() -> Result<(),Error>{ - /// use tracing::error; - /// use sn_transfers::Transfer; - /// let client = Client::new(SecretKey::random(), None, None, None).await?; - /// # let tmp_path = TempDir::new()?.path().to_owned(); - /// let mut wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; - /// let transfer = Transfer::from_hex("").unwrap(); - /// let cash_notes = client.receive(&transfer, &wallet).await?; - /// // Verification: - /// for cash_note in cash_notes { - /// println!("{:?}" , client.verify_cashnote(&cash_note).await.unwrap()); - /// } - /// # Ok(()) - /// - /// # } - /// ``` - pub async fn verify_cashnote(&self, cash_note: &CashNote) -> WalletResult<()> { - let address = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - - // We need to get all the spends in the cash_note from the network, - // and compare them to the spends in the cash_note, to know if the - // transfer is considered valid in the network. 
- let mut tasks = Vec::new(); - - info!( - "parent spends for cn; {address:?}: {:?}", - &cash_note.parent_spends.len() - ); - - for spend in &cash_note.parent_spends { - let address = SpendAddress::from_unique_pubkey(spend.unique_pubkey()); - info!( - "Getting parent spend for cn {address:?} pubkey {:?} from network at {address:?}", - spend.unique_pubkey() - ); - tasks.push(self.get_spend_from_network(address)); - } - - let mut received_spends = std::collections::BTreeSet::new(); - for result in join_all(tasks).await { - let network_valid_spend = match result { - Ok(spend) => Ok(spend), - Err(error) => match error { - Error::Network(sn_networking::NetworkError::DoubleSpendAttempt(spends)) => { - warn!("BurntSpend found with {spends:?}"); - Err(WalletError::BurntSpend) - } - err => Err(WalletError::CouldNotVerifyTransfer(format!("{err:?}"))), - }, - }?; - - let _ = received_spends.insert(network_valid_spend); - } - - // If all the spends in the cash_note are the same as the ones in the network, - // we have successfully verified that the cash_note is globally recognised and therefor valid. - if received_spends == cash_note.parent_spends { - return Ok(()); - } - - warn!( - "Unexpected parent spends found in CashNote verification at {:?}: {received_spends:?}.", - address - ); - Err(WalletError::UnexpectedParentSpends(address)) - } -} - -/// Use the client to send a CashNote from a local wallet to an address. -/// This marks the spent CashNote as spent in the Network -/// -/// # Arguments -/// * from - [HotWallet] -/// * amount - [NanoTokens] -/// * to - [MainPubkey] -/// * client - [Client] -/// * verify_store - Boolean. Set to true for mandatory verification via a GET request through a Spend on the network. 
-/// -/// # Example -/// ```no_run -/// use sn_client::{Client, WalletClient, Error}; -/// # use tempfile::TempDir; -/// use bls::SecretKey; -/// use sn_transfers::{HotWallet, MainSecretKey}; -/// # #[tokio::main] -/// # async fn main() -> Result<(),Error>{ -/// use tracing::error; -/// use sn_client::send; -/// use sn_transfers::Transfer; -/// let client = Client::new(SecretKey::random(), None, None, None).await?; -/// # let tmp_path = TempDir::new()?.path().to_owned(); -/// let mut first_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; -/// let mut second_wallet = HotWallet::load_from_path(&tmp_path,Some(MainSecretKey::new(SecretKey::random())))?; -/// let tokens = send( -/// first_wallet, // From -/// second_wallet.balance(), // To -/// second_wallet.address(), // Amount -/// &client, // Client -/// true, // Verification -/// ).await?; -/// # Ok(()) -/// # } -/// ``` -pub async fn send( - from: HotWallet, - amount: NanoTokens, - to: MainPubkey, - client: &Client, - verify_store: bool, -) -> Result { - if amount.is_zero() { - return Err(Error::AmountIsZero); - } - - let mut wallet_client = WalletClient::new(client.clone(), from); - - if let Err(err) = wallet_client - .resend_pending_transaction_until_success(verify_store) - .await - { - println!("Wallet has pre-unconfirmed transactions, can't progress further."); - warn!("Wallet has pre-unconfirmed transactions, can't progress further."); - return Err(err.into()); - } - - let new_cash_note = wallet_client - .send_cash_note(amount, to, verify_store) - .await - .map_err(|err| { - error!("Could not send cash note, err: {err:?}"); - err - })?; - - wallet_client - .resend_pending_transaction_until_success(verify_store) - .await?; - - wallet_client - .into_wallet() - .deposit_and_store_to_disk(&vec![new_cash_note.clone()])?; - - Ok(new_cash_note) -} diff --git a/sn_client/tests/folders_api.rs b/sn_client/tests/folders_api.rs deleted file mode 100644 index 
8340c3ad32..0000000000 --- a/sn_client/tests/folders_api.rs +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -// All tests require a network running so Clients can be instantiated. - -use bls::SecretKey; -use eyre::Result; -use sn_client::test_utils::{ - get_funded_wallet, get_new_client, pay_for_storage, random_file_chunk, -}; -use sn_client::{FolderEntry, FoldersApi, Metadata}; -use sn_protocol::{storage::ChunkAddress, NetworkAddress}; -use sn_registers::{EntryHash, RegisterAddress}; -use xor_name::XorName; - -#[tokio::test] -async fn test_folder_basics() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let mut rng = rand::thread_rng(); - let owner_sk = SecretKey::random(); - let owner_pk = owner_sk.public_key(); - let address = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let address_subdir = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, Some(address))?; - - let file_chunk = random_file_chunk(); - - let (file_entry_hash, file_meta_xorname, file_metadata) = - folders_api.add_file("file.txt".into(), file_chunk.clone(), None)?; - assert_eq!( - file_metadata, - Metadata { - name: "file.txt".to_string(), - content: FolderEntry::File(file_chunk) - } - ); - - let (subdir_entry_hash, subdir_meta_xorname, subdir_metadata) = - folders_api.add_folder("subdir".into(), address_subdir, None)?; - 
assert_eq!( - subdir_metadata, - Metadata { - name: "subdir".to_string(), - content: FolderEntry::Folder(address_subdir) - } - ); - - assert_eq!(folders_api.address(), &address); - assert_eq!( - folders_api.as_net_addr(), - NetworkAddress::RegisterAddress(address) - ); - assert_eq!( - folders_api.meta_addrs_to_pay(), - vec![ - NetworkAddress::ChunkAddress(ChunkAddress::new(file_meta_xorname)), - NetworkAddress::ChunkAddress(ChunkAddress::new(subdir_meta_xorname)) - ] - .into_iter() - .collect() - ); - - assert!(folders_api.contains(&file_entry_hash)); - assert!(folders_api.contains(&subdir_entry_hash)); - assert!(!folders_api.contains(&EntryHash::default())); - - assert_eq!( - folders_api.find_by_name("file.txt"), - Some((&file_meta_xorname, &file_metadata)) - ); - assert_eq!( - folders_api.find_by_name("subdir"), - Some((&subdir_meta_xorname, &subdir_metadata)) - ); - assert!(folders_api.find_by_name("inexistent").is_none()); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file_entry_hash, (file_meta_xorname, file_metadata)), - (subdir_entry_hash, (subdir_meta_xorname, subdir_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_remove_replace_entries() -> Result<()> { - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let mut folders_api = FoldersApi::new(client, wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - let file2_chunk = random_file_chunk(); - let file3_chunk = random_file_chunk(); - let file4_chunk = random_file_chunk(); - - let (file1_entry_hash, _, _) = - folders_api.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - folders_api.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - assert_eq!(folders_api.entries().await?.len(), 2); - assert!(folders_api.contains(&file1_entry_hash)); - 
assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_some()); - assert!(folders_api.find_by_name("file2.txt").is_some()); - - // let's now test removing file1.txt - folders_api.remove_item(file1_entry_hash)?; - assert!(!folders_api.contains(&file1_entry_hash)); - assert!(folders_api.contains(&file2_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - // now we test replacing file2.txt with file3.txt - let (file3_entry_hash, file3_meta_xorname, file3_metadata) = - folders_api.replace_file(file2_entry_hash, "file3.txt".into(), file3_chunk, None)?; - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) - ); - assert_eq!( - folders_api.entries().await?, - vec![( - file3_entry_hash, - (file3_meta_xorname, file3_metadata.clone()) - ),] - .into_iter() - .collect() - ); - - // let's add file4.txt, and check that final state is correct - let (file4_entry_hash, file4_meta_xorname, file4_metadata) = - folders_api.add_file("file4.txt".into(), file4_chunk, None)?; - - assert!(!folders_api.contains(&file1_entry_hash)); - assert!(!folders_api.contains(&file2_entry_hash)); - assert!(folders_api.contains(&file3_entry_hash)); - assert!(folders_api.contains(&file4_entry_hash)); - - assert!(folders_api.find_by_name("file1.txt").is_none()); - assert!(folders_api.find_by_name("file2.txt").is_none()); - assert_eq!( - folders_api.find_by_name("file3.txt"), - Some((&file3_meta_xorname, &file3_metadata)) 
- ); - assert_eq!( - folders_api.find_by_name("file4.txt"), - Some((&file4_meta_xorname, &file4_metadata)) - ); - - assert_eq!( - folders_api.entries().await?, - vec![ - (file3_entry_hash, (file3_meta_xorname, file3_metadata)), - (file4_entry_hash, (file4_meta_xorname, file4_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_retrieve() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_retrieve", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut folder = FoldersApi::new(client.clone(), wallet_dir, None)?; - let mut subfolder = FoldersApi::new(client.clone(), wallet_dir, None)?; - - let file1_chunk = random_file_chunk(); - - let (file1_entry_hash, file1_meta_xorname, file1_metadata) = - folder.add_file("file1.txt".into(), file1_chunk.clone(), None)?; - let (subfolder_entry_hash, subfolder_meta_xorname, subfolder_metadata) = - folder.add_folder("subfolder".into(), *subfolder.address(), None)?; - - let file2_chunk = random_file_chunk(); - let (file2_entry_hash, file2_meta_xorname, file2_metadata) = - subfolder.add_file("file2.txt".into(), file2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder.as_net_addr(), subfolder.as_net_addr()]; - addrs2pay.extend(folder.meta_addrs_to_pay()); - addrs2pay.extend(subfolder.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder.sync(Default::default()).await?; - subfolder.sync(Default::default()).await?; - - let mut retrieved_folder = - FoldersApi::retrieve(client.clone(), wallet_dir, *folder.address()).await?; - let mut retrieved_subfolder = - FoldersApi::retrieve(client, wallet_dir, *subfolder.address()).await?; - - assert_eq!(retrieved_folder.entries().await?.len(), 2); 
- assert!(retrieved_folder.contains(&file1_entry_hash)); - assert!(retrieved_folder.contains(&subfolder_entry_hash)); - assert_eq!( - retrieved_folder.find_by_name("file1.txt"), - Some((&file1_meta_xorname, &file1_metadata)) - ); - assert_eq!( - retrieved_folder.find_by_name("subfolder"), - Some((&subfolder_meta_xorname, &subfolder_metadata)) - ); - - assert_eq!(retrieved_subfolder.entries().await?.len(), 1); - assert!(retrieved_subfolder.contains(&file2_entry_hash)); - assert_eq!( - retrieved_subfolder.find_by_name("file2.txt"), - Some((&file2_meta_xorname, &file2_metadata)) - ); - - assert_eq!( - retrieved_folder.entries().await?, - vec![ - (file1_entry_hash, (file1_meta_xorname, file1_metadata)), - ( - subfolder_entry_hash, - (subfolder_meta_xorname, subfolder_metadata) - ), - ] - .into_iter() - .collect() - ); - assert_eq!( - retrieved_subfolder.entries().await?, - vec![(file2_entry_hash, (file2_meta_xorname, file2_metadata)),] - .into_iter() - .collect() - ); - - Ok(()) -} - -#[tokio::test] -async fn test_folder_merge_changes() -> Result<()> { - let _log_guards = - sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_merge_changes", false); - - let owner_sk = SecretKey::random(); - let client = get_new_client(owner_sk.clone()).await?; - let tmp_dir = tempfile::tempdir()?; - let wallet_dir = tmp_dir.path(); - let _ = get_funded_wallet(&client, wallet_dir).await?; - - let mut rng = rand::thread_rng(); - let owner_pk = owner_sk.public_key(); - let folder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - let subfolder_addr = RegisterAddress::new(XorName::random(&mut rng), owner_pk); - - let mut folder_a = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_a = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_a1_chunk = random_file_chunk(); - let file_a2_chunk = random_file_chunk(); - - let (file_a1_entry_hash, file_a1_meta_xorname, file_a1_metadata) = - 
folder_a.add_file("fileA1.txt".into(), file_a1_chunk.clone(), None)?; - let (subfolder_a_entry_hash, subfolder_a_meta_xorname, subfolder_a_metadata) = - folder_a.add_folder("subfolderA".into(), *subfolder_a.address(), None)?; - let (file_a2_entry_hash, file_a2_meta_xorname, file_a2_metadata) = - subfolder_a.add_file("fileA2.txt".into(), file_a2_chunk.clone(), None)?; - - let mut folder_b = FoldersApi::new(client.clone(), wallet_dir, Some(folder_addr))?; - let mut subfolder_b = FoldersApi::new(client.clone(), wallet_dir, Some(subfolder_addr))?; - let file_b1_chunk = random_file_chunk(); - let file_b2_chunk = random_file_chunk(); - - let (file_b1_entry_hash, file_b1_meta_xorname, file_b1_metadata) = - folder_b.add_file("fileB1.txt".into(), file_b1_chunk.clone(), None)?; - let (subfolder_b_entry_hash, subfolder_b_meta_xorname, subfolder_b_metadata) = - folder_b.add_folder("subfolderB".into(), *subfolder_b.address(), None)?; - let (file_b2_entry_hash, file_b2_meta_xorname, file_b2_metadata) = - subfolder_b.add_file("fileB2.txt".into(), file_b2_chunk.clone(), None)?; - - // let's pay for storage - let mut addrs2pay = vec![folder_a.as_net_addr(), subfolder_a.as_net_addr()]; - addrs2pay.extend(folder_a.meta_addrs_to_pay()); - addrs2pay.extend(subfolder_a.meta_addrs_to_pay()); - addrs2pay.extend(folder_b.meta_addrs_to_pay()); - addrs2pay.extend(subfolder_b.meta_addrs_to_pay()); - pay_for_storage(&client, wallet_dir, addrs2pay).await?; - - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - folder_b.sync(Default::default()).await?; - subfolder_b.sync(Default::default()).await?; - folder_a.sync(Default::default()).await?; - subfolder_a.sync(Default::default()).await?; - - let folder_a_entries = folder_a.entries().await?; - let folder_b_entries = folder_b.entries().await?; - let subfolder_a_entries = subfolder_a.entries().await?; - let subfolder_b_entries = subfolder_b.entries().await?; - - assert_eq!(folder_a_entries.len(), 4); - 
assert_eq!(folder_b_entries.len(), 4); - assert_eq!(subfolder_a_entries.len(), 2); - assert_eq!(subfolder_b_entries.len(), 2); - - assert!(folder_a.contains(&file_a1_entry_hash)); - assert!(folder_a.contains(&file_b1_entry_hash)); - assert!(folder_a.contains(&subfolder_a_entry_hash)); - assert!(folder_a.contains(&subfolder_b_entry_hash)); - assert!(subfolder_a.contains(&file_a2_entry_hash)); - assert!(subfolder_a.contains(&file_b2_entry_hash)); - - assert!(folder_b.contains(&file_a1_entry_hash)); - assert!(folder_b.contains(&file_b1_entry_hash)); - assert!(folder_b.contains(&subfolder_a_entry_hash)); - assert!(folder_b.contains(&subfolder_b_entry_hash)); - assert!(subfolder_b.contains(&file_a2_entry_hash)); - assert!(subfolder_b.contains(&file_b2_entry_hash)); - - assert_eq!( - folder_a.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_a.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!( - folder_b.find_by_name("fileA1.txt"), - Some((&file_a1_meta_xorname, &file_a1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("fileB1.txt"), - Some((&file_b1_meta_xorname, &file_b1_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderA"), - Some((&subfolder_a_meta_xorname, &subfolder_a_metadata)) - ); - assert_eq!( - folder_b.find_by_name("subfolderB"), - Some((&subfolder_b_meta_xorname, &subfolder_b_metadata)) - ); - - assert_eq!(folder_a_entries, folder_b_entries); - assert_eq!( - folder_a_entries, - vec![ - (file_a1_entry_hash, (file_a1_meta_xorname, file_a1_metadata)), - (file_b1_entry_hash, (file_b1_meta_xorname, file_b1_metadata)), - ( - subfolder_a_entry_hash, - (subfolder_a_meta_xorname, subfolder_a_metadata) - ), - ( - 
subfolder_b_entry_hash, - (subfolder_b_meta_xorname, subfolder_b_metadata) - ), - ] - .into_iter() - .collect() - ); - - assert_eq!( - subfolder_a.find_by_name("fileA2.txt"), - Some((&file_a2_meta_xorname, &file_a2_metadata)) - ); - assert_eq!( - subfolder_a.find_by_name("fileB2.txt"), - Some((&file_b2_meta_xorname, &file_b2_metadata)) - ); - - assert_eq!(subfolder_a_entries, subfolder_b_entries); - assert_eq!( - subfolder_a_entries, - vec![ - (file_a2_entry_hash, (file_a2_meta_xorname, file_a2_metadata)), - (file_b2_entry_hash, (file_b2_meta_xorname, file_b2_metadata)) - ] - .into_iter() - .collect() - ); - - Ok(()) -} diff --git a/sn_faucet/CHANGELOG.md b/sn_faucet/CHANGELOG.md deleted file mode 100644 index e8b9817648..0000000000 --- a/sn_faucet/CHANGELOG.md +++ /dev/null @@ -1,1355 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
- -## [Unreleased] - -## [0.4.27](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.26...sn_faucet-v0.4.27) - 2024-06-04 - -### Other -- release -- release -- *(release)* sn_client-v0.107.5/sn_networking-v0.16.3/sn_cli-v0.93.4/sn_node-v0.107.4/node-launchpad-v0.3.5/sn-node-manager-v0.9.4/sn_auditor-v0.1.23/sn_peers_acquisition-v0.3.3/sn_faucet-v0.4.25/sn_node_rpc_client-v0.6.22 -- *(release)* sn_auditor-v0.1.22/sn_faucet-v0.4.24/node-launchpad-v0.3.4 - -## [0.4.26](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.25...sn_faucet-v0.4.26) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.25](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.24...sn_faucet-v0.4.25) - 2024-06-04 - -### Other -- updated the following local packages: sn_client, sn_cli - -## [0.4.24](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.23...sn_faucet-v0.4.24) - 2024-06-04 - -### Other -- remove gifting and start initial data uploads - -## [0.4.23](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.22...sn_faucet-v0.4.23) - 2024-06-04 - -### Added -- *(faucet_server)* download and upload gutenberger book part by part - -## [0.4.22](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.21...sn_faucet-v0.4.22) - 2024-06-03 - -### Other -- updated the following local packages: sn_client, sn_transfers, sn_cli - -## [0.4.20](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.19...sn_faucet-v0.4.20) - 2024-06-03 - -### Added -- *(faucet_server)* upload sample files and print head_addresses -- *(faucet_server)* download some iso files during startup - -### Other -- no openssl dep for faucet - -## [0.4.19](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.18...sn_faucet-v0.4.19) - 2024-05-24 - -### Added -- *(faucet)* allow gifting by default -- *(faucet)* increase initial balance -- *(faucet)* make gifting server feat dependent -- *(faucet)* 
send small amount to faucet, rest to foundation -- *(faucet)* add feat for gifting-from-genesis -- faucet donate endpoint to feed the faucet -- *(faucet)* fully limit any concurrency -- *(faucet)* log from sn_client -- report protocol mismatch error - -### Fixed -- *(faucet)* cleanup unused vars -- *(faucet)* rate limit before getting wallet -- *(faucet)* ensure faucet is funded in main fn -- update calls to HotWallet::load -- *(faucet)* fix distribution 'from' wallet loading -- *(client)* move acct_packet mnemonic into client layer - -### Other -- enable default features during faucet release -- *(release)* sn_auditor-v0.1.16/sn_cli-v0.91.4/sn_faucet-v0.4.18/sn_metrics-v0.1.7/sn_node-v0.106.4/sn_service_management-v0.2.8/node-launchpad-v0.1.5/sn-node-manager-v0.7.7/sn_node_rpc_client-v0.6.17 -- *(release)* sn_auditor-v0.1.15/sn_cli-v0.91.3/sn_faucet-v0.4.17/sn_metrics-v0.1.6/sn_node-v0.106.3/sn_service_management-v0.2.7/node-launchpad-v0.1.2/sn_node_rpc_client-v0.6.16 -- *(release)* sn_client-v0.106.2/sn_networking-v0.15.2/sn_cli-v0.91.2/sn_node-v0.106.2/sn_auditor-v0.1.14/sn_faucet-v0.4.16/sn_node_rpc_client-v0.6.15 -- *(release)* sn_auditor-v0.1.13/sn_client-v0.106.1/sn_networking-v0.15.1/sn_protocol-v0.16.6/sn_cli-v0.91.1/sn_faucet-v0.4.15/sn_node-v0.106.1/node-launchpad-v0.1.1/sn_node_rpc_client-v0.6.14/sn_peers_acquisition-v0.2.12/sn_service_management-v0.2.6 -- *(release)* sn_auditor-v0.1.12/sn_client-v0.106.0/sn_networking-v0.15.0/sn_transfers-v0.18.0/sn_peers_acquisition-v0.2.11/sn_logging-v0.2.26/sn_cli-v0.91.0/sn_faucet-v0.4.14/sn_metrics-v0.1.5/sn_node-v0.106.0/sn_service_management-v0.2.5/test_utils-v0.4.1/node-launchpad-v/sn-node-manager-v0.7.5/sn_node_rpc_client-v0.6.13/token_supplies-v0.1.48/sn_protocol-v0.16.5 -- *(versions)* sync versions with latest crates.io vs -- addres review comments -- *(faucet)* log initilization failure and upload faucet log -- *(CI)* upload faucet log during CI -- *(release)* 
sn_auditor-v0.1.7/sn_client-v0.105.3/sn_networking-v0.14.4/sn_protocol-v0.16.3/sn_build_info-v0.1.7/sn_transfers-v0.17.2/sn_peers_acquisition-v0.2.10/sn_cli-v0.90.4/sn_faucet-v0.4.9/sn_metrics-v0.1.4/sn_node-v0.105.6/sn_service_management-v0.2.4/sn-node-manager-v0.7.4/sn_node_rpc_client-v0.6.8/token_supplies-v0.1.47 -- *(deps)* bump dependencies - -## [0.4.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.17...sn_faucet-v0.4.18) - 2024-05-20 - -### Other -- update Cargo.lock dependencies - -## [0.4.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.16...sn_faucet-v0.4.17) - 2024-05-15 - -### Other -- update Cargo.lock dependencies - -## [0.4.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.15...sn_faucet-v0.4.16) - 2024-05-09 - -### Other -- updated the following local packages: sn_client - -## [0.4.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14...sn_faucet-v0.4.15) - 2024-05-08 - -### Other -- update Cargo.lock dependencies - -## [0.4.14-alpha.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.14-alpha.0...sn_faucet-v0.4.14-alpha.1) - 2024-05-07 - -### Added -- faucet donate endpoint to feed the faucet -- *(faucet)* fully limit any concurrency -- *(faucet)* log from sn_client -- report protocol mismatch error - -### Fixed -- *(faucet)* cleanup unused vars -- *(faucet)* rate limit before getting wallet -- *(faucet)* ensure faucet is funded in main fn -- update calls to HotWallet::load -- *(faucet)* fix distribution 'from' wallet loading -- *(client)* move acct_packet mnemonic into client layer - -### Other -- *(versions)* sync versions with latest crates.io vs -- addres review comments -- *(faucet)* log initilization failure and upload faucet log -- *(CI)* upload faucet log during CI -- *(deps)* bump dependencies - -## [0.4.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.4.2...sn_faucet-v0.4.3) - 2024-03-28 - -### Other -- updated the following local 
packages: sn_client - -## [0.4.2](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.1...sn_faucet-v0.4.2) - 2024-03-28 - -### Fixed -- *(faucet)* bind to wan - -## [0.4.1](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.4.0...sn_faucet-v0.4.1) - 2024-03-28 - -### Fixed -- *(faucet)* add build info) - -## [0.4.0](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.85...sn_faucet-v0.4.0) - 2024-03-27 - -### Added -- *(faucet)* rate limit based upon wallet locks -- *(faucet)* start using warp for simpler server tweaks -- only give out 1snt per req -- make logging simpler to use -- [**breaking**] remove gossip code - -## [0.3.85](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.84...sn_faucet-v0.3.85) - 2024-03-21 - -### Added -- *(log)* set log levels on the fly - -### Other -- *(release)* sn_cli-v0.89.84/sn_node-v0.104.40/sn_networking-v0.13.34/sn_service_management-v0.1.1/sn_client-v0.104.30 - -## [0.3.84](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.83...sn_faucet-v0.3.84) - 2024-03-14 - -### Other -- *(release)* sn_transfers-v0.16.3/sn_cli-v0.89.82 - -## [0.3.83](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.82-alpha.0...sn_faucet-v0.3.83) - 2024-03-08 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.81](https://github.com/joshuef/safe_network/compare/sn_faucet-v0.3.80...sn_faucet-v0.3.81) - 2024-03-06 - -### Added -- provide `faucet add` command -- *(faucet)* claim using signature of safe wallet - -### Other -- *(release)* sn_transfers-v0.16.1 - -## [0.3.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.79...sn_faucet-v0.3.80) - 2024-02-23 - -### Other -- update Cargo.lock dependencies - -## [0.3.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.78...sn_faucet-v0.3.79) - 2024-02-21 - -### Other -- update Cargo.lock dependencies - -## 
[0.3.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.77...sn_faucet-v0.3.78) - 2024-02-20 - -### Other -- *(release)* sn_protocol-v0.14.6/sn_node-v0.104.33/sn-node-manager-v0.3.9/sn_cli-v0.89.78/sn_client-v0.104.25/sn_networking-v0.13.27/sn_node_rpc_client-v0.4.64 - -## [0.3.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.76...sn_faucet-v0.3.77) - 2024-02-20 - -### Other -- fix distribution test check - -## [0.3.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.75...sn_faucet-v0.3.76) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.74...sn_faucet-v0.3.75) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.26/sn-node-manager-v0.3.6/sn_client-v0.104.23/sn_node-v0.104.31 - -## [0.3.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.73...sn_faucet-v0.3.74) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.72...sn_faucet-v0.3.73) - 2024-02-20 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.71...sn_faucet-v0.3.72) - 2024-02-20 - -### Other -- updated the following local packages: sn_client - -## [0.3.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.70...sn_faucet-v0.3.71) - 2024-02-20 - -### Other -- *(release)* sn_networking-v0.13.23/sn_node-v0.104.26/sn_client-v0.104.18/sn_node_rpc_client-v0.4.57 - -## [0.3.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.69...sn_faucet-v0.3.70) - 2024-02-19 - -### Other -- *(release)* sn_networking-v0.13.21/sn_client-v0.104.16/sn_node-v0.104.24 - -## [0.3.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.68...sn_faucet-v0.3.69) - 2024-02-19 - -### Other -- 
token_distribution against network - -## [0.3.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.67...sn_faucet-v0.3.68) - 2024-02-15 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.66...sn_faucet-v0.3.67) - 2024-02-15 - -### Other -- *(release)* sn_protocol-v0.14.1/sn-node-manager-v0.3.1/sn_cli-v0.89.68/sn_client-v0.104.13/sn_networking-v0.13.18/sn_node-v0.104.21/sn_node_rpc_client-v0.4.54 - -## [0.3.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.65...sn_faucet-v0.3.66) - 2024-02-15 - -### Other -- token_distribution -- *(release)* sn_protocol-v0.14.0/sn-node-manager-v0.3.0/sn_cli-v0.89.67/sn_client-v0.104.12/sn_networking-v0.13.17/sn_node-v0.104.20/sn_node_rpc_client-v0.4.53 - -## [0.3.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.64...sn_faucet-v0.3.65) - 2024-02-14 - -### Other -- *(release)* sn_protocol-v0.13.0/sn-node-manager-v0.2.0/sn_cli-v0.89.65/sn_client-v0.104.10/sn_networking-v0.13.15/sn_node-v0.104.18/sn_node_rpc_client-v0.4.51 - -## [0.3.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.63...sn_faucet-v0.3.64) - 2024-02-13 - -### Other -- updated the following local packages: sn_client, sn_transfers - -## [0.3.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.62...sn_faucet-v0.3.63) - 2024-02-12 - -### Other -- *(faucet)* improve faucet server response for clippy -- *(release)* sn_networking-v0.13.12/sn_node-v0.104.12/sn-node-manager-v0.1.59/sn_client-v0.104.7/sn_node_rpc_client-v0.4.46 - -## [0.3.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.61...sn_faucet-v0.3.62) - 2024-02-12 - -### Other -- updated the following local packages: sn_client - -## [0.3.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.60...sn_faucet-v0.3.61) - 2024-02-12 - -### Added -- *(faucet)* api endpoint to return distribution - -## 
[0.3.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.59...sn_faucet-v0.3.60) - 2024-02-12 - -### Other -- update Cargo.lock dependencies - -## [0.3.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.58...sn_faucet-v0.3.59) - 2024-02-09 - -### Other -- *(release)* sn_networking-v0.13.10/sn_client-v0.104.4/sn_node-v0.104.8 - -## [0.3.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.57...sn_faucet-v0.3.58) - 2024-02-09 - -### Other -- update dependencies - -## [0.3.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.56...sn_faucet-v0.3.57) - 2024-02-08 - -### Other -- copyright update to current year - -## [0.3.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.55...sn_faucet-v0.3.56) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.54...sn_faucet-v0.3.55) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.53...sn_faucet-v0.3.54) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.52...sn_faucet-v0.3.53) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.51...sn_faucet-v0.3.52) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.50...sn_faucet-v0.3.51) - 2024-02-08 - -### Other -- update dependencies - -## [0.3.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.49...sn_faucet-v0.3.50) - 2024-02-07 - -### Other -- update dependencies - -## [0.3.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.48...sn_faucet-v0.3.49) - 2024-02-07 - -### Other -- update dependencies - -## [0.3.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.47...sn_faucet-v0.3.48) - 
2024-02-06 - -### Other -- update dependencies - -## [0.3.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.46...sn_faucet-v0.3.47) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.45...sn_faucet-v0.3.46) - 2024-02-06 - -### Other -- update dependencies - -## [0.3.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.44...sn_faucet-v0.3.45) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.43...sn_faucet-v0.3.44) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.42...sn_faucet-v0.3.43) - 2024-02-05 - -### Added -- *(faucet)* initial distributions in background -- *(faucet)* create distributions for maid addrs - -### Other -- *(ci)* make deps optional if used only inside a feature -- *(faucet)* fix typo/clippy/fmt after rebase - -## [0.3.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.41...sn_faucet-v0.3.42) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.40...sn_faucet-v0.3.41) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.39...sn_faucet-v0.3.40) - 2024-02-05 - -### Other -- update dependencies - -## [0.3.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.38...sn_faucet-v0.3.39) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.37...sn_faucet-v0.3.38) - 2024-02-02 - -### Other -- update dependencies - -## [0.3.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.36...sn_faucet-v0.3.37) - 2024-02-02 - -### Added -- make token distribution an option - -### Fixed -- minreq as optional dep - -## 
[0.3.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.35...sn_faucet-v0.3.36) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.34...sn_faucet-v0.3.35) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.33...sn_faucet-v0.3.34) - 2024-02-01 - -### Other -- update dependencies - -## [0.3.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.32...sn_faucet-v0.3.33) - 2024-01-31 - -### Other -- remove the `sn_testnet` crate - -## [0.3.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.31...sn_faucet-v0.3.32) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.30...sn_faucet-v0.3.31) - 2024-01-31 - -### Other -- update dependencies - -## [0.3.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.29...sn_faucet-v0.3.30) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.28...sn_faucet-v0.3.29) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.27...sn_faucet-v0.3.28) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.26...sn_faucet-v0.3.27) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.25...sn_faucet-v0.3.26) - 2024-01-30 - -### Other -- update dependencies - -## [0.3.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.24...sn_faucet-v0.3.25) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.23...sn_faucet-v0.3.24) - 2024-01-29 - -### Added -- *(faucet)* add Snapshot type -- 
*(faucet)* get pubkeys from repo not pastebin -- *(faucet)* custom types for maid values -- *(faucet)* load public keys for distribution -- *(faucet)* snapshot is a hashmap - -## [0.3.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.22...sn_faucet-v0.3.23) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.21...sn_faucet-v0.3.22) - 2024-01-29 - -### Other -- update dependencies - -## [0.3.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.20...sn_faucet-v0.3.21) - 2024-01-26 - -### Other -- update dependencies - -## [0.3.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.19...sn_faucet-v0.3.20) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.18...sn_faucet-v0.3.19) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.17...sn_faucet-v0.3.18) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.16...sn_faucet-v0.3.17) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.15...sn_faucet-v0.3.16) - 2024-01-25 - -### Added -- client webtransport-websys feat - -## [0.3.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.14...sn_faucet-v0.3.15) - 2024-01-25 - -### Other -- update dependencies - -## [0.3.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.13...sn_faucet-v0.3.14) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.12...sn_faucet-v0.3.13) - 2024-01-24 - -### Other -- update dependencies - -## [0.3.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.11...sn_faucet-v0.3.12) - 2024-01-24 - -### Other -- update 
dependencies - -## [0.3.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.10...sn_faucet-v0.3.11) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.9...sn_faucet-v0.3.10) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.8...sn_faucet-v0.3.9) - 2024-01-23 - -### Other -- update dependencies - -## [0.3.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.7...sn_faucet-v0.3.8) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.6...sn_faucet-v0.3.7) - 2024-01-22 - -### Other -- update dependencies - -## [0.3.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.5...sn_faucet-v0.3.6) - 2024-01-21 - -### Other -- update dependencies - -## [0.3.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.4...sn_faucet-v0.3.5) - 2024-01-18 - -### Other -- update dependencies - -## [0.3.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.3...sn_faucet-v0.3.4) - 2024-01-18 - -### Other -- update dependencies - -## [0.3.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.2...sn_faucet-v0.3.3) - 2024-01-18 - -### Added -- set quic as default transport - -## [0.3.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.1...sn_faucet-v0.3.2) - 2024-01-18 - -### Added -- *(faucet)* download snapshot of maid balances - -## [0.3.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.3.0...sn_faucet-v0.3.1) - 2024-01-17 - -### Other -- update dependencies - -## [0.3.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.22...sn_faucet-v0.3.0) - 2024-01-17 - -### Other -- *(client)* [**breaking**] move out client connection progress bar - -## [0.2.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.21...sn_faucet-v0.2.22) - 
2024-01-17 - -### Other -- update dependencies - -## [0.2.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.20...sn_faucet-v0.2.21) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.19...sn_faucet-v0.2.20) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.18...sn_faucet-v0.2.19) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.17...sn_faucet-v0.2.18) - 2024-01-16 - -### Other -- update dependencies - -## [0.2.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.16...sn_faucet-v0.2.17) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.15...sn_faucet-v0.2.16) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.14...sn_faucet-v0.2.15) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.13...sn_faucet-v0.2.14) - 2024-01-15 - -### Other -- update dependencies - -## [0.2.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.12...sn_faucet-v0.2.13) - 2024-01-12 - -### Other -- update dependencies - -## [0.2.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.11...sn_faucet-v0.2.12) - 2024-01-12 - -### Other -- update dependencies - -## [0.2.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.10...sn_faucet-v0.2.11) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.9...sn_faucet-v0.2.10) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.8...sn_faucet-v0.2.9) - 2024-01-11 - -### Other -- 
update dependencies - -## [0.2.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.7...sn_faucet-v0.2.8) - 2024-01-11 - -### Other -- update dependencies - -## [0.2.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.6...sn_faucet-v0.2.7) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.5...sn_faucet-v0.2.6) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.4...sn_faucet-v0.2.5) - 2024-01-10 - -### Other -- update dependencies - -## [0.2.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.3...sn_faucet-v0.2.4) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.2...sn_faucet-v0.2.3) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.1...sn_faucet-v0.2.2) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.2.0...sn_faucet-v0.2.1) - 2024-01-09 - -### Other -- update dependencies - -## [0.2.0](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.126...sn_faucet-v0.2.0) - 2024-01-08 - -### Added -- provide `--first` argument for `safenode` - -## [0.1.126](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.125...sn_faucet-v0.1.126) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.125](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.124...sn_faucet-v0.1.125) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.124](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.123...sn_faucet-v0.1.124) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.123](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.122...sn_faucet-v0.1.123) - 2024-01-08 - -### Other -- update 
dependencies - -## [0.1.122](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.121...sn_faucet-v0.1.122) - 2024-01-08 - -### Other -- update dependencies - -## [0.1.121](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.120...sn_faucet-v0.1.121) - 2024-01-06 - -### Other -- update dependencies - -## [0.1.120](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.119...sn_faucet-v0.1.120) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.119](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.118...sn_faucet-v0.1.119) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.118](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.117...sn_faucet-v0.1.118) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.117](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.116...sn_faucet-v0.1.117) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.116](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.115...sn_faucet-v0.1.116) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.115](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.114...sn_faucet-v0.1.115) - 2024-01-05 - -### Other -- update dependencies - -## [0.1.114](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.113...sn_faucet-v0.1.114) - 2024-01-04 - -### Other -- update dependencies - -## [0.1.113](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.112...sn_faucet-v0.1.113) - 2024-01-04 - -### Other -- update dependencies - -## [0.1.112](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.111...sn_faucet-v0.1.112) - 2024-01-03 - -### Other -- update dependencies - -## [0.1.111](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.110...sn_faucet-v0.1.111) - 2024-01-03 - -### Other -- update dependencies - -## [0.1.110](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.109...sn_faucet-v0.1.110) - 2024-01-03 - -### 
Other -- update dependencies - -## [0.1.109](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.108...sn_faucet-v0.1.109) - 2024-01-02 - -### Other -- update dependencies - -## [0.1.108](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.107...sn_faucet-v0.1.108) - 2024-01-02 - -### Other -- update dependencies - -## [0.1.107](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.106...sn_faucet-v0.1.107) - 2023-12-29 - -### Added -- restart faucet_server from breaking point - -## [0.1.106](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.105...sn_faucet-v0.1.106) - 2023-12-29 - -### Other -- update dependencies - -## [0.1.105](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.104...sn_faucet-v0.1.105) - 2023-12-29 - -### Other -- update dependencies - -## [0.1.104](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.103...sn_faucet-v0.1.104) - 2023-12-26 - -### Other -- update dependencies - -## [0.1.103](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.102...sn_faucet-v0.1.103) - 2023-12-22 - -### Other -- update dependencies - -## [0.1.102](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.101...sn_faucet-v0.1.102) - 2023-12-22 - -### Other -- update dependencies - -## [0.1.101](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.100...sn_faucet-v0.1.101) - 2023-12-21 - -### Other -- update dependencies - -## [0.1.100](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.99...sn_faucet-v0.1.100) - 2023-12-21 - -### Other -- update dependencies - -## [0.1.99](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.98...sn_faucet-v0.1.99) - 2023-12-20 - -### Other -- update dependencies - -## [0.1.98](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.97...sn_faucet-v0.1.98) - 2023-12-19 - -### Other -- update dependencies - -## 
[0.1.97](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.96...sn_faucet-v0.1.97) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.96](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.95...sn_faucet-v0.1.96) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.95](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.94...sn_faucet-v0.1.95) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.94](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.93...sn_faucet-v0.1.94) - 2023-12-19 - -### Other -- update dependencies - -## [0.1.93](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.92...sn_faucet-v0.1.93) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.92](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.91...sn_faucet-v0.1.92) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.91](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.90...sn_faucet-v0.1.91) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.90](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.89...sn_faucet-v0.1.90) - 2023-12-18 - -### Other -- update dependencies - -## [0.1.89](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.88...sn_faucet-v0.1.89) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.88](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.87...sn_faucet-v0.1.88) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.87](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.86...sn_faucet-v0.1.87) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.86](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.85...sn_faucet-v0.1.86) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.85](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.84...sn_faucet-v0.1.85) - 2023-12-14 - -### Other -- update dependencies - -## 
[0.1.84](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.83...sn_faucet-v0.1.84) - 2023-12-14 - -### Other -- update dependencies - -## [0.1.83](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.82...sn_faucet-v0.1.83) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.82](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.81...sn_faucet-v0.1.82) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.81](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.80...sn_faucet-v0.1.81) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.80](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.79...sn_faucet-v0.1.80) - 2023-12-13 - -### Other -- update dependencies - -## [0.1.79](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.78...sn_faucet-v0.1.79) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.78](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.77...sn_faucet-v0.1.78) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.77](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.76...sn_faucet-v0.1.77) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.76](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.75...sn_faucet-v0.1.76) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.75](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.74...sn_faucet-v0.1.75) - 2023-12-12 - -### Other -- update dependencies - -## [0.1.74](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.73...sn_faucet-v0.1.74) - 2023-12-11 - -### Other -- update dependencies - -## [0.1.73](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.72...sn_faucet-v0.1.73) - 2023-12-11 - -### Other -- update dependencies - -## [0.1.72](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.71...sn_faucet-v0.1.72) - 2023-12-08 - -### Other -- update dependencies - -## 
[0.1.71](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.70...sn_faucet-v0.1.71) - 2023-12-08 - -### Other -- update dependencies - -## [0.1.70](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.69...sn_faucet-v0.1.70) - 2023-12-08 - -### Other -- update dependencies - -## [0.1.69](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.68...sn_faucet-v0.1.69) - 2023-12-07 - -### Other -- update dependencies - -## [0.1.68](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.67...sn_faucet-v0.1.68) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.67](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.66...sn_faucet-v0.1.67) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.66](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.65...sn_faucet-v0.1.66) - 2023-12-06 - -### Other -- update dependencies - -## [0.1.65](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.64...sn_faucet-v0.1.65) - 2023-12-06 - -### Other -- remove needless pass by value -- use inline format args -- add boilerplate for workspace lints - -## [0.1.64](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.63...sn_faucet-v0.1.64) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.63](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.62...sn_faucet-v0.1.63) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.62](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.61...sn_faucet-v0.1.62) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.61](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.60...sn_faucet-v0.1.61) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.60](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.59...sn_faucet-v0.1.60) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.59](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.58...sn_faucet-v0.1.59) - 
2023-12-05 - -### Other -- update dependencies - -## [0.1.58](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.57...sn_faucet-v0.1.58) - 2023-12-05 - -### Other -- update dependencies - -## [0.1.57](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.56...sn_faucet-v0.1.57) - 2023-12-04 - -### Added -- *(testnet)* wait till faucet server starts - -### Other -- *(faucet)* print on claim genesis error - -## [0.1.56](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.55...sn_faucet-v0.1.56) - 2023-12-01 - -### Other -- update dependencies - -## [0.1.55](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.54...sn_faucet-v0.1.55) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.54](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.53...sn_faucet-v0.1.54) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.53](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.52...sn_faucet-v0.1.53) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.52](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.51...sn_faucet-v0.1.52) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.51](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.50...sn_faucet-v0.1.51) - 2023-11-29 - -### Other -- update dependencies - -## [0.1.50](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.49...sn_faucet-v0.1.50) - 2023-11-29 - -### Added -- add missing quic features - -## [0.1.49](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.48...sn_faucet-v0.1.49) - 2023-11-29 - -### Added -- verify spends through the cli - -## [0.1.48](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.47...sn_faucet-v0.1.48) - 2023-11-28 - -### Other -- update dependencies - -## [0.1.47](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.46...sn_faucet-v0.1.47) - 2023-11-28 - -### Other -- update dependencies - -## 
[0.1.46](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.45...sn_faucet-v0.1.46) - 2023-11-28 - -### Other -- update dependencies - -## [0.1.45](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.44...sn_faucet-v0.1.45) - 2023-11-27 - -### Other -- update dependencies - -## [0.1.44](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.43...sn_faucet-v0.1.44) - 2023-11-24 - -### Other -- update dependencies - -## [0.1.43](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.42...sn_faucet-v0.1.43) - 2023-11-24 - -### Other -- update dependencies - -## [0.1.42](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.41...sn_faucet-v0.1.42) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.41](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.40...sn_faucet-v0.1.41) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.40](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.39...sn_faucet-v0.1.40) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.39](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.38...sn_faucet-v0.1.39) - 2023-11-23 - -### Other -- update dependencies - -## [0.1.38](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.37...sn_faucet-v0.1.38) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.37](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.36...sn_faucet-v0.1.37) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.36](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.35...sn_faucet-v0.1.36) - 2023-11-22 - -### Other -- update dependencies - -## [0.1.35](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.34...sn_faucet-v0.1.35) - 2023-11-21 - -### Added -- make joining gossip for clients and rpc nodes optional - -## [0.1.34](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.33...sn_faucet-v0.1.34) - 2023-11-21 - -### Other -- update 
dependencies - -## [0.1.33](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.32...sn_faucet-v0.1.33) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.32](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.31...sn_faucet-v0.1.32) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.31](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.30...sn_faucet-v0.1.31) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.30](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.29...sn_faucet-v0.1.30) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.29](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.28...sn_faucet-v0.1.29) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.28](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.27...sn_faucet-v0.1.28) - 2023-11-20 - -### Other -- update dependencies - -## [0.1.27](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.26...sn_faucet-v0.1.27) - 2023-11-17 - -### Other -- update dependencies - -## [0.1.26](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.25...sn_faucet-v0.1.26) - 2023-11-17 - -### Other -- update dependencies - -## [0.1.25](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.24...sn_faucet-v0.1.25) - 2023-11-16 - -### Other -- update dependencies - -## [0.1.24](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.23...sn_faucet-v0.1.24) - 2023-11-16 - -### Added -- massive cleaning to prepare for quotes - -## [0.1.23](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.22...sn_faucet-v0.1.23) - 2023-11-15 - -### Other -- update dependencies - -## [0.1.22](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.21...sn_faucet-v0.1.22) - 2023-11-15 - -### Other -- update dependencies - -## [0.1.21](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.20...sn_faucet-v0.1.21) - 2023-11-15 - -### Other -- update 
dependencies - -## [0.1.20](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.19...sn_faucet-v0.1.20) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.19](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.18...sn_faucet-v0.1.19) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.18](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.17...sn_faucet-v0.1.18) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.17](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.16...sn_faucet-v0.1.17) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.16](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.15...sn_faucet-v0.1.16) - 2023-11-14 - -### Other -- update dependencies - -## [0.1.15](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.14...sn_faucet-v0.1.15) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.14](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.13...sn_faucet-v0.1.14) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.13](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.12...sn_faucet-v0.1.13) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.12](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.11...sn_faucet-v0.1.12) - 2023-11-13 - -### Other -- update dependencies - -## [0.1.11](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.10...sn_faucet-v0.1.11) - 2023-11-10 - -### Other -- update dependencies - -## [0.1.10](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.9...sn_faucet-v0.1.10) - 2023-11-10 - -### Other -- update dependencies - -## [0.1.9](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.8...sn_faucet-v0.1.9) - 2023-11-09 - -### Other -- update dependencies - -## [0.1.8](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.7...sn_faucet-v0.1.8) - 2023-11-09 - -### Other -- update dependencies - -## 
[0.1.7](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.6...sn_faucet-v0.1.7) - 2023-11-09 - -### Other -- update dependencies - -## [0.1.6](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.5...sn_faucet-v0.1.6) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.5](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.4...sn_faucet-v0.1.5) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.4](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.3...sn_faucet-v0.1.4) - 2023-11-08 - -### Other -- update dependencies - -## [0.1.3](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.2...sn_faucet-v0.1.3) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.2](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.1...sn_faucet-v0.1.2) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.1](https://github.com/maidsafe/safe_network/compare/sn_faucet-v0.1.0...sn_faucet-v0.1.1) - 2023-11-07 - -### Other -- update dependencies - -## [0.1.0](https://github.com/maidsafe/safe_network/releases/tag/sn_faucet-v0.1.0) - 2023-11-07 - -### Fixed -- CI errors - -### Other -- move sn_faucet to its own crate diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml deleted file mode 100644 index 9fed6af601..0000000000 --- a/sn_faucet/Cargo.toml +++ /dev/null @@ -1,58 +0,0 @@ -[package] -authors = ["MaidSafe Developers "] -description = "The Safe Network Faucet" -documentation = "https://docs.rs/sn_node" -edition = "2021" -homepage = "https://maidsafe.net" -license = "GPL-3.0" -name = "sn_faucet" -readme = "README.md" -repository = "https://github.com/maidsafe/safe_network" -version = "0.5.3" - -[features] -default = ["gifting"] -distribution = ["base64", "bitcoin", "minreq"] -gifting = [] -initial-data = ["reqwest", "futures"] -nightly = [] - -[[bin]] -path = "src/main.rs" -name = "faucet" - -[dependencies] -warp = "0.3" -assert_fs = "1.0.0" -base64 = { version = "0.22.0", optional = 
true } -bitcoin = { version = "0.31.0", features = [ - "rand-std", - "base64", -], optional = true } -bls = { package = "blsttc", version = "8.0.1" } -clap = { version = "4.2.1", features = ["derive"] } -color-eyre = "0.6.2" -dirs-next = "~2.0.0" -hex = "0.4.3" -indicatif = { version = "0.17.5", features = ["tokio"] } -minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } -serde = { version = "1.0.193", features = ["derive"] } -serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.15" } -sn_cli = { path = "../sn_cli", version = "0.95.3" } -sn_client = { path = "../sn_client", version = "0.110.4" } -sn_logging = { path = "../sn_logging", version = "0.2.36" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.3" } -sn_protocol = { path = "../sn_protocol", version = "0.17.11" } -sn_transfers = { path = "../sn_transfers", version = "0.19.3" } -tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } -tracing = { version = "~0.1.26" } -url = "2.5.0" -fs2 = "0.4.3" -reqwest = { version = "0.12.4", default-features = false, features = [ - "rustls-tls", -], optional = true } -futures = { version = "0.3.30", optional = true } - -[lints] -workspace = true diff --git a/sn_faucet/README.md b/sn_faucet/README.md deleted file mode 100644 index 041edc921d..0000000000 --- a/sn_faucet/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Safe Network Faucet -This is a command line application that allows you to run a Safe Network Faucet. - -## Usage -Run `cargo run -- ` to start the application. Some of the commands available are: - -- `ClaimGenesis`: Claim the amount in the genesis CashNote and deposit it to the faucet local wallet. -- `Send`: Send a specified amount of tokens to a specified wallet. -- `Server`: Starts an http server that will send tokens to anyone who requests them. - -For more information about each command, run `cargo run -- --help`. 
diff --git a/sn_faucet/maid_address_claims.csv b/sn_faucet/maid_address_claims.csv deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sn_faucet/src/faucet_server.rs b/sn_faucet/src/faucet_server.rs deleted file mode 100644 index 0147b434e3..0000000000 --- a/sn_faucet/src/faucet_server.rs +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -use crate::claim_genesis; -#[cfg(feature = "gifting")] -use crate::send_tokens; -#[cfg(feature = "distribution")] -use crate::token_distribution; -use color_eyre::eyre::Result; -use fs2::FileExt; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet, - Client, -}; -use sn_transfers::{ - get_faucet_data_dir, wallet_lockfile_name, NanoTokens, Transfer, WALLET_DIR_NAME, -}; -use std::path::Path; -use std::{collections::HashMap, sync::Arc}; -use tokio::sync::Semaphore; -use tracing::{debug, error, info, warn}; -use warp::{ - http::{Response, StatusCode}, - Filter, Reply, -}; - -#[cfg(feature = "initial-data")] -use crate::gutenberger::{download_book, State}; -#[cfg(feature = "initial-data")] -use reqwest::Client as ReqwestClient; -#[cfg(feature = "initial-data")] -use sn_cli::FilesUploader; -#[cfg(feature = "initial-data")] -use sn_client::{UploadCfg, BATCH_SIZE}; -#[cfg(feature = "initial-data")] -use sn_protocol::storage::{ChunkAddress, RetryStrategy}; -#[cfg(feature = "initial-data")] -use std::{fs::File, path::PathBuf}; -#[cfg(feature = "initial-data")] -use 
tokio::{fs, io::AsyncWriteExt}; - -/// Run the faucet server. -/// -/// This will listen on port 8000 and send a transfer of tokens as response to any GET request. -/// -/// # Example -/// -/// ```bash -/// # run faucet server -/// cargo run --features="local" --bin faucet --release -- server -/// -/// # query faucet server for money for our address `get local wallet address` -/// curl "localhost:8000/`cargo run --features="local" --bin safe --release wallet address | tail -n 1`" > transfer_hex -/// -/// # receive transfer with our wallet -/// cargo run --features="local" --bin safe --release wallet receive --file transfer_hex -/// -/// # balance should be updated -/// ``` -pub async fn run_faucet_server(client: &Client) -> Result<()> { - let root_dir = get_faucet_data_dir(); - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None)?; - claim_genesis(client, wallet).await.inspect_err(|_err| { - println!("Faucet Server couldn't start as we failed to claim Genesis"); - eprintln!("Faucet Server couldn't start as we failed to claim Genesis"); - error!("Faucet Server couldn't start as we failed to claim Genesis"); - })?; - - #[cfg(feature = "initial-data")] - { - let _ = upload_initial_data(client, &root_dir).await; - } - - startup_server(client.clone()).await -} - -#[cfg(feature = "initial-data")] -/// Trigger one by one uploading of intitial data packets to the entwork. -async fn upload_initial_data(client: &Client, root_dir: &Path) -> Result<()> { - let temp_dir = std::env::temp_dir(); - let state_file = temp_dir.join("state.json"); - let uploaded_books_file = temp_dir.join("uploaded_books.json"); - let mut state = State::load_from_file(&state_file)?; - - let reqwest_client = ReqwestClient::new(); - - let mut uploaded_books: Vec<(String, String)> = if uploaded_books_file.exists() { - let file = File::open(&uploaded_books_file)?; - serde_json::from_reader(file)? 
- } else { - vec![] - }; - - println!("Previous upload state restored"); - info!("Previous upload state restored"); - - for book_id in state.max_seen()..u16::MAX as u32 { - if state.has_seen(book_id) { - println!("Already seen book ID: {book_id}"); - info!("Already seen book ID: {book_id}"); - continue; - } - - match download_book(&reqwest_client, book_id).await { - Ok(data) => { - println!("Downloaded book ID: {book_id}"); - info!("Downloaded book ID: {book_id}"); - - let fname = format!("{book_id}.book"); - let fpath = temp_dir.join(fname.clone()); - - match mark_download_progress(book_id, &fpath, data, &mut state, &state_file).await { - Ok(_) => { - println!("Marked download progress book ID: {book_id} completed"); - info!("Marked download progress book ID: {book_id} completed"); - } - Err(err) => { - println!("When marking download progress book ID: {book_id}, encountered error {err:?}"); - error!("When marking download progress book ID: {book_id}, encountered error {err:?}"); - continue; - } - } - - match upload_downloaded_book(client, root_dir, fpath).await { - Ok(head_addresses) => { - println!("Uploaded book ID: {book_id}"); - info!("Uploaded book ID: {book_id}"); - - // There shall be just one - for head_address in head_addresses { - uploaded_books.push((fname.clone(), head_address.to_hex())); - - match mark_upload_progress(&uploaded_books_file, &uploaded_books) { - Ok(_) => { - println!("Marked upload progress book ID: {book_id} completed"); - info!("Marked upload progress book ID: {book_id} completed"); - } - Err(err) => { - println!("When marking upload progress book ID: {book_id}, encountered error {err:?}"); - error!("When marking upload progress book ID: {book_id}, encountered error {err:?}"); - continue; - } - } - } - } - Err(err) => { - println!("Failed to upload book ID: {book_id} with error {err:?}"); - info!("Failed to upload book ID: {book_id} with error {err:?}"); - } - } - - println!("Sleeping for 1 minutes..."); - 
tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; - } - Err(e) => { - eprintln!("Failed to download book ID {book_id}: {e:?}"); - } - } - } - - Ok(()) -} - -#[cfg(feature = "initial-data")] -async fn mark_download_progress( - book_id: u32, - fpath: &Path, - data: Vec, - state: &mut State, - state_file: &Path, -) -> Result<()> { - let mut dest = fs::File::create(fpath).await?; - dest.write_all(&data).await?; - - state.mark_seen(book_id); - state.save_to_file(state_file)?; - Ok(()) -} - -#[cfg(feature = "initial-data")] -fn mark_upload_progress(fpath: &Path, uploaded_books: &Vec<(String, String)>) -> Result<()> { - let file = File::create(fpath)?; - serde_json::to_writer(file, &uploaded_books)?; - Ok(()) -} - -#[cfg(feature = "initial-data")] -async fn upload_downloaded_book( - client: &Client, - root_dir: &Path, - file_path: PathBuf, -) -> Result> { - let upload_cfg = UploadCfg { - batch_size: BATCH_SIZE, - verify_store: true, - retry_strategy: RetryStrategy::Quick, - ..Default::default() - }; - - let files_uploader = FilesUploader::new(client.clone(), root_dir.to_path_buf()) - .set_make_data_public(true) - .set_upload_cfg(upload_cfg) - .insert_path(&file_path); - - let summary = match files_uploader.start_upload().await { - Ok(summary) => summary, - Err(err) => { - println!("Failed to upload {file_path:?} with error {err:?}"); - return Ok(vec![]); - } - }; - - info!( - "File {file_path:?} uploaded completed with summary {:?}", - summary.upload_summary - ); - println!( - "File {file_path:?} uploaded completed with summary {:?}", - summary.upload_summary - ); - - let mut head_addresses = vec![]; - for (_, file_name, head_address) in summary.completed_files.iter() { - info!( - "Head address of {file_name:?} is {:?}", - head_address.to_hex() - ); - println!( - "Head address of {file_name:?} is {:?}", - head_address.to_hex() - ); - head_addresses.push(*head_address); - } - - Ok(head_addresses) -} - -pub async fn restart_faucet_server(client: &Client) -> 
Result<()> { - let root_dir = get_faucet_data_dir(); - println!("Loading the previous wallet at {root_dir:?}"); - debug!("Loading the previous wallet at {root_dir:?}"); - - deposit(&root_dir)?; - - println!("Previous wallet loaded"); - debug!("Previous wallet loaded"); - - startup_server(client.clone()).await -} - -#[cfg(feature = "distribution")] -async fn respond_to_distribution_request( - client: Client, - query: HashMap, - balances: HashMap, - semaphore: Arc, -) -> std::result::Result { - let permit = semaphore.try_acquire(); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due to locked wallet"); - - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - let r = - match token_distribution::handle_distribution_req(&client, query, balances.clone()).await { - Ok(distribution) => Response::new(distribution.to_string()), - Err(err) => { - eprintln!("Failed to get distribution: {err}"); - error!("Failed to get distribution: {err}"); - Response::new(format!("Failed to get distribution: {err}")) - } - }; - - Ok(r) -} - -fn is_wallet_locked() -> bool { - info!("Checking if wallet is locked"); - let root_dir = get_faucet_data_dir(); - - let wallet_dir = root_dir.join(WALLET_DIR_NAME); - let wallet_lockfile_name = wallet_lockfile_name(&wallet_dir); - let file_result = std::fs::OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .open(wallet_lockfile_name) - .and_then(|file| file.try_lock_exclusive()); - info!("After if wallet is locked"); - - if file_result.is_err() { - // Either opening the file or locking it failed, indicating rate limiting should occur - return true; - } - - false -} - -async fn respond_to_donate_request( - client: Client, - transfer_str: String, - semaphore: Arc, -) -> std::result::Result { - let 
permit = semaphore.try_acquire(); - info!("Got donate request with: {transfer_str}"); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due"); - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - let faucet_root = get_faucet_data_dir(); - let mut wallet = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) { - Ok(wallet) => wallet, - Err(_error) => { - let mut response = Response::new("Could not load wallet".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - }; - - if let Err(err) = fund_faucet_from_genesis_wallet(&client, &mut wallet).await { - eprintln!("Failed to load + fund faucet wallet: {err}"); - error!("Failed to load + fund faucet wallet: {err}"); - let mut response = Response::new(format!("Failed to load faucet wallet: {err}")); - *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; - return Ok(response); - }; - - // return key is Transfer is empty - if transfer_str.is_empty() { - let address = wallet.address().to_hex(); - return Ok(Response::new(format!("Faucet wallet address: {address}"))); - } - - // parse transfer - let transfer = match Transfer::from_hex(&transfer_str) { - Ok(t) => t, - Err(err) => { - eprintln!("Failed to parse transfer: {err}"); - error!("Failed to parse transfer {transfer_str}: {err}"); - let mut response = Response::new(format!("Failed to parse transfer: {err}")); - *response.status_mut() = StatusCode::BAD_REQUEST; - return Ok(response); - } - }; - - // receive transfer - let res = client.receive(&transfer, &wallet).await; - match res { - Ok(cashnotes) => { - let old_balance = wallet.balance(); - if let Err(e) = 
wallet.deposit_and_store_to_disk(&cashnotes) { - eprintln!("Failed to store deposited amount: {e}"); - error!("Failed to store deposited amount: {e}"); - let mut response = Response::new(format!("Failed to store deposited amount: {e}")); - *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; - return Ok(response); - } - let new_balance = wallet.balance(); - - info!("Successfully stored cash_note to wallet dir"); - info!("Old balance: {old_balance}, new balance: {new_balance}"); - - Ok(Response::new("Thank you!".to_string())) - } - Err(err) => { - eprintln!("Failed to verify and redeem transfer: {err}"); - error!("Failed to verify and redeem transfer: {err}"); - let mut response = - Response::new(format!("Failed to verify and redeem transfer: {err}")); - *response.status_mut() = StatusCode::BAD_REQUEST; - Ok(response) - } - } -} - -#[cfg(not(feature = "gifting"))] -#[expect(clippy::unused_async)] -async fn respond_to_gift_request( - _client: Client, - _key: String, - _semaphore: Arc, -) -> std::result::Result { - let mut response = Response::new("Gifting not enabled".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - Ok(response) -} - -#[cfg(feature = "gifting")] -async fn respond_to_gift_request( - client: Client, - key: String, - semaphore: Arc, -) -> std::result::Result { - let faucet_root = get_faucet_data_dir(); - - let from = match load_account_wallet_or_create_with_mnemonic(&faucet_root, None) { - Ok(wallet) => wallet, - Err(_error) => { - let mut response = Response::new("Could not load wallet".to_string()); - *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - }; - - let permit = semaphore.try_acquire(); - - // some rate limiting - if is_wallet_locked() || permit.is_err() { - warn!("Rate limited request due"); - let mut response = Response::new("Rate limited".to_string()); - *response.status_mut() = 
StatusCode::TOO_MANY_REQUESTS; - - // Either opening the file or locking it failed, indicating rate limiting should occur - return Ok(response); - } - - const GIFT_AMOUNT_SNT: &str = "1"; - match send_tokens(&client, from, GIFT_AMOUNT_SNT, &key).await { - Ok(transfer) => { - println!("Sent tokens to {key}"); - debug!("Sent tokens to {key}"); - Ok(Response::new(transfer.to_string())) - } - Err(err) => { - eprintln!("Failed to send tokens to {key}: {err}"); - error!("Failed to send tokens to {key}: {err}"); - Ok(Response::new(format!("Failed to send tokens: {err}"))) - } - } -} - -async fn startup_server(client: Client) -> Result<()> { - // Create a semaphore with a single permit - let semaphore = Arc::new(Semaphore::new(1)); - - #[expect(unused)] - let mut balances = HashMap::::new(); - #[cfg(feature = "distribution")] - { - balances = token_distribution::load_maid_snapshot()?; - let keys = token_distribution::load_maid_claims()?; - // Each distribution takes about 500ms to create, so for thousands of - // initial distributions this takes many minutes. This is run in the - // background instead of blocking the server from starting. 
- tokio::spawn(token_distribution::distribute_from_maid_to_tokens( - client.clone(), - balances.clone(), - keys, - )); - } - - let gift_client = client.clone(); - let donation_client = client.clone(); - let donation_addr_client = client.clone(); - let donation_semaphore = Arc::clone(&semaphore); - let donation_addr_semaphore = Arc::clone(&semaphore); - #[cfg(feature = "distribution")] - let semaphore_dist = Arc::clone(&semaphore); - - // GET /distribution/address=address&wallet=wallet&signature=signature - #[cfg(feature = "distribution")] - let distribution_route = warp::get() - .and(warp::path("distribution")) - .and(warp::query::>()) - .map(|query| { - debug!("Received distribution request: {query:?}"); - query - }) - .and_then(move |query| { - let semaphore = Arc::clone(&semaphore_dist); - let client = client.clone(); - respond_to_distribution_request(client, query, balances.clone(), semaphore) - }); - - // GET /key - let gift_route = warp::get() - .and(warp::path!(String)) - .map(|query| { - debug!("Gift distribution request: {query}"); - query - }) - .and_then(move |key| { - let client = gift_client.clone(); - let semaphore = Arc::clone(&semaphore); - - respond_to_gift_request(client, key, semaphore) - }); - - // GET /donate - let donation_addr = warp::get().and(warp::path("donate")).and_then(move || { - debug!("Donation address request"); - let client = donation_addr_client.clone(); - let semaphore = Arc::clone(&donation_addr_semaphore); - - respond_to_donate_request(client, String::new(), semaphore) - }); - - // GET /donate/transfer - let donation_route = warp::get() - .and(warp::path!("donate" / String)) - .map(|query| { - debug!("Donation request: {query}"); - query - }) - .and_then(move |transfer| { - let client = donation_client.clone(); - let semaphore = Arc::clone(&donation_semaphore); - - respond_to_donate_request(client, transfer, semaphore) - }); - - println!("Starting http server listening on port 8000..."); - debug!("Starting http server listening 
on port 8000..."); - - #[cfg(feature = "distribution")] - warp::serve( - distribution_route - .or(donation_route) - .or(donation_addr) - .or(gift_route), - ) - // warp::serve(gift_route) - .run(([0, 0, 0, 0], 8000)) - .await; - - #[cfg(not(feature = "distribution"))] - warp::serve(donation_route.or(donation_addr).or(gift_route)) - .run(([0, 0, 0, 0], 8000)) - .await; - - debug!("Server closed"); - Ok(()) -} - -fn deposit(root_dir: &Path) -> Result<()> { - let mut wallet = load_account_wallet_or_create_with_mnemonic(root_dir, None)?; - - let previous_balance = wallet.balance(); - - wallet.try_load_cash_notes()?; - - let deposited = NanoTokens::from(wallet.balance().as_nano() - previous_balance.as_nano()); - if deposited.is_zero() { - println!("Nothing deposited."); - } else if let Err(err) = wallet.deposit_and_store_to_disk(&vec![]) { - println!("Failed to store deposited ({deposited}) amount: {err:?}"); - } else { - println!("Deposited {deposited}."); - } - - Ok(()) -} diff --git a/sn_faucet/src/gutenberger.rs b/sn_faucet/src/gutenberger.rs deleted file mode 100644 index 4968c93cc4..0000000000 --- a/sn_faucet/src/gutenberger.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use color_eyre::eyre::Result; -use reqwest::Client; -use serde::{Deserialize, Serialize}; -use std::collections::HashSet; -use std::fs::File; -use std::path::Path; - -#[derive(Serialize, Deserialize)] -pub(crate) struct State { - seen_books: HashSet, -} - -impl State { - pub(crate) fn new() -> Self { - State { - seen_books: HashSet::new(), - } - } - - pub(crate) fn load_from_file(path: &Path) -> Result { - if path.exists() { - let file = File::open(path)?; - let state: State = serde_json::from_reader(file)?; - Ok(state) - } else { - Ok(Self::new()) - } - } - - pub(crate) fn save_to_file(&self, path: &Path) -> Result<()> { - let file = File::create(path)?; - serde_json::to_writer(file, self)?; - Ok(()) - } - - pub(crate) fn mark_seen(&mut self, book_id: u32) { - self.seen_books.insert(book_id); - } - - pub(crate) fn has_seen(&self, book_id: u32) -> bool { - if book_id == 0 && self.seen_books.is_empty() { - return true; - } - self.seen_books.contains(&book_id) - } - - pub(crate) fn max_seen(&self) -> u32 { - if let Some(result) = self.seen_books.iter().max() { - *result - } else { - 0 - } - } -} - -pub(crate) async fn download_book(client: &Client, book_id: u32) -> Result> { - let url = format!("http://www.gutenberg.org/ebooks/{book_id}.txt.utf-8"); - let response = client.get(&url).send().await?.bytes().await?; - Ok(response.to_vec()) -} diff --git a/sn_faucet/src/main.rs b/sn_faucet/src/main.rs deleted file mode 100644 index ad1bf336f9..0000000000 --- a/sn_faucet/src/main.rs +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod faucet_server; -#[cfg(feature = "initial-data")] -pub(crate) mod gutenberger; -#[cfg(feature = "distribution")] -mod token_distribution; - -use clap::{Parser, Subcommand}; -use color_eyre::eyre::{bail, eyre, Result}; -use faucet_server::{restart_faucet_server, run_faucet_server}; -use indicatif::ProgressBar; -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, fund_faucet_from_genesis_wallet, send, - Client, ClientEvent, ClientEventsBroadcaster, ClientEventsReceiver, -}; -use sn_logging::{Level, LogBuilder, LogOutputDest}; -use sn_peers_acquisition::PeersArgs; -use sn_protocol::version::IDENTIFY_PROTOCOL_STR; -use sn_transfers::{get_faucet_data_dir, HotWallet, MainPubkey, NanoTokens, Transfer}; -use std::{path::PathBuf, time::Duration}; -use tokio::{sync::broadcast::error::RecvError, task::JoinHandle}; -use tracing::{error, info}; - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - - if opt.version { - println!( - "{}", - sn_build_info::version_string( - "Autonomi Test Faucet", - env!("CARGO_PKG_VERSION"), - Some(&IDENTIFY_PROTOCOL_STR.to_string()) - ) - ); - return Ok(()); - } - - if opt.crate_version { - println!("Crate version: {}", env!("CARGO_PKG_VERSION")); - return Ok(()); - } - - if opt.protocol_version { - println!("Network version: {}", *IDENTIFY_PROTOCOL_STR); - return Ok(()); - } - - #[cfg(not(feature = "nightly"))] - if opt.package_version { - println!("Package version: {}", sn_build_info::package_version()); - return Ok(()); - } - - let bootstrap_peers = opt.peers.get_peers().await?; - let bootstrap_peers = if bootstrap_peers.is_empty() { - // empty vec is returned if `local` flag is provided - None - } else { - Some(bootstrap_peers) - }; - - let logging_targets = vec![ - // TODO: Reset to nice and clean defaults once we have a better idea of what we want 
- ("faucet".to_string(), Level::TRACE), - ("sn_client".to_string(), Level::TRACE), - ("sn_faucet".to_string(), Level::TRACE), - ("sn_networking".to_string(), Level::DEBUG), - ("sn_build_info".to_string(), Level::TRACE), - ("sn_logging".to_string(), Level::TRACE), - ("sn_peers_acquisition".to_string(), Level::TRACE), - ("sn_protocol".to_string(), Level::TRACE), - ("sn_registers".to_string(), Level::TRACE), - ("sn_transfers".to_string(), Level::TRACE), - ]; - - let mut log_builder = LogBuilder::new(logging_targets); - log_builder.output_dest(opt.log_output_dest); - let _log_handles = log_builder.initialize()?; - - sn_build_info::log_version_info(env!("CARGO_PKG_VERSION"), &IDENTIFY_PROTOCOL_STR); - - info!("Instantiating a SAFE Test Faucet..."); - - let secret_key = bls::SecretKey::random(); - let broadcaster = ClientEventsBroadcaster::default(); - let (progress_bar, handle) = spawn_connection_progress_bar(broadcaster.subscribe()); - let result = Client::new(secret_key, bootstrap_peers, None, Some(broadcaster)).await; - let client = match result { - Ok(client) => client, - Err(err) => { - // clean up progress bar - progress_bar.finish_with_message("Could not connect to the network"); - error!("Failed to get Client with err {err:?}"); - return Err(err.into()); - } - }; - handle.await?; - - let root_dir = get_faucet_data_dir(); - let mut funded_faucet = match load_account_wallet_or_create_with_mnemonic(&root_dir, None) { - Ok(wallet) => wallet, - Err(err) => { - println!("failed to load wallet for faucet! with error {err:?}"); - error!("failed to load wallet for faucet! 
with error {err:?}"); - return Err(err.into()); - } - }; - - fund_faucet_from_genesis_wallet(&client, &mut funded_faucet).await?; - - if let Err(err) = faucet_cmds(opt.cmd.clone(), &client, funded_faucet).await { - error!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd); - eprintln!("Failed to run faucet cmd {:?} with err {err:?}", opt.cmd); - } - - Ok(()) -} - -/// Helper to subscribe to the client events broadcaster and spin up a progress bar that terminates when the -/// client successfully connects to the network or if it errors out. -fn spawn_connection_progress_bar(mut rx: ClientEventsReceiver) -> (ProgressBar, JoinHandle<()>) { - // Network connection progress bar - let progress_bar = ProgressBar::new_spinner(); - let progress_bar_clone = progress_bar.clone(); - progress_bar.enable_steady_tick(Duration::from_millis(120)); - progress_bar.set_message("Connecting to The SAFE Network..."); - let new_style = progress_bar.style().tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈🔗"); - progress_bar.set_style(new_style); - - progress_bar.set_message("Connecting to The SAFE Network..."); - - let handle = tokio::spawn(async move { - let mut peers_connected = 0; - loop { - match rx.recv().await { - Ok(ClientEvent::ConnectedToNetwork) => { - progress_bar.finish_with_message("Connected to the Network"); - break; - } - Ok(ClientEvent::PeerAdded { - max_peers_to_connect, - }) => { - peers_connected += 1; - progress_bar.set_message(format!( - "{peers_connected}/{max_peers_to_connect} initial peers found.", - )); - } - Err(RecvError::Lagged(_)) => { - // Even if the receiver is lagged, we would still get the ConnectedToNetwork during each new - // connection. Thus it would be okay to skip this error. - } - Err(RecvError::Closed) => { - progress_bar.finish_with_message("Could not connect to the network"); - break; - } - _ => {} - } - } - }); - (progress_bar_clone, handle) -} - -#[derive(Parser)] -#[command(disable_version_flag = true)] -struct Opt { - /// Specify the logging output destination. 
- /// - /// Valid values are "stdout", "data-dir", or a custom path. - /// - /// `data-dir` is the default value. - /// - /// The data directory location is platform specific: - /// - Linux: $HOME/.local/share/safe/client/logs - /// - macOS: $HOME/Library/Application Support/safe/client/logs - /// - Windows: C:\Users\\AppData\Roaming\safe\client\logs - #[clap(long, value_parser = parse_log_output, verbatim_doc_comment, default_value = "data-dir")] - pub log_output_dest: LogOutputDest, - - #[command(flatten)] - peers: PeersArgs, - - /// Available sub commands. - #[clap(subcommand)] - pub cmd: Option, - - /// Print the crate version - #[clap(long)] - crate_version: bool, - - /// Print the protocol version - #[clap(long)] - protocol_version: bool, - - /// Print the package version - #[cfg(not(feature = "nightly"))] - #[clap(long)] - package_version: bool, - - /// Print version information. - #[clap(long)] - version: bool, -} - -#[derive(Subcommand, Debug, Clone)] -enum SubCmd { - /// Claim the amount in the genesis CashNote and deposit it to the faucet local wallet. - /// This needs to be run before a testnet is opened to the public, as to not have - /// the genesis claimed by someone else (the key and cash_note are public for audit). - ClaimGenesis, - Send { - /// This shall be the number of nanos to send. - #[clap(name = "amount")] - amount: String, - /// This must be a hex-encoded `MainPubkey`. - #[clap(name = "to")] - to: String, - }, - /// Starts an http server that will send tokens to anyone who requests them. - /// curl http://localhost:8000/your-hex-encoded-wallet-public-address - Server, - /// Restart the faucet_server from the last breaking point. - /// - /// Before firing this cmd, ensure: - /// 1, The previous faucet_server has been stopped. - /// 2, Invalid cash_notes have been removed from the cash_notes folder. - /// 3, The old `wallet` and `wallet.lock` files shall also be removed. 
- /// The command will create a new wallet with the same key, - /// then deposit all valid cash_notes into wallet and startup the faucet_server. - RestartServer, -} - -async fn faucet_cmds( - cmds: Option, - client: &Client, - funded_wallet: HotWallet, -) -> Result<()> { - if let Some(cmds) = cmds { - match cmds { - SubCmd::ClaimGenesis => { - claim_genesis(client, funded_wallet).await?; - } - SubCmd::Send { amount, to } => { - send_tokens(client, funded_wallet, &amount, &to).await?; - } - SubCmd::Server => { - run_faucet_server(client).await?; - } - SubCmd::RestartServer => { - restart_faucet_server(client).await?; - } - } - } else { - // Handle the case when no subcommand is provided - println!("No subcommand provided. Use --help for more information."); - } - Ok(()) -} - -async fn claim_genesis(client: &Client, mut wallet: HotWallet) -> Result<()> { - for i in 1..6 { - if let Err(e) = fund_faucet_from_genesis_wallet(client, &mut wallet).await { - println!("Failed to claim genesis: {e}"); - } else { - println!("Genesis claimed!"); - return Ok(()); - } - println!("Trying to claiming genesis... attempt {i}"); - } - bail!("Failed to claim genesis") -} - -/// returns the hex-encoded transfer -async fn send_tokens(client: &Client, from: HotWallet, amount: &str, to: &str) -> Result { - let to = MainPubkey::from_hex(to)?; - use std::str::FromStr; - let amount = NanoTokens::from_str(amount)?; - if amount.as_nano() == 0 { - println!("Invalid format or zero amount passed in. Nothing sent."); - return Err(eyre!( - "Invalid format or zero amount passed in. Nothing sent." 
- )); - } - - let cash_note = send(from, amount, to, client, true).await?; - let transfer_hex = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?; - println!("{transfer_hex}"); - - Ok(transfer_hex) -} - -fn parse_log_output(val: &str) -> Result { - match val { - "stdout" => Ok(LogOutputDest::Stdout), - "data-dir" => { - let dir = get_faucet_data_dir().join("logs"); - Ok(LogOutputDest::Path(dir)) - } - // The path should be a directory, but we can't use something like `is_dir` to check - // because the path doesn't need to exist. We can create it for the user. - value => Ok(LogOutputDest::Path(PathBuf::from(value))), - } -} diff --git a/sn_faucet/src/token_distribution.rs b/sn_faucet/src/token_distribution.rs deleted file mode 100644 index 76e7b46a9f..0000000000 --- a/sn_faucet/src/token_distribution.rs +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use crate::send_tokens; -#[cfg(feature = "distribution")] -use base64::Engine; -use color_eyre::eyre::{eyre, Result}; -use serde::{Deserialize, Serialize}; -use sn_client::acc_packet::load_account_wallet_or_create_with_mnemonic; -use sn_client::Client; -use sn_transfers::{get_faucet_data_dir, MainPubkey, NanoTokens}; -use std::str::FromStr; -use std::{collections::HashMap, path::PathBuf}; -use tracing::info; - -const SNAPSHOT_FILENAME: &str = "snapshot.json"; -const SNAPSHOT_URL: &str = "https://api.omniexplorer.info/ask.aspx?api=getpropertybalances&prop=3"; -const CLAIMS_URL: &str = - "https://github.com/maidsafe/safe_network/raw/main/sn_faucet/maid_address_claims.csv"; -const HTTP_STATUS_OK: i32 = 200; - -type MaidAddress = String; // base58 encoded -type Snapshot = HashMap; - -// Parsed from json in SNAPSHOT_URL -#[derive(Serialize, Deserialize)] -struct MaidBalance { - address: MaidAddress, - balance: String, - reserved: String, -} - -// Maid owners supply info that allows the faucet to distribute their funds. -// They sign a safe wallet address using their maid key to prove ownership of -// the maid. -// The faucet will distribute SNT directly to that safe wallet address. 
-pub struct MaidClaim { - address: String, // base58 encoded bitcoin address owning omni maid - pubkey: String, // hex encoded bitcoin public key - wallet: String, // hex encoded safe wallet address - signature: String, // base64 encoded bitcoin signature of the wallet hex -} - -impl MaidClaim { - pub fn new(address: MaidAddress, wallet: String, signature: String) -> Result { - let pubkey = match pubkey_from_signature(&wallet, &signature) { - Ok(pk) => pk, - Err(err) => { - return Err(eyre!("Invalid signature: {err}")); - } - }; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - let mc = MaidClaim { - address, - pubkey: pubkey_hex, - wallet, - signature, - }; - mc.is_valid()?; - Ok(mc) - } - - pub fn from_csv_line(line: &str) -> Result { - let cells = line.trim().split(',').collect::>(); - if cells.len() != 4 { - let msg = format!("Invalid claim csv: {line}"); - return Err(eyre!(msg.to_string())); - } - let mc = MaidClaim { - address: cells[0].to_string(), - pubkey: cells[1].to_string(), - wallet: cells[2].to_string(), - signature: cells[3].to_string(), - }; - mc.is_valid()?; - Ok(mc) - } - - pub fn to_csv_line(&self) -> String { - format!( - "{},{},{},{}", - self.address, self.pubkey, self.wallet, self.signature - ) - } - - pub fn is_valid(&self) -> Result<()> { - // check signature is correct - check_signature(&self.address, &self.wallet, &self.signature)?; - // check pk matches address - if !maid_pk_matches_address(&self.address, &self.pubkey) { - return Err(eyre!("Claim public key does not match address")); - } - // check wallet is a valid bls pubkey - if MainPubkey::from_hex(&self.wallet).is_err() { - return Err(eyre!("Invalid bls public key")); - }; - // if all the checks are ok, it's valid - Ok(()) - } - - pub fn save_to_file(&self) -> Result<()> { - // check it's valid before we write it, can't know for sure it was - // already validated - self.is_valid()?; - // if it already exists, overwrite it - let addr_path = 
get_claims_data_dir_path()?.join(self.address.clone()); - let csv_line = self.to_csv_line(); - std::fs::write(addr_path, csv_line)?; - Ok(()) - } -} - -// This is different to test_faucet_data_dir because it should *not* be -// removed when --clean flag is specified. -fn get_snapshot_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_claims_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("claims"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -fn get_distributions_data_dir_path() -> Result { - let dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe_snapshot") - .join("distributions"); - std::fs::create_dir_all(dir.clone())?; - Ok(dir.to_path_buf()) -} - -pub fn load_maid_snapshot() -> Result { - // If the faucet restarts there will be an existing snapshot which should - // be used to avoid conflicts in the balances between two different - // snapshots. 
- // Check if a previous snapshot already exists - let root_dir = get_snapshot_data_dir_path()?; - let filename = root_dir.join(SNAPSHOT_FILENAME); - if std::fs::metadata(filename.clone()).is_ok() { - info!("Using existing maid snapshot from {:?}", filename); - maid_snapshot_from_file(filename) - } else { - info!("Fetching snapshot from {}", SNAPSHOT_URL); - maid_snapshot_from_internet(filename) - } -} - -fn maid_snapshot_from_file(snapshot_path: PathBuf) -> Result { - let content = std::fs::read_to_string(snapshot_path)?; - parse_snapshot(content) -} - -fn maid_snapshot_from_internet(snapshot_path: PathBuf) -> Result { - // make the request - let response = minreq::get(SNAPSHOT_URL).send()?; - // check the request is ok - if response.status_code != HTTP_STATUS_OK { - let msg = format!("Snapshot failed with http status {}", response.status_code); - return Err(eyre!(msg)); - } - // write the response to file - let body = response.as_str()?; - info!("Writing snapshot to {:?}", snapshot_path); - std::fs::write(snapshot_path.clone(), body)?; - info!("Saved snapshot to {:?}", snapshot_path); - // parse the json response - parse_snapshot(body.to_string()) -} - -fn parse_snapshot(json_str: String) -> Result { - let balances: Vec = serde_json::from_str(&json_str)?; - let mut balances_map: Snapshot = Snapshot::new(); - // verify the snapshot is ok - // balances must match the ico amount, which is slightly higher than - // 2^32/10 because of the ico process. - // see https://omniexplorer.info/asset/3 - let supply = NanoTokens::from(452_552_412_000_000_000); - let mut total = NanoTokens::zero(); - for b in &balances { - // The reserved amount is the amount currently for sale on omni dex. - // If it's not included the total is lower than expected. - // So the amount of maid an address owns is balance + reserved. 
- let balance = NanoTokens::from_str(&b.balance)?; - let reserved = NanoTokens::from_str(&b.reserved)?; - let address_balance = match balance.checked_add(reserved) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {balance} + {reserved}"); - return Err(eyre!(msg)); - } - }; - total = match total.checked_add(address_balance) { - Some(b) => b, - None => { - let msg = format!("Nanos overflowed adding maid {total} + {address_balance}"); - return Err(eyre!(msg)); - } - }; - balances_map.insert(b.address.clone(), address_balance); - } - if total != supply { - let msg = format!("Incorrect snapshot total, got {total} want {supply}"); - return Err(eyre!(msg)); - } - // log the total number of balances that were parsed - info!("Parsed {} maid balances from the snapshot", balances.len()); - Ok(balances_map) -} - -fn load_maid_claims_from_local() -> Result> { - let mut claims = HashMap::new(); - // load from existing files - let claims_dir = get_claims_data_dir_path()?; - let file_list = std::fs::read_dir(claims_dir)?; - for file in file_list { - // add to hashmap - let file = file?; - let claim_csv = std::fs::read_to_string(file.path())?; - let claim = MaidClaim::from_csv_line(&claim_csv)?; - claims.insert(claim.address.clone(), claim); - } - Ok(claims) -} - -pub fn load_maid_claims() -> Result> { - info!("Loading claims for distributions"); - let mut claims = match load_maid_claims_from_local() { - Ok(claims) => claims, - Err(err) => { - info!("Failed to load claims from local, {err:?}"); - HashMap::new() - } - }; - info!("{} claims after reading existing files", claims.len()); - - // load from list on internet - info!("Fetching claims from {CLAIMS_URL}"); - let response = minreq::get(CLAIMS_URL).send()?; - // check the request is ok - if response.status_code != 200 { - println!( - "Claims request failed with http status {}", - response.status_code - ); - // The existing data is ok, no need to fail to start the server here - return Ok(claims); - 
} - // parse the response as csv, each row has format: - // address,pkhex,wallet,signature - let body = response.as_str()?; - let lines: Vec<&str> = body.trim().split('\n').collect(); - info!("{} claims rows from {CLAIMS_URL}", lines.len()); - for line in lines { - let claim = match MaidClaim::from_csv_line(line) { - Ok(c) => c, - Err(_) => { - continue; - } - }; - // validate this claim info all matches correctly - if claim.is_valid().is_err() { - continue; - } - // save this cliam to the file system - if claim.save_to_file().is_err() { - println!("Error saving claim to file"); - continue; - } - // add this claim to the hashmap - claims.insert(claim.address.clone(), claim); - } - info!("{} claims after reading from online list", claims.len()); - Ok(claims) -} - -fn maid_pk_matches_address(address: &str, pk_hex: &str) -> bool { - // parse the address - let addr = match bitcoin::Address::from_str(address) { - Ok(a) => a, - Err(_) => return false, - }; - let btc_addr = match addr.clone().require_network(bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - // parse the public key - let pk = match bitcoin::PublicKey::from_str(pk_hex) { - Ok(p) => p, - Err(_) => return false, - }; - // The public key may be for a p2pkh address (starting with 1) or a p2wpkh - // address (starting with 3) so we need to check both. 
- let is_p2pkh = btc_addr.is_related_to_pubkey(&pk); - if is_p2pkh { - return true; - } - let p2wpkh_addr = match bitcoin::Address::p2shwpkh(&pk, bitcoin::Network::Bitcoin) { - Ok(a) => a, - Err(_) => return false, - }; - let is_p2wpkh = p2wpkh_addr == addr; - if is_p2wpkh { - return true; - } - false -} - -fn check_signature(address: &MaidAddress, msg: &str, signature: &str) -> Result<()> { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = bitcoin::sign_message::MessageSignature::from_str(signature)?; - // Signatures doesn't work with p2wpkh-p2sh so always use p2pkh addr. - // This was double checked with electrum signature validation. - let mut addr = - bitcoin::Address::from_str(address)?.require_network(bitcoin::Network::Bitcoin)?; - let pubkey = pubkey_from_signature(msg, signature)?; - if address.starts_with('3') { - addr = bitcoin::Address::p2pkh(&pubkey, bitcoin::Network::Bitcoin); - } - // check the signature is correct - if !sig.is_signed_by_address(&secp, &addr, msg_hash)? { - return Err(eyre!("Invalid signature")); - } - // Check the pubkey in the signature matches the address. - // This prevents someone submitting a valid signature from a pubkey that - // doesn't match the address for the snapshot. 
- let pubkey_hex = hex::encode(pubkey.to_bytes()); - if !maid_pk_matches_address(address, &pubkey_hex) { - return Err(eyre!("Public key does not match address")); - } - Ok(()) -} - -fn pubkey_from_signature(msg: &str, signature: &str) -> Result { - let secp = bitcoin::secp256k1::Secp256k1::new(); // DevSkim: ignore DS440100 - let msg_hash = bitcoin::sign_message::signed_msg_hash(msg); - let sig = match bitcoin::sign_message::MessageSignature::from_base64(signature) { - Ok(s) => s, - Err(err) => { - let msg = format!("Error parsing signature: {err}"); - return Err(eyre!(msg)); - } - }; - let pubkey = sig.recover_pubkey(&secp, msg_hash)?; - Ok(pubkey) -} - -pub async fn distribute_from_maid_to_tokens( - client: Client, - snapshot: Snapshot, - claims: HashMap, -) { - for (addr, amount) in snapshot { - // check if this snapshot address has a pubkey - if !claims.contains_key(&addr) { - continue; - } - let claim = &claims[&addr]; - match create_distribution(&client, claim, &amount).await { - Ok(_) => {} - Err(err) => { - info!( - "Error creating distribution: {0} {err}", - claim.to_csv_line() - ); - } - } - } -} - -pub async fn handle_distribution_req( - client: &Client, - query: HashMap, - balances: Snapshot, -) -> Result { - let address = query - .get("address") - .ok_or(eyre!("Missing address in querystring"))? - .to_string(); - let wallet = query - .get("wallet") - .ok_or(eyre!("Missing wallet in querystring"))? - .to_string(); - let signature = query - .get("signature") - .ok_or(eyre!("Missing signature in querystring"))? 
- .to_string(); - let amount = balances - .get(&address) - .ok_or(eyre!("Address not in snapshot"))?; - // Bitcoin expects base64 standard encoding but the query string has - // base64 url encoding, so the sig is converted to standard encoding - let sig_bytes = base64::engine::general_purpose::URL_SAFE.decode(signature)?; - let sig = base64::engine::general_purpose::STANDARD.encode(sig_bytes); - let claim = MaidClaim::new(address, wallet, sig)?; - create_distribution(client, &claim, amount).await -} - -async fn create_distribution( - client: &Client, - claim: &MaidClaim, - amount: &NanoTokens, -) -> Result { - // validate the claim - if claim.is_valid().is_err() { - let claim_csv = claim.to_csv_line(); - let msg = format!("Not creating distribution for invalid claim: {claim_csv}"); - info!(msg); - return Err(eyre!(msg)); - } - // save this claim to file - claim.save_to_file()?; - // check if this distribution has already been created - let root = get_distributions_data_dir_path()?; - let dist_path = root.join(&claim.address); - if dist_path.exists() { - let dist_hex = match std::fs::read_to_string(dist_path.clone()) { - Ok(content) => content, - Err(err) => { - let msg = format!( - "Error reading distribution file {}: {}", - dist_path.display(), - err - ); - info!(msg); - return Err(eyre!(msg)); - } - }; - return Ok(dist_hex); - } - info!( - "Distributing {} for {} to {}", - amount, claim.address, claim.wallet - ); - - let faucet_dir = get_faucet_data_dir(); - let faucet_wallet = load_account_wallet_or_create_with_mnemonic(&faucet_dir, None)?; - // create a transfer to the claim wallet - let transfer_hex = - match send_tokens(client, faucet_wallet, &amount.to_string(), &claim.wallet).await { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed send for {0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - let _ = match hex::decode(transfer_hex.clone()) { - Ok(t) => t, - Err(err) => { - let msg = format!("Failed to decode transfer for 
{0}: {err}", claim.address); - info!(msg); - return Err(eyre!(msg)); - } - }; - // save the transfer - match std::fs::write(dist_path.clone(), transfer_hex.clone()) { - Ok(_) => {} - Err(err) => { - let msg = format!( - "Failed to write transfer to file {}: {}", - dist_path.display(), - err - ); - info!(msg); - info!("The transfer hex that failed to write to file:"); - info!(transfer_hex); - return Err(eyre!(msg)); - } - }; - Ok(transfer_hex) -} - -#[cfg(all(test, feature = "distribution"))] -mod tests { - use super::*; - - use assert_fs::TempDir; - use bitcoin::{ - hashes::Hash, - secp256k1::{rand, Secp256k1}, - Address, Network, PublicKey, - }; - use sn_logging::LogBuilder; - use sn_transfers::{HotWallet, MainSecretKey, Transfer}; - - // This test is to confirm fetching 'MAID snapshop` and `Maid claims` list from website - // is working properly and giving consistent and expected result. - // - // Note: the current list will grow as testnets collect more claims - #[test] - fn fetching_from_network() -> Result<()> { - let snapshot = load_maid_snapshot()?; - println!("Maid snapshot got {:?} entries", snapshot.len()); - assert!(!snapshot.is_empty()); - - let claims = load_maid_claims()?; - println!("Got {:?} distribution claims", claims.len()); - - Ok(()) - } - - // This test will simulate a token distribution. 
- #[tokio::test] - async fn token_distribute_to_user() -> Result<()> { - let _log_guards = - LogBuilder::init_single_threaded_tokio_test("token_distribute_to_user test", true); - - let amount = NanoTokens::from(10); - - let secp = Secp256k1::new(); // DevSkim: ignore DS440100 - let (maid_secret_key, maid_public_key) = secp.generate_keypair(&mut rand::thread_rng()); - let maid_address = Address::p2pkh(&PublicKey::new(maid_public_key), Network::Bitcoin); - - let client_token_issuer = Client::quick_start(None).await?; - - // wallet comes from `safe wallet address` - let wallet_sk = bls::SecretKey::random(); - let wallet_pk_hex = wallet_sk.public_key().to_hex(); - // signature comes from bitcoin signing like electrum or trezor - let msg_hash = bitcoin::sign_message::signed_msg_hash(&wallet_pk_hex); - let msg = bitcoin::secp256k1::Message::from_digest(msg_hash.to_byte_array()); // DevSkim: ignore DS440100 - let secp_sig = secp.sign_ecdsa_recoverable(&msg, &maid_secret_key); - let signature = bitcoin::sign_message::MessageSignature { - signature: secp_sig, - compressed: true, - }; - let claim = MaidClaim::new( - maid_address.to_string(), - wallet_pk_hex, - signature.to_string(), - )?; - - let transfer_hex = create_distribution(&client_token_issuer, &claim, &amount).await?; - - let transfer = Transfer::from_hex(&transfer_hex)?; - - assert!(transfer - .cashnote_redemptions(&MainSecretKey::new(wallet_sk.clone())) - .is_ok()); - - let receiver_client = Client::new(bls::SecretKey::random(), None, None, None).await?; - let tmp_path = TempDir::new()?.path().to_owned(); - let receiver_wallet = - HotWallet::load_from_path(&tmp_path, Some(MainSecretKey::new(wallet_sk)))?; - - let mut cash_notes = receiver_client.receive(&transfer, &receiver_wallet).await?; - assert_eq!(cash_notes.len(), 1); - let cash_note = cash_notes.pop().unwrap(); - - assert_eq!(cash_note.value(), amount); - - Ok(()) - } - - #[test] - fn maidclaim_isvalid() -> Result<()> { - // Signatures generated using 
electrum to ensure interoperability. - - // prvkey for addr 17ig7... is L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - // sig is valid for wallet_a signed by addr_a - const MAID_ADDR_A: &str = "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG"; - const MAID_PUBKEY_A: &str = - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc"; // DevSkim: ignore DS173237 - const WALLET_A: &str = "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a"; // DevSkim: ignore DS173237 - const SIG_A: &str = "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM="; - - // prvkey for addr 1EbjF... is L2gzGZUqifkBG3jwwkyyfos8A67VvFhyrtqKU5cWkfEpySkFbaBR - // sig is valid for wallet_b signed by addr_b - const MAID_PUBKEY_B: &str = - "031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f"; // DevSkim: ignore DS173237 - const WALLET_B: &str = "915d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_B: &str = "Hy3zUK3YiEidzE+HpdgeoRoH3lkCrOoTh59TvoOiUdfJVKKLAVUuAydgIJkOTVU8JKdvbYPGiQhf7KCiNtLRIVU="; - - // not a valid bls wallet (starting with 0) - // sig is valid for wallet_c signed by addr_a - const WALLET_C: &str = "015d803d302bc1270e20de34413c270bdc4be632880e577719c2bf7d22e2c7b44388feef17fe5ac86b5d561697f2b3bf"; // DevSkim: ignore DS173237 - const SIG_C: &str = "IE8y8KSRKw3hz/rd9dzrJLOu24sAspuJgYr6VVGCga3FQQhzOEFDKZoDdrJORRI4Rvv7vFqRARQVaBKCobYh9sc="; - - // MaidClaim::new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_A.to_string(), - ); - assert!(mc.is_ok()); - - // MaidClaim::new will fail if inputs are incorrect - // because new calls is_valid - let mc = MaidClaim::new( - MAID_ADDR_A.to_string(), - WALLET_A.to_string(), - SIG_B.to_string(), - ); - assert!(mc.is_err()); - - // valid - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: 
MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_ok()); - - // pk not matching address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_A.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature not matching message - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_A.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // signature matches message but not address - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_B.to_string(), - wallet: WALLET_B.to_string(), - signature: SIG_B.to_string(), - }; - assert!(mc.is_valid().is_err()); - - // wallet is not a valid bls key - let mc = MaidClaim { - address: MAID_ADDR_A.to_string(), - pubkey: MAID_PUBKEY_A.to_string(), - wallet: WALLET_C.to_string(), - signature: SIG_C.to_string(), - }; - assert!(mc.is_valid().is_err()); - - Ok(()) - } - - #[test] - fn pk_matches_addr() -> Result<()> { - // p2pkh compressed - assert!(maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc", // DevSkim: ignore DS173237 - )); - - // p2pkh uncompressed - assert!(maid_pk_matches_address( - "1QK8WWMcDEFUVV2zKU8GSCwwuvAFWEs2QW", - "0483f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc4327efb5ba23543c8a6e63ddc09618e11b5d0d184bb69f964712d0894c005655", // DevSkim: ignore DS173237 - )); - - // p2wpkh-p2sh - assert!(maid_pk_matches_address( - "3GErA71Kz6Tn4QCLqoaDvMxD5cLgqQLykv", - "03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a", // DevSkim: ignore DS173237 - )); - - // mismatched returns false - assert!(!maid_pk_matches_address( - "17ig7FYbSDaZZqVEjFmrGv7GSXBNLeJPNG", - 
"031bc89b9279ae36795910c0d173002504f2c22dd45368263a5f30ce68e8696e0f", // DevSkim: ignore DS173237 - )); - - Ok(()) - } - - #[test] - fn pubkey_from_sig() -> Result<()> { - // Valid message and signature produces the corresponding public key. - // Signatures generated using electrum to ensure interoperability - - // p2pkh compressed - // electrum import key - // L4DDUabuAU9AxVepwNkLBDmvrG4TXLJFDHoKPtkJdyDAPM3zHQhu - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "HxaGOcmLu1BrSwzBi+KazC6XHbX/6B1Eyf9CnJrxB/OeKdJP9Jp38s+eqfBZ73wLG1OJW0mURhAmZkCsvBJayPM=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "0383f4c6f1a3624140ba587e4ea5c6264a94d4077c1cf4ca7714bb93c67b3262bc" // DevSkim: ignore DS173237 - ); - - // p2pkh uncompressed - // electrum import key - // 5Jz2acAoqLr57YXzQuoiNS8sQtZQ3TBcVcaKsX5ybp9HtJiUSXq - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Gw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - "04952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a0418114ad86aeda109dd924629bbf929e82c6ce5be948e4d21a95575a53e1f73" // DevSkim: ignore DS173237 - ); - - // p2wpkh-p2sh uncompressed - // electrum import key - // p2wpkh-p2sh:L2NhyLEHiNbb9tBnQY5BbbwjWSZzhpZqfJ26Hynxpf5bXL9sUm73 - let pubkey = pubkey_from_signature( - "ac1e81dd3ccb28d4e7d8e551e953279d8af1ede5bbdbbb71aefb78a43206ca7827a3279160da4ee8c7296dfac72f8c8a", // DevSkim: ignore DS173237 - "Hw2YmGq5cbXVOCZKd1Uwku/kn9UWJ8QYGlho+FTXokfeNbQzINKli73rvoi39ssVN825kn5LgSdNu800e3w+eXE=", - )?; - let pubkey_hex = hex::encode(pubkey.to_bytes()); - assert_eq!( - pubkey_hex, - 
"03952005f63e148735d244dc52253586c6ed89d1692599452e7daaa2a63a88619a" // DevSkim: ignore DS173237 - ); - - Ok(()) - } -}