diff --git a/Cargo.toml b/Cargo.toml
index c0a32e11c..cd4ebdb08 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -121,3 +121,29 @@ inefficient_to_string = "deny"
 invalid_upcast_comparisons = "deny"
 large_stack_arrays = "deny"
 linkedlist = "deny"
+macro_use_imports = "deny"
+manual_instant_elapsed = "deny"
+manual_let_else = "deny"
+manual_ok_or = "deny"
+manual_string_new = "deny"
+map_unwrap_or = "deny"
+match_bool = "deny"
+match_same_arms = "deny"
+missing_fields_in_debug = "deny"
+needless_continue = "deny"
+needless_pass_by_value = "deny"
+ptr_cast_constness = "deny"
+range_minus_one = "deny"
+range_plus_one = "deny"
+redundant_closure_for_method_calls = "deny"
+redundant_else = "deny"
+string_add_assign = "deny"
+unchecked_duration_subtraction = "deny"
+uninlined_format_args = "deny"
+unnecessary_box_returns = "deny"
+unnecessary_join = "deny"
+unnecessary_wraps = "deny"
+unnested_or_patterns = "deny"
+unused_async = "deny"
+unused_self = "deny"
+zero_sized_map_values = "deny"
diff --git a/coins/bitcoin/src/tests/crypto.rs b/coins/bitcoin/src/tests/crypto.rs
index 78a7215b9..2170219c0 100644
--- a/coins/bitcoin/src/tests/crypto.rs
+++ b/coins/bitcoin/src/tests/crypto.rs
@@ -29,9 +29,9 @@ fn test_algorithm() {
     Schnorr::::new(RecommendedTranscript::new(b"bitcoin-serai sign test"));
   let sig = sign(
     &mut OsRng,
-    algo.clone(),
+    &algo,
     keys.clone(),
-    algorithm_machines(&mut OsRng, algo, &keys),
+    algorithm_machines(&mut OsRng, &algo, &keys),
     Hash::hash(MESSAGE).as_ref(),
   );
diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs
index 5c32af4e5..9d98f9fad 100644
--- a/coins/bitcoin/src/wallet/send.rs
+++ b/coins/bitcoin/src/wallet/send.rs
@@ -122,7 +122,7 @@ impl SignableTransaction {
   pub fn new(
     mut inputs: Vec,
     payments: &[(Address, u64)],
-    change: Option,
+    change: Option<&Address>,
     data: Option>,
     fee_per_weight: u64,
   ) -> Result {
@@ -140,7 +140,7 @@ impl SignableTransaction {
       }
     }
-    if data.as_ref().map(|data| data.len()).unwrap_or(0) > 80 {
+    if data.as_ref().map_or(0, Vec::len) > 80 {
       Err(TransactionError::TooMuchData)?;
     }
@@ -212,7 +212,7 @@ impl SignableTransaction {
     }
     // If there's a change address, check if there's change to give it
-    if let Some(change) = change.as_ref() {
+    if let Some(change) = change {
       let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change));
       let fee_with_change = fee_per_weight * weight_with_change;
       if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {
@@ -263,7 +263,7 @@ impl SignableTransaction {
   /// Returns None if the wrong keys are used.
   pub fn multisig(
     self,
-    keys: ThresholdKeys,
+    keys: &ThresholdKeys,
     mut transcript: RecommendedTranscript,
   ) -> Option {
     transcript.domain_separate(b"bitcoin_transaction");
diff --git a/coins/bitcoin/tests/wallet.rs b/coins/bitcoin/tests/wallet.rs
index e44759339..9eca20c78 100644
--- a/coins/bitcoin/tests/wallet.rs
+++ b/coins/bitcoin/tests/wallet.rs
@@ -91,14 +91,14 @@ fn keys() -> (HashMap>, ProjectivePoint) {
 fn sign(
   keys: &HashMap>,
-  tx: SignableTransaction,
+  tx: &SignableTransaction,
 ) -> Transaction {
   let mut machines = HashMap::new();
   for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {
     machines.insert(
       i,
       tx.clone()
-        .multisig(keys[&i].clone(), RecommendedTranscript::new(b"bitcoin-serai Test Transaction"))
+        .multisig(&keys[&i].clone(), RecommendedTranscript::new(b"bitcoin-serai Test Transaction"))
         .unwrap(),
     );
   }
@@ -206,7 +206,7 @@ async_sequential! {
     // No change
     assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok());
     // Consolidation TX
-    assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok());
+    assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok());
     // Data
     assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok());
     // No outputs
@@ -229,7 +229,7 @@ async_sequential! {
     );
     assert_eq!(
-      SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0),
+      SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0),
       Err(TransactionError::TooLowFee),
     );
@@ -274,13 +274,13 @@ async_sequential! {
     let tx = SignableTransaction::new(
       vec![output.clone(), offset_output.clone()],
       &payments,
-      Some(change_addr.clone()),
+      Some(&change_addr),
       None,
       FEE
     ).unwrap();
     let needed_fee = tx.needed_fee();
     let expected_id = tx.txid();
-    let tx = sign(&keys, tx);
+    let tx = sign(&keys, &tx);
     assert_eq!(tx.output.len(), 3);
@@ -341,10 +341,10 @@ async_sequential! {
     let tx = sign(
       &keys,
-      SignableTransaction::new(
+      &SignableTransaction::new(
         vec![output],
         &[],
-        Some(Address::::new(Network::Regtest, address_payload(key).unwrap())),
+        Some(&Address::::new(Network::Regtest, address_payload(key).unwrap())),
         Some(data.clone()),
         FEE
       ).unwrap()
diff --git a/coins/ethereum/tests/contract.rs b/coins/ethereum/tests/contract.rs
index 5577744ab..378758190 100644
--- a/coins/ethereum/tests/contract.rs
+++ b/coins/ethereum/tests/contract.rs
@@ -112,9 +112,9 @@ async fn test_ecrecover_hack() {
   let algo = IetfSchnorr::::ietf();
   let sig = sign(
     &mut OsRng,
-    algo.clone(),
+    &algo,
     keys.clone(),
-    algorithm_machines(&mut OsRng, algo, &keys),
+    algorithm_machines(&mut OsRng, &algo, &keys),
     full_message,
   );
   let mut processed_sig =
diff --git a/coins/ethereum/tests/crypto.rs b/coins/ethereum/tests/crypto.rs
index e531e4b56..f1ab08b03 100644
--- a/coins/ethereum/tests/crypto.rs
+++ b/coins/ethereum/tests/crypto.rs
@@ -47,13 +47,8 @@ fn test_signing() {
   const MESSAGE: &[u8] = b"Hello, World!";
   let algo = IetfSchnorr::::ietf();
-  let _sig = sign(
-    &mut OsRng,
-    algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, IetfSchnorr::::ietf(), &keys),
-    MESSAGE,
-  );
+  let _sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
 }
 #[test]
@@ -79,9 +74,9 @@ fn test_ecrecover_hack() {
   let algo = IetfSchnorr::::ietf();
   let sig = sign(
     &mut OsRng,
-    algo.clone(),
+    &algo,
     keys.clone(),
-    algorithm_machines(&mut OsRng, algo, &keys),
+    algorithm_machines(&mut OsRng, &algo, &keys),
     full_message,
   );
diff --git a/coins/monero/build.rs b/coins/monero/build.rs
index db15c1cfd..b10e956a5 100644
--- a/coins/monero/build.rs
+++ b/coins/monero/build.rs
@@ -28,10 +28,10 @@ fn serialize(generators_string: &mut String, points: &[EdwardsPoint]) {
 fn generators(prefix: &'static str, path: &str) {
   let generators = bulletproofs_generators(prefix.as_bytes());
   #[allow(non_snake_case)]
-  let mut G_str = "".to_string();
+  let mut G_str = String::new();
   serialize(&mut G_str, &generators.G);
   #[allow(non_snake_case)]
-  let mut H_str = "".to_string();
+  let mut H_str = String::new();
   serialize(&mut H_str, &generators.H);
   let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path);
diff --git a/coins/monero/src/bin/reserialize_chain.rs b/coins/monero/src/bin/reserialize_chain.rs
index 9f55073a9..01f94a482 100644
--- a/coins/monero/src/bin/reserialize_chain.rs
+++ b/coins/monero/src/bin/reserialize_chain.rs
@@ -239,7 +239,7 @@ mod binaries {
         assert!(batch.verify_vartime());
       }
-      println!("Deserialized, hashed, and reserialized {block_i} with {} TXs", txs_len);
+      println!("Deserialized, hashed, and reserialized {block_i} with {txs_len} TXs");
     }
   }
diff --git a/coins/monero/src/ringct/bulletproofs/mod.rs b/coins/monero/src/ringct/bulletproofs/mod.rs
index 1529680dc..df0c6ff8a 100644
--- a/coins/monero/src/ringct/bulletproofs/mod.rs
+++ b/coins/monero/src/ringct/bulletproofs/mod.rs
@@ -7,7 +7,7 @@ use std_shims::{
 use rand_core::{RngCore, CryptoRng};
-use zeroize::Zeroize;
+use zeroize::{Zeroize, Zeroizing};
 use curve25519_dalek::edwards::EdwardsPoint;
 use multiexp::BatchVerifier;
@@ -91,7 +91,7 @@ impl Bulletproofs {
       Bulletproofs::Plus(
         AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
           .unwrap()
-          .prove(rng, AggregateRangeWitness::new(outputs).unwrap())
+          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
           .unwrap(),
       )
     })
diff --git a/coins/monero/src/ringct/bulletproofs/original.rs b/coins/monero/src/ringct/bulletproofs/original.rs
index 7c1439d31..5e50c02ea 100644
--- a/coins/monero/src/ringct/bulletproofs/original.rs
+++ b/coins/monero/src/ringct/bulletproofs/original.rs
@@ -223,7 +223,7 @@ impl OriginalStruct {
     let A = normalize(&self.A);
     let S = normalize(&self.S);
-    let commitments = commitments.iter().map(|c| c.mul_by_cofactor()).collect::>();
+    let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::>();
     // Verify it
     let mut proof = Vec::with_capacity(4 + commitments.len());
diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs
index b99e5f52e..859cb1e44 100644
--- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs
+++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs
@@ -2,7 +2,7 @@ use std_shims::vec::Vec;
 use rand_core::{RngCore, CryptoRng};
-use zeroize::{Zeroize, ZeroizeOnDrop};
+use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
 use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
 use group::{
@@ -142,7 +142,7 @@ impl AggregateRangeStatement {
     A_terms.push((y_mn_plus_one, commitment_accum));
     A_terms.push((
       ((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * z.square())),
-      generators.g(),
+      Generators::g(),
     ));
     (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
@@ -151,7 +151,7 @@ impl AggregateRangeStatement {
   pub(crate) fn prove(
     self,
     rng: &mut R,
-    witness: AggregateRangeWitness,
+    witness: &AggregateRangeWitness,
   ) -> Option {
     // Check for consistency with the witness
     if self.V.len() != witness.values.len() {
@@ -202,7 +202,7 @@ impl AggregateRangeStatement {
     for (i, a_r) in a_r.0.iter().enumerate() {
       A_terms.push((*a_r, generators.generator(GeneratorsList::HBold1, i)));
     }
-    A_terms.push((alpha, generators.h()));
+    A_terms.push((alpha, Generators::h()));
     let mut A = multiexp(&A_terms);
     A_terms.zeroize();
@@ -222,7 +222,7 @@ impl AggregateRangeStatement {
     Some(AggregateRangeProof {
       A,
       wip: WipStatement::new(generators, A_hat, y)
-        .prove(rng, transcript, WipWitness::new(a_l, a_r, alpha).unwrap())
+        .prove(rng, transcript, &Zeroizing::new(WipWitness::new(a_l, a_r, alpha).unwrap()))
        .unwrap(),
     })
   }
diff --git a/coins/monero/src/ringct/bulletproofs/plus/mod.rs b/coins/monero/src/ringct/bulletproofs/plus/mod.rs
index f52677eec..6a2d7b9c4 100644
--- a/coins/monero/src/ringct/bulletproofs/plus/mod.rs
+++ b/coins/monero/src/ringct/bulletproofs/plus/mod.rs
@@ -31,8 +31,6 @@ pub(crate) enum GeneratorsList {
 // TODO: Table these
 #[derive(Clone, Debug)]
 pub(crate) struct Generators {
-  g: EdwardsPoint,
-
   g_bold1: &'static [EdwardsPoint],
   h_bold1: &'static [EdwardsPoint],
 }
@@ -47,18 +45,18 @@ impl Generators {
   #[allow(clippy::new_without_default)]
   pub(crate) fn new() -> Self {
     let gens = generators::GENERATORS();
-    Generators { g: dalek_ff_group::EdwardsPoint(crate::H()), g_bold1: &gens.G, h_bold1: &gens.H }
+    Generators { g_bold1: &gens.G, h_bold1: &gens.H }
   }
   pub(crate) fn len(&self) -> usize {
     self.g_bold1.len()
   }
-  pub(crate) fn g(&self) -> EdwardsPoint {
-    self.g
+  pub(crate) fn g() -> EdwardsPoint {
+    dalek_ff_group::EdwardsPoint(crate::H())
   }
-  pub(crate) fn h(&self) -> EdwardsPoint {
+  pub(crate) fn h() -> EdwardsPoint {
     EdwardsPoint::generator()
   }
@@ -74,11 +72,7 @@ impl Generators {
     let generators = padded_pow_of_2(generators);
     assert!(generators <= self.g_bold1.len());
-    Generators {
-      g: self.g,
-      g_bold1: &self.g_bold1[.. generators],
-      h_bold1: &self.h_bold1[.. generators],
-    }
+    Generators { g_bold1: &self.g_bold1[.. generators], h_bold1: &self.h_bold1[.. generators] }
   }
 }
diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs
index 8ef1aa90f..1bc1e85da 100644
--- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs
+++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs
@@ -189,7 +189,7 @@ impl WipStatement {
     self,
     rng: &mut R,
     mut transcript: Scalar,
-    witness: WipWitness,
+    witness: &WipWitness,
   ) -> Option {
     let WipStatement { generators, P, mut y } = self;
     #[cfg(not(debug_assertions))]
@@ -198,7 +198,7 @@ impl WipStatement {
     if generators.len() != witness.a.len() {
       return None;
     }
-    let (g, h) = (generators.g(), generators.h());
+    let (g, h) = (Generators::g(), Generators::h());
     let mut g_bold = vec![];
     let mut h_bold = vec![];
     for i in 0 .. generators.len() {
@@ -345,7 +345,7 @@ impl WipStatement {
   ) -> bool {
     let WipStatement { generators, P, y } = self;
-    let (g, h) = (generators.g(), generators.h());
+    let (g, h) = (Generators::g(), Generators::h());
     // Verify the L/R lengths
     {
diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs
index 0a6141b2d..1290e3e38 100644
--- a/coins/monero/src/ringct/clsag/mod.rs
+++ b/coins/monero/src/ringct/clsag/mod.rs
@@ -96,7 +96,7 @@ fn core(
   msg: &[u8; 32],
   D: &EdwardsPoint,
   s: &[Scalar],
-  A_c1: Mode,
+  A_c1: &Mode,
 ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
   let n = ring.len();
@@ -164,7 +164,7 @@ fn core(
     Mode::Verify(c1) => {
       start = 0;
       end = n;
-      c = c1;
+      c = *c1;
     }
   }
@@ -226,7 +226,7 @@ impl Clsag {
       s.push(random_scalar(rng));
     }
     let ((D, p, c), c1) =
-      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, Mode::Sign(r, A, AH));
+      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));
     (Clsag { D, s, c1 }, pseudo_out, p, c * z)
   }
@@ -301,7 +301,7 @@ impl Clsag {
       Err(ClsagError::InvalidD)?;
     }
-    let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, Mode::Verify(self.c1));
+    let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, &Mode::Verify(self.c1));
     if c1 != self.c1 {
       Err(ClsagError::InvalidC1)?;
     }
diff --git a/coins/monero/src/ringct/mod.rs b/coins/monero/src/ringct/mod.rs
index c86f96765..4c5a3a0b0 100644
--- a/coins/monero/src/ringct/mod.rs
+++ b/coins/monero/src/ringct/mod.rs
@@ -104,13 +104,11 @@ impl RctType {
   pub fn compact_encrypted_amounts(&self) -> bool {
     match self {
-      RctType::Null => false,
-      RctType::MlsagAggregate => false,
-      RctType::MlsagIndividual => false,
+      RctType::Null |
+      RctType::MlsagAggregate |
+      RctType::MlsagIndividual |
       RctType::Bulletproofs => false,
-      RctType::BulletproofsCompactAmount => true,
-      RctType::Clsag => true,
-      RctType::BulletproofsPlus => true,
+      RctType::BulletproofsCompactAmount | RctType::Clsag | RctType::BulletproofsPlus => true,
     }
   }
 }
@@ -151,9 +149,7 @@ impl RctBase {
       RctType::from_byte(read_byte(r)?).ok_or_else(|| io::Error::other("invalid RCT type"))?;
     match rct_type {
-      RctType::Null => {}
-      RctType::MlsagAggregate => {}
-      RctType::MlsagIndividual => {}
+      RctType::Null | RctType::MlsagAggregate | RctType::MlsagIndividual => {}
       RctType::Bulletproofs |
       RctType::BulletproofsCompactAmount |
       RctType::Clsag |
@@ -325,7 +321,7 @@ impl RctPrunable {
       RctPrunable::MlsagBorromean { borromean, .. } => {
         borromean.iter().try_for_each(|rs| rs.write(w))
       }
-      RctPrunable::MlsagBulletproofs { bulletproofs, .. } => bulletproofs.signature_write(w),
+      RctPrunable::MlsagBulletproofs { bulletproofs, .. } |
      RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
     }
   }
@@ -386,8 +382,8 @@ impl RctSignatures {
     serialized
   }
-  pub fn read(decoys: Vec, outputs: usize, r: &mut R) -> io::Result {
+  pub fn read(decoys: &[usize], outputs: usize, r: &mut R) -> io::Result {
     let base = RctBase::read(decoys.len(), outputs, r)?;
-    Ok(RctSignatures { base: base.0, prunable: RctPrunable::read(base.1, &decoys, outputs, r)? })
+    Ok(RctSignatures { base: base.0, prunable: RctPrunable::read(base.1, decoys, outputs, r)? })
   }
 }
diff --git a/coins/monero/src/rpc/http.rs b/coins/monero/src/rpc/http.rs
index e588a2625..4ed349a5c 100644
--- a/coins/monero/src/rpc/http.rs
+++ b/coins/monero/src/rpc/http.rs
@@ -97,7 +97,7 @@ impl HttpRpc {
       Err(RpcError::ConnectionError("invalid amount of passwords".to_string()))?;
     }
-    let client = Client::without_connection_pool(url.clone())
+    let client = Client::without_connection_pool(&url)
       .map_err(|_| RpcError::ConnectionError("invalid URL".to_string()))?;
     // Obtain the initial challenge, which also somewhat validates this connection
     let challenge = Self::digest_auth_challenge(
diff --git a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs
index 34aa84784..a50b9d407 100644
--- a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs
+++ b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs
@@ -23,7 +23,7 @@ fn test_aggregate_range_proof() {
     let statement = AggregateRangeStatement::new(commitment_points).unwrap();
     let witness = AggregateRangeWitness::new(&commitments).unwrap();
-    let proof = statement.clone().prove(&mut OsRng, witness).unwrap();
+    let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
     statement.verify(&mut OsRng, &mut verifier, (), proof);
   }
   assert!(verifier.verify_vartime());
diff --git a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs
index 3da9c6ade..7db2ecc8c 100644
--- a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs
+++ b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs
@@ -23,7 +23,7 @@ fn test_zero_weighted_inner_product() {
   let witness = WipWitness::new(ScalarVector::new(1), ScalarVector::new(1), Scalar::ZERO).unwrap();
   let transcript = Scalar::random(&mut OsRng);
-  let proof = statement.clone().prove(&mut OsRng, transcript, witness).unwrap();
+  let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
   let mut verifier = BatchVerifier::new(1);
   statement.verify(&mut OsRng, &mut verifier, (), transcript, proof);
@@ -37,8 +37,8 @@ fn test_weighted_inner_product() {
   let generators = Generators::new();
   for i in [1, 2, 4, 8, 16, 32] {
     let generators = generators.reduce(i);
-    let g = generators.g();
-    let h = generators.h();
+    let g = Generators::g();
+    let h = Generators::h();
     assert_eq!(generators.len(), i);
     let mut g_bold = vec![];
     let mut h_bold = vec![];
@@ -75,7 +75,7 @@ fn test_weighted_inner_product() {
     let witness = WipWitness::new(a, b, alpha).unwrap();
     let transcript = Scalar::random(&mut OsRng);
-    let proof = statement.clone().prove(&mut OsRng, transcript, witness).unwrap();
+    let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap();
     statement.verify(&mut OsRng, &mut verifier, (), transcript, proof);
   }
   assert!(verifier.verify_vartime());
diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs
index f816170c2..59e41ebf5 100644
--- a/coins/monero/src/tests/clsag.rs
+++ b/coins/monero/src/tests/clsag.rs
@@ -119,9 +119,9 @@ fn clsag_multisig() {
   sign(
     &mut OsRng,
-    algorithm.clone(),
+    &algorithm,
     keys.clone(),
-    algorithm_machines(&mut OsRng, algorithm, &keys),
+    algorithm_machines(&mut OsRng, &algorithm, &keys),
     &[1; 32],
   );
 }
diff --git a/coins/monero/src/tests/seed.rs b/coins/monero/src/tests/seed.rs
index 777daf364..646441184 100644
--- a/coins/monero/src/tests/seed.rs
+++ b/coins/monero/src/tests/seed.rs
@@ -320,7 +320,7 @@ fn test_polyseed() {
   let seed_without_accents = |seed: &str| {
     seed
       .split_whitespace()
-      .map(|w| w.chars().filter(|c| c.is_ascii()).collect::())
+      .map(|w| w.chars().filter(char::is_ascii).collect::())
       .collect::>()
       .join(" ")
   };
diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs
index dceccfb27..116e0f6a0 100644
--- a/coins/monero/src/transaction.rs
+++ b/coins/monero/src/transaction.rs
@@ -331,14 +331,14 @@ impl Transaction {
         }
       } else if prefix.version == 2 {
         rct_signatures = RctSignatures::read(
-          prefix
+          &prefix
             .inputs
             .iter()
             .map(|input| match input {
               Input::Gen(_) => 0,
               Input::ToKey { key_offsets, .. } => key_offsets.len(),
             })
-            .collect(),
+            .collect::>(),
           prefix.outputs.len(),
           r,
         )?;
diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs
index c819eb135..71e949909 100644
--- a/coins/monero/src/wallet/scan.rs
+++ b/coins/monero/src/wallet/scan.rs
@@ -356,17 +356,19 @@ impl Scanner {
       let output_key = output_key.unwrap();
       for key in [Some(Some(&tx_key)), additional.as_ref().map(|additional| additional.get(o))] {
-        let key = if let Some(Some(key)) = key {
-          key
-        } else if let Some(None) = key {
-          // This is non-standard. There were additional keys, yet not one for this output
-          // https://github.com/monero-project/monero/
-          // blob/04a1e2875d6e35e27bb21497988a6c822d319c28/
-          // src/cryptonote_basic/cryptonote_format_utils.cpp#L1062
-          // TODO: Should this return? Where does Monero set the trap handler for this exception?
-          continue;
-        } else {
-          break;
+        let key = match key {
+          Some(Some(key)) => key,
+          Some(None) => {
+            // This is non-standard. There were additional keys, yet not one for this output
+            // https://github.com/monero-project/monero/
+            // blob/04a1e2875d6e35e27bb21497988a6c822d319c28/
+            // src/cryptonote_basic/cryptonote_format_utils.cpp#L1062
+            // TODO: Should this return? Where does Monero set the trap handler for this exception?
+            continue;
+          }
+          None => {
+            break;
+          }
         };
         let (view_tag, shared_key, payment_id_xor) = shared_key(
           if self.burning_bug.is_none() { Some(uniqueness(&tx.prefix.inputs)) } else { None },
diff --git a/coins/monero/src/wallet/seed/classic.rs b/coins/monero/src/wallet/seed/classic.rs
index ffaf214e3..80c11ab32 100644
--- a/coins/monero/src/wallet/seed/classic.rs
+++ b/coins/monero/src/wallet/seed/classic.rs
@@ -134,6 +134,7 @@ fn checksum_index(words: &[Zeroizing], lang: &WordList) -> usize {
 }
 // Convert a private key to a seed
+#[allow(clippy::needless_pass_by_value)]
 fn key_to_seed(lang: Language, key: Zeroizing) -> ClassicSeed {
   let bytes = Zeroizing::new(key.to_bytes());
@@ -282,6 +283,7 @@ impl ClassicSeed {
     key_to_seed(lang, Zeroizing::new(random_scalar(rng)))
   }
+  #[allow(clippy::needless_pass_by_value)]
   pub fn from_string(words: Zeroizing) -> Result {
     let (lang, entropy) = seed_to_bytes(&words)?;
@@ -297,6 +299,7 @@ impl ClassicSeed {
     Ok(Self::from_entropy(lang, entropy).unwrap())
   }
+  #[allow(clippy::needless_pass_by_value)]
   pub fn from_entropy(lang: Language, entropy: Zeroizing<[u8; 32]>) -> Option {
     Option::from(Scalar::from_canonical_bytes(*entropy))
       .map(|scalar| key_to_seed(lang, Zeroizing::new(scalar)))
diff --git a/coins/monero/src/wallet/seed/polyseed.rs b/coins/monero/src/wallet/seed/polyseed.rs
index c6338ba50..a4f62506b 100644
--- a/coins/monero/src/wallet/seed/polyseed.rs
+++ b/coins/monero/src/wallet/seed/polyseed.rs
@@ -262,6 +262,7 @@ impl Polyseed {
   }
   /// Create a new `Polyseed` from a String.
+  #[allow(clippy::needless_pass_by_value)]
   pub fn from_string(seed: Zeroizing) -> Result {
     // Decode the seed into its polynomial coefficients
     let mut poly = [0; POLYSEED_LENGTH];
@@ -302,7 +303,7 @@ impl Polyseed {
       }
       let Some(coeff) = (if lang.has_accent {
-        let ascii = |word: &str| word.chars().filter(|c| c.is_ascii()).collect::();
+        let ascii = |word: &str| word.chars().filter(char::is_ascii).collect::();
         check_if_matches(
           lang.has_prefix,
           lang.words.iter().map(|lang_word| ascii(lang_word)),
diff --git a/coins/monero/src/wallet/send/builder.rs b/coins/monero/src/wallet/send/builder.rs
index eaa199c41..55d0fc29c 100644
--- a/coins/monero/src/wallet/send/builder.rs
+++ b/coins/monero/src/wallet/send/builder.rs
@@ -136,7 +136,7 @@ impl SignableTransactionBuilder {
       read.r_seed.clone(),
       read.inputs.clone(),
      read.payments.clone(),
-      read.change_address.clone(),
+      &read.change_address,
       read.data.clone(),
       read.fee_rate,
     )
diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs
index 616158666..9553d1877 100644
--- a/coins/monero/src/wallet/send/mod.rs
+++ b/coins/monero/src/wallet/send/mod.rs
@@ -415,7 +415,7 @@ impl SignableTransaction {
     r_seed: Option>,
     inputs: Vec<(SpendableOutput, Decoys)>,
     payments: Vec<(MoneroAddress, u64)>,
-    change: Change,
+    change: &Change,
     data: Vec>,
     fee_rate: Fee,
   ) -> Result {
diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs
index fdb23746e..f3c437e56 100644
--- a/coins/monero/src/wallet/send/multisig.rs
+++ b/coins/monero/src/wallet/send/multisig.rs
@@ -70,7 +70,7 @@ impl SignableTransaction {
   /// The height is the Monero blockchain height to synchronize around.
pub fn multisig( self, - keys: ThresholdKeys, + keys: &ThresholdKeys, mut transcript: RecommendedTranscript, ) -> Result { let mut inputs = vec![]; diff --git a/coins/monero/tests/runner.rs b/coins/monero/tests/runner.rs index cb0a38088..fa0b48df4 100644 --- a/coins/monero/tests/runner.rs +++ b/coins/monero/tests/runner.rs @@ -241,7 +241,7 @@ macro_rules! test { tx .clone() .multisig( - keys[&i].clone(), + &keys[&i], RecommendedTranscript::new(b"Monero Serai Test Transaction"), ) .unwrap(), diff --git a/common/request/src/lib.rs b/common/request/src/lib.rs index 4c738e2ef..63fb7c8d5 100644 --- a/common/request/src/lib.rs +++ b/common/request/src/lib.rs @@ -63,7 +63,7 @@ impl Client { } } - pub fn without_connection_pool(host: String) -> Result { + pub fn without_connection_pool(host: &str) -> Result { Ok(Client { connection: Connection::Connection { connector: Self::connector(), diff --git a/common/request/src/request.rs b/common/request/src/request.rs index 1117e9fd6..f6ca6f447 100644 --- a/common/request/src/request.rs +++ b/common/request/src/request.rs @@ -18,7 +18,7 @@ impl Request { let mut userpass_iter = userpass.split(':'); let username = userpass_iter.next().unwrap().to_string(); - let password = userpass_iter.next().map(str::to_string).unwrap_or_else(String::new); + let password = userpass_iter.next().map_or_else(String::new, str::to_string); zeroize::Zeroize::zeroize(&mut userpass); return Ok((username, password)); diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 810bc2751..09eab1732 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -85,13 +85,13 @@ impl FirstPreprocessDb { network: NetworkId, id_type: RecognizedIdType, id: &[u8], - preprocess: Vec>, + preprocess: &Vec>, ) { if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) { - assert_eq!(existing, preprocess, "saved a distinct first preprocess"); + assert_eq!(&existing, preprocess, "saved a distinct first preprocess"); return; } - FirstPreprocessDb::set(txn, network, id_type, id, &preprocess); + FirstPreprocessDb::set(txn, network, id_type, id, preprocess); } } @@ -114,7 +114,7 @@ impl HandoverBatchDb { } } impl QueuedBatchesDb { - pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: Transaction) { + pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) { let mut batches = Self::get(txn, set).unwrap_or_default(); batch.write(&mut batches).unwrap(); Self::set(txn, set, &batches); diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index efec10104..b05ba39a3 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -159,17 +159,17 @@ async fn handle_processor_message( // We'll only receive these if we fired GenerateKey, which we'll only do if if we're // in-set, making the Tributary relevant ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.session), - key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.session), - key_gen::ProcessorMessage::Shares { id, .. } => Some(id.session), - key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.session), - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session), + key_gen::ProcessorMessage::Commitments { id, .. } | + key_gen::ProcessorMessage::InvalidCommitments { id, .. } | + key_gen::ProcessorMessage::Shares { id, .. } | + key_gen::ProcessorMessage::InvalidShare { id, .. } | + key_gen::ProcessorMessage::GeneratedKeyPair { id, .. 
} | key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session), }, ProcessorMessage::Sign(inner_msg) => match inner_msg { // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing - sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session), - sign::ProcessorMessage::Preprocess { id, .. } => Some(id.session), + sign::ProcessorMessage::InvalidParticipant { id, .. } | + sign::ProcessorMessage::Preprocess { id, .. } | sign::ProcessorMessage::Share { id, .. } => Some(id.session), // While the Processor's Scanner will always emit Completed, that's routed through the // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and @@ -233,9 +233,9 @@ async fn handle_processor_message( None } // We'll only fire these if we are the Substrate signer, making the Tributary relevant - coordinator::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session), - coordinator::ProcessorMessage::CosignPreprocess { id, .. } => Some(id.session), - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => Some(id.session), + coordinator::ProcessorMessage::InvalidParticipant { id, .. } | + coordinator::ProcessorMessage::CosignPreprocess { id, .. } | + coordinator::ProcessorMessage::BatchPreprocess { id, .. } | coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session), coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => { let cosigned_block = CosignedBlock { @@ -486,7 +486,7 @@ async fn handle_processor_message( network, RecognizedIdType::Plan, &id.id, - preprocesses, + &preprocesses, ); vec![] @@ -566,7 +566,7 @@ async fn handle_processor_message( }; id.to_le_bytes() }, - preprocesses.into_iter().map(Into::into).collect(), + &preprocesses.into_iter().map(Into::into).collect::>(), ); let intended = Transaction::Batch { @@ -611,8 +611,7 @@ async fn handle_processor_message( // the prior Batch hasn't been verified yet... if (last_received != 0) && LastVerifiedBatchDb::get(&txn, msg.network) - .map(|last_verified| last_verified < (last_received - 1)) - .unwrap_or(true) + .map_or(true, |last_verified| last_verified < (last_received - 1)) { // Withhold this TX until we verify all prior `Batch`s queue = true; @@ -620,7 +619,7 @@ async fn handle_processor_message( } if queue { - QueuedBatchesDb::queue(&mut txn, spec.set(), intended); + QueuedBatchesDb::queue(&mut txn, spec.set(), &intended); vec![] } else { // Because this is post-verification of the handover batch, take all queued `Batch`s @@ -650,10 +649,11 @@ async fn handle_processor_message( signed: Transaction::empty_signed(), })] } + #[allow(clippy::match_same_arms)] // Allowed to preserve layout coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(), }, ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { .. } => unreachable!(), + processor_messages::substrate::ProcessorMessage::Batch { .. } | processor_messages::substrate::ProcessorMessage::SignedBatch { .. 
} => unreachable!(), }, }; @@ -823,9 +823,8 @@ async fn handle_cosigns_and_batch_publication( let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await; let mut txn = db.txn(); let mut to_publish = vec![]; - let start_id = LastVerifiedBatchDb::get(&txn, network) - .map(|already_verified| already_verified + 1) - .unwrap_or(0); + let start_id = + LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1); if let Some(last_id) = substrate::verify_published_batches::(&mut txn, network, u32::MAX).await { @@ -847,7 +846,7 @@ async fn handle_cosigns_and_batch_publication( to_publish.push((set.session, queued.remove(0))); // Re-queue the remaining batches for remaining in queued { - QueuedBatchesDb::queue(&mut txn, set, remaining); + QueuedBatchesDb::queue(&mut txn, set, &remaining); } } diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 8d83b7d37..582012327 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -59,11 +59,10 @@ pub enum P2pMessageKind { impl P2pMessageKind { fn genesis(&self) -> Option<[u8; 32]> { match self { - P2pMessageKind::KeepAlive => None, - P2pMessageKind::Tributary(genesis) => Some(*genesis), - P2pMessageKind::Heartbeat(genesis) => Some(*genesis), + P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None, + P2pMessageKind::Tributary(genesis) | + P2pMessageKind::Heartbeat(genesis) | P2pMessageKind::Block(genesis) => Some(*genesis), - P2pMessageKind::CosignedBlock => None, } } @@ -303,7 +302,7 @@ impl LibP2p { let mut time_of_last_p2p_message = Instant::now(); #[allow(clippy::needless_pass_by_ref_mut)] // False positive - async fn broadcast_raw( + fn broadcast_raw( p2p: &mut Swarm, time_of_last_p2p_message: &mut Instant, genesis: Option<[u8; 32]>, @@ -364,7 +363,7 @@ impl LibP2p { &mut time_of_last_p2p_message, genesis, msg, - ).await; + ); } // Handle new incoming messages @@ -416,7 +415,7 @@ impl LibP2p { &mut time_of_last_p2p_message, None, P2pMessageKind::KeepAlive.serialize() - ).await; + ); } } } @@ -689,16 +688,8 @@ pub async fn handle_p2p_task( let msg = p2p.receive().await; match msg.kind { P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - P2pMessageKind::Heartbeat(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } + P2pMessageKind::Tributary(genesis) | + P2pMessageKind::Heartbeat(genesis) | P2pMessageKind::Block(genesis) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); diff --git a/coordinator/src/substrate/db.rs b/coordinator/src/substrate/db.rs index 0f1a05647..6f000f760 100644 --- a/coordinator/src/substrate/db.rs +++ b/coordinator/src/substrate/db.rs @@ -18,7 +18,7 @@ pub use inner_db::{NextBlock, BatchInstructionsHashDb}; pub struct HandledEvent; impl HandledEvent { fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 { - inner_db::HandledEvent::get(getter, block).map(|last| last + 1).unwrap_or(0) + inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1) } pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool { let next = Self::next_to_handle_event(getter, block); diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index da48423d0..93b49a451 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -396,9 +396,8 @@ pub 
async fn scan_task( Ok(latest) => { if latest.header.number >= next_substrate_block { return latest; - } else { - sleep(Duration::from_secs(3)).await; } + sleep(Duration::from_secs(3)).await; } Err(e) => { log::error!("couldn't communicate with serai node: {e}"); @@ -493,7 +492,7 @@ pub(crate) async fn verify_published_batches( ) -> Option { // TODO: Localize from MainDb to SubstrateDb let last = crate::LastVerifiedBatchDb::get(txn, network); - for id in last.map(|last| last + 1).unwrap_or(0) ..= optimistic_up_to { + for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to { let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else { break; }; diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs index 513418bdd..d41867c95 100644 --- a/coordinator/src/tests/tributary/mod.rs +++ b/coordinator/src/tests/tributary/mod.rs @@ -60,7 +60,7 @@ fn random_sign_data(value: RW) { - assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); +fn test_read_write(value: &RW) { + assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); } #[test] @@ -102,36 +102,36 @@ fn tx_size_limit() { #[test] fn serialize_sign_data() { - fn test_read_write(value: SignData) { + fn test_read_write(value: &SignData) { let mut buf = vec![]; value.write(&mut buf).unwrap(); - assert_eq!(value, SignData::read(&mut buf.as_slice()).unwrap()) + assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap()) } let mut plan = [0; 3]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 8]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 24]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, @@ -140,7 +140,7 @@ fn serialize_sign_data() { #[test] fn serialize_transaction() { - test_read_write(Transaction::RemoveParticipantDueToDkg { + test_read_write(&Transaction::RemoveParticipantDueToDkg { attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(), participant: frost::Participant::new( u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), @@ -155,7 +155,7 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut temp); commitments.push(temp); } - test_read_write(Transaction::DkgCommitments { + test_read_write(&Transaction::DkgCommitments { attempt: random_u32(&mut OsRng), commitments, signed: random_signed_with_nonce(&mut OsRng, 0), @@ -170,7 +170,7 @@ fn serialize_transaction() { // Create a valid vec of shares let mut shares = vec![]; // Create up to 150 participants - for _ in 0 .. ((OsRng.next_u64() % 150) + 1) { + for _ in 0 ..= (OsRng.next_u64() % 150) { // Give each sender multiple shares let mut sender_shares = vec![]; for _ in 0 .. 
amount_of_shares { @@ -181,7 +181,7 @@ fn serialize_transaction() { shares.push(sender_shares); } - test_read_write(Transaction::DkgShares { + test_read_write(&Transaction::DkgShares { attempt: random_u32(&mut OsRng), shares, confirmation_nonces: { @@ -194,7 +194,7 @@ fn serialize_transaction() { } for i in 0 .. 2 { - test_read_write(Transaction::InvalidDkgShare { + test_read_write(&Transaction::InvalidDkgShare { attempt: random_u32(&mut OsRng), accuser: frost::Participant::new( u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), @@ -213,7 +213,7 @@ fn serialize_transaction() { }); } - test_read_write(Transaction::DkgConfirmed { + test_read_write(&Transaction::DkgConfirmed { attempt: random_u32(&mut OsRng), confirmation_share: { let mut share = [0; 32]; @@ -226,20 +226,20 @@ fn serialize_transaction() { { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); - test_read_write(Transaction::CosignSubstrateBlock(block)); + test_read_write(&Transaction::CosignSubstrateBlock(block)); } { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(Transaction::Batch { block, batch }); + test_read_write(&Transaction::Batch { block, batch }); } - test_read_write(Transaction::SubstrateBlock(OsRng.next_u64())); + test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64())); { let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(Transaction::SubstrateSign(random_sign_data( + test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(batch), Label::Preprocess, @@ -247,7 +247,7 @@ fn serialize_transaction() { } { let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(Transaction::SubstrateSign(random_sign_data( + test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(batch), Label::Share, @@ -257,12 +257,12 @@ fn serialize_transaction() { { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); + test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); + test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); } { @@ -270,7 +270,7 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut plan); let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()]; OsRng.fill_bytes(&mut tx_hash); - test_read_write(Transaction::SignCompleted { + test_read_write(&Transaction::SignCompleted { plan, tx_hash, first_signer: random_signed_with_nonce(&mut OsRng, 2).signer, diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index a7f3ea928..8ac1d2c38 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -204,18 +204,18 @@ impl< Accumulation::NotReady } - async fn handle_data( + fn handle_data( &mut self, removed: &[::G], data_spec: &DataSpecification, - bytes: Vec, + bytes: &Vec, signed: &Signed, ) -> Accumulation { let genesis = self.spec.genesis(); let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else { // Premature publication of a valid ID/publication of an invalid ID - self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt").await; + 
self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt"); return Accumulation::NotReady; }; @@ -223,7 +223,7 @@ impl< // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a // cheap check to leave in for safety if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { - self.fatal_slash(signed.signer.to_bytes(), "published data multiple times").await; + self.fatal_slash(signed.signer.to_bytes(), "published data multiple times"); return Accumulation::NotReady; } @@ -239,12 +239,10 @@ impl< } // If the attempt is greater, this is a premature publication, full slash if data_spec.attempt > curr_attempt { - self - .fatal_slash( - signed.signer.to_bytes(), - "published data with an attempt which hasn't started", - ) - .await; + self.fatal_slash( + signed.signer.to_bytes(), + "published data with an attempt which hasn't started", + ); return Accumulation::NotReady; } @@ -254,10 +252,10 @@ impl< // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data - self.accumulate(removed, data_spec, signed.signer, &bytes) + self.accumulate(removed, data_spec, signed.signer, bytes) } - async fn check_sign_data_len( + fn check_sign_data_len( &mut self, removed: &[::G], signer: ::G, @@ -265,12 +263,10 @@ impl< ) -> Result<(), ()> { let signer_i = self.spec.i(removed, signer).unwrap(); if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { - self - .fatal_slash( - signer.to_bytes(), - "signer published a distinct amount of sign data than they had shares", - ) - .await; + self.fatal_slash( + signer.to_bytes(), + "signer published a distinct amount of sign data than they had shares", + ); Err(())?; } Ok(()) @@ -292,34 +288,28 @@ impl< } match tx { - Transaction::RemoveParticipantDueToDkg { attempt, participant } => { - self - .fatal_slash_with_participant_index( - &removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| { - panic!( - "removed a participant due to a provided transaction with an attempt not {}", - "locally handled?" - ) - }), - participant, - "RemoveParticipantDueToDkg Provided TX", - ) - .await - } + Transaction::RemoveParticipantDueToDkg { attempt, participant } => self + .fatal_slash_with_participant_index( + &removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| { + panic!( + "removed a participant due to a provided transaction with an attempt not {}", + "locally handled?" 
+ ) + }), + participant, + "RemoveParticipantDueToDkg Provided TX", + ), Transaction::DkgCommitments { attempt, commitments, signed } => { let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt") - .await; + self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt"); return; }; - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()).await - else { + let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else { return; }; let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; - match self.handle_data(&removed, &data_spec, commitments.encode(), &signed).await { + match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) { Accumulation::Ready(DataSet::Participating(mut commitments)) => { log::info!("got all DkgCommitments for {}", hex::encode(genesis)); unflatten(self.spec, &removed, &mut commitments); @@ -343,12 +333,10 @@ impl< Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt") - .await; + self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt"); return; }; - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()).await else { + let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else { return; }; @@ -359,7 +347,7 @@ impl< let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); for shares in &shares { if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) { - self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares").await; + self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares"); return; } } @@ -419,7 +407,7 @@ impl< let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); - match self.handle_data(&removed, &data_spec, encoded_data, &signed).await { + match self.handle_data(&removed, &data_spec, &encoded_data, &signed) { Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { log::info!("got all DkgShares for {}", hex::encode(genesis)); @@ -479,34 +467,27 @@ impl< Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { self - .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt") - .await; + .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt"); return; }; let range = self.spec.i(&removed, signed.signer).unwrap(); if !range.contains(&accuser) { - self - .fatal_slash( - signed.signer.to_bytes(), - "accused with a Participant index which wasn't theirs", - ) - .await; + self.fatal_slash( + signed.signer.to_bytes(), + "accused with a Participant index which wasn't theirs", + ); return; } if range.contains(&faulty) { - self - .fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare") - .await; + self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare"); return; } let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else 
{ - self - .fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare had a non-existent faulty participant", - ) - .await; + self.fatal_slash( + signed.signer.to_bytes(), + "InvalidDkgShare had a non-existent faulty participant", + ); return; }; self @@ -526,15 +507,13 @@ impl< Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt") - .await; + self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt"); return; }; let data_spec = DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; - match self.handle_data(&removed, &data_spec, confirmation_share.to_vec(), &signed).await { + match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) { Accumulation::Ready(DataSet::Participating(shares)) => { log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); @@ -556,9 +535,7 @@ impl< let sig = match confirmer.complete(preprocesses, &key_pair, shares) { Ok(sig) => sig, Err(p) => { - self - .fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share") - .await; + self.fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share"); return; } }; @@ -641,16 +618,14 @@ impl< let Some(removed) = crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) else { - self - .fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ) - .await; + self.fatal_slash( + data.signed.signer.to_bytes(), + "signing despite not having set keys on substrate", + ); return; }; let signer = data.signed.signer; - let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()).await else { + let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else { return; }; let expected_len = match data.label { @@ -659,12 +634,10 @@ impl< }; for data in &data.data { if data.len() != expected_len { - self - .fatal_slash( - signer.to_bytes(), - "unexpected length data for substrate signing protocol", - ) - .await; + self.fatal_slash( + signer.to_bytes(), + "unexpected length data for substrate signing protocol", + ); return; } } @@ -675,7 +648,7 @@ impl< attempt: data.attempt, }; let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await + self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) else { return; }; @@ -703,16 +676,13 @@ impl< let Some(removed) = crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) else { - self - .fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ) - .await; + self.fatal_slash( + data.signed.signer.to_bytes(), + "signing despite not having set keys on substrate", + ); return; }; - let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()).await - else { + let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else { return; }; @@ -722,7 +692,7 @@ impl< attempt: data.attempt, }; if let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await + self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) { unflatten(self.spec, &removed, &mut results); let id = @@ -750,9 +720,7 @@ impl< ); if 
AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() { - self - .fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed") - .await; + self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed"); return; }; diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index a8b9b54c6..e17f9c4f8 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -192,7 +192,7 @@ impl< P: P2p, > TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P> { - pub async fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) { + pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) { // TODO: If this fatal slash puts the remaining set below the threshold, spin let genesis = self.spec.genesis(); @@ -209,7 +209,7 @@ impl< // Tributary post-DKG // https://github.com/serai-dex/serai/issues/426 - pub async fn fatal_slash_with_participant_index( + pub fn fatal_slash_with_participant_index( &mut self, removed: &[::G], i: Participant, @@ -227,7 +227,7 @@ impl< } let validator = validator.unwrap(); - self.fatal_slash(validator.to_bytes(), reason).await; + self.fatal_slash(validator.to_bytes(), reason); } async fn handle(mut self) { @@ -240,10 +240,9 @@ impl< // Since the evidence is on the chain, it should already have been validated // We can just punish the signer let data = match ev { - Evidence::ConflictingMessages(first, second) => (first, Some(second)), + Evidence::ConflictingMessages(first, second) | Evidence::ConflictingPrecommit(first, second) => (first, Some(second)), - Evidence::InvalidPrecommit(first) => (first, None), - Evidence::InvalidValidRound(first) => (first, None), + Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None), }; let msgs = ( decode_signed_message::>(&data.0).unwrap(), @@ -259,9 +258,7 @@ impl< // Since anything with evidence is fundamentally faulty behavior, not just temporal // errors, mark the node as fatally slashed - self - .fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {:?}", msgs)) - .await; + self.fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}")); } TributaryTransaction::Application(tx) => { self.handle_application_tx(tx).await; @@ -348,8 +345,7 @@ impl< // Check if the cosigner has a signature from our set for this block/a newer one let latest_cosign = crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) - .map(|cosign| cosign.block_number) - .unwrap_or(0); + .map_or(0, |cosign| cosign.block_number); if latest_cosign < block_number { // Instruct the processor to start the next attempt self diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs index e327ed3a5..45d95b1a9 100644 --- a/coordinator/src/tributary/signing_protocol.rs +++ b/coordinator/src/tributary/signing_protocol.rs @@ -184,7 +184,6 @@ impl SigningProtocol<'_, T, C> { } fn complete_internal( - &mut self, machine: AlgorithmSignatureMachine, shares: HashMap>, ) -> Result<[u8; 64], Participant> { @@ -251,6 +250,8 @@ fn threshold_i_map_to_keys_and_musig_i_map( (participants, map) } +type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>; + pub(crate) struct DkgConfirmer<'a, T: DbTxn> { key: &'a Zeroizing<::F>, spec: &'a TributarySpec, @@ -271,7 +272,7 @@ impl DkgConfirmer<'_, T> { let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?; 
Some(DkgConfirmer { key, spec, removed, txn, attempt }) } - fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 12], u32)> { + fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { let context = (b"DkgConfirmer", self.attempt); SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } } @@ -323,6 +324,6 @@ impl DkgConfirmer<'_, T> { .expect("trying to complete a machine which failed to preprocess") .0; - self.signing_protocol().complete_internal(machine, shares) + DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares) } } diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs index 185bb7541..7749102ab 100644 --- a/coordinator/src/tributary/transaction.rs +++ b/coordinator/src/tributary/transaction.rs @@ -489,7 +489,7 @@ impl ReadWrite for Transaction { writer.write_all(&u16::from(*faulty).to_le_bytes())?; // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0); + assert!(blame.as_ref().map_or(1, Vec::len) != 0); let blame_len = u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); writer.write_all(&blame_len.to_le_bytes())?; @@ -547,15 +547,9 @@ impl TransactionTrait for Transaction { match self { Transaction::RemoveParticipantDueToDkg { .. } => TransactionKind::Provided("remove"), - Transaction::DkgCommitments { attempt, commitments: _, signed } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::DkgShares { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::InvalidDkgShare { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } + Transaction::DkgCommitments { attempt, commitments: _, signed } | + Transaction::DkgShares { attempt, signed, .. } | + Transaction::InvalidDkgShare { attempt, signed, .. } | Transaction::DkgConfirmed { attempt, signed, .. } => { TransactionKind::Signed((b"dkg", attempt).encode(), signed) } @@ -625,8 +619,7 @@ impl Transaction { Transaction::DkgCommitments { .. } => 0, Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } => 2, - Transaction::DkgConfirmed { .. } => 2, + Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), @@ -635,6 +628,7 @@ impl Transaction { Transaction::SubstrateSign(data) => data.label.nonce(), Transaction::Sign(data) => data.label.nonce(), + Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), }; @@ -643,9 +637,9 @@ impl Transaction { match tx { Transaction::RemoveParticipantDueToDkg { .. } => panic!("signing RemoveParticipant"), - Transaction::DkgCommitments { ref mut signed, .. } => signed, - Transaction::DkgShares { ref mut signed, .. } => signed, - Transaction::InvalidDkgShare { ref mut signed, .. } => signed, + Transaction::DkgCommitments { ref mut signed, .. } | + Transaction::DkgShares { ref mut signed, .. } | + Transaction::InvalidDkgShare { ref mut signed, .. } | Transaction::DkgConfirmed { ref mut signed, .. } => signed, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), @@ -655,6 +649,7 @@ impl Transaction { Transaction::SubstrateSign(ref mut data) => &mut data.signed, Transaction::Sign(ref mut data) => &mut data.signed, + Transaction::SignCompleted { .. 
} => panic!("signing SignCompleted"), }, ) diff --git a/coordinator/tributary/src/block.rs b/coordinator/tributary/src/block.rs index f218671c3..6b9a0543f 100644 --- a/coordinator/tributary/src/block.rs +++ b/coordinator/tributary/src/block.rs @@ -174,7 +174,7 @@ impl Block { last_block: [u8; 32], mut locally_provided: HashMap<&'static str, VecDeque>, get_and_increment_nonce: &mut G, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, commit: impl Fn(u32) -> Option>, unsigned_in_chain: impl Fn([u8; 32]) -> bool, provided_in_chain: impl Fn([u8; 32]) -> bool, // TODO: merge this with unsigned_on_chain? @@ -217,7 +217,7 @@ impl Block { Err(BlockError::ProvidedAlreadyIncluded)?; } - if let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) { + if let Some(local) = locally_provided.get_mut(order).and_then(VecDeque::pop_front) { // Since this was a provided TX, it must be an application TX let Transaction::Application(tx) = tx else { Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? @@ -250,12 +250,10 @@ impl Block { last_tx_order = current_tx_order; match tx { - Transaction::Tendermint(tx) => { - match verify_tendermint_tx::(tx, schema.clone(), &commit) { - Ok(()) => {} - Err(e) => Err(BlockError::TransactionError(e))?, - } - } + Transaction::Tendermint(tx) => match verify_tendermint_tx::(tx, schema, &commit) { + Ok(()) => {} + Err(e) => Err(BlockError::TransactionError(e))?, + }, Transaction::Application(tx) => { match verify_transaction(tx, genesis, get_and_increment_nonce) { Ok(()) => {} diff --git a/coordinator/tributary/src/blockchain.rs b/coordinator/tributary/src/blockchain.rs index 8b8653a94..7063cea9b 100644 --- a/coordinator/tributary/src/blockchain.rs +++ b/coordinator/tributary/src/blockchain.rs @@ -139,25 +139,23 @@ impl Blockchain { order: &str, ) -> bool { let local_key = ProvidedTransactions::::locally_provided_quantity_key(genesis, order); - let local = - db.get(local_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + let local = db.get(local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let block_key = ProvidedTransactions::::block_provided_quantity_key(genesis, block, order); - let block = - db.get(block_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + let block = db.get(block_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); local >= block } pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] { - db.get(Self::tip_key(genesis)).map(|bytes| bytes.try_into().unwrap()).unwrap_or(genesis) + db.get(Self::tip_key(genesis)).map_or(genesis, |bytes| bytes.try_into().unwrap()) } pub(crate) fn add_transaction( &mut self, internal: bool, tx: Transaction, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, ) -> Result { let db = self.db.as_ref().unwrap(); let genesis = self.genesis; @@ -177,8 +175,7 @@ impl Blockchain { if self.participants.contains(&signer) { Some( db.get(Self::next_nonce_key(&self.genesis, &signer, &order)) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0), + .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { None @@ -211,15 +208,14 @@ impl Blockchain { .as_ref() .unwrap() .get(Self::next_nonce_key(&self.genesis, signer, order)) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0), + .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { None } } - pub(crate) fn build_block(&mut self, schema: 
N::SignatureScheme) -> Block { + pub(crate) fn build_block(&mut self, schema: &N::SignatureScheme) -> Block { let block = Block::new( self.tip, self.provided.transactions.values().flatten().cloned().collect(), @@ -233,7 +229,7 @@ impl Blockchain { pub(crate) fn verify_block( &self, block: &Block, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, allow_non_local_provided: bool, ) -> Result<(), BlockError> { let db = self.db.as_ref().unwrap(); @@ -258,8 +254,7 @@ impl Blockchain { let key = Self::next_nonce_key(&self.genesis, signer, order); let next = txn .get(&key) - .map(|next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap())) - .unwrap_or(0); + .map_or(0, |next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap())); txn.put(key, (next + 1).to_le_bytes()); Some(next) } else { @@ -282,7 +277,7 @@ impl Blockchain { &mut self, block: &Block, commit: Vec, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, ) -> Result<(), BlockError> { self.verify_block::(block, schema, true)?; diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 4610f5f29..dac7f4beb 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -190,7 +190,7 @@ impl Tributary { start_time }; let proposal = TendermintBlock( - blockchain.build_block::>(validators.clone()).serialize(), + blockchain.build_block::>(&validators).serialize(), ); let blockchain = Arc::new(RwLock::new(blockchain)); @@ -273,7 +273,7 @@ impl Tributary { let res = self.network.blockchain.write().await.add_transaction::>( true, tx, - self.network.signature_scheme(), + &self.network.signature_scheme(), ); if res == Ok(true) { self.network.p2p.broadcast(self.genesis, to_broadcast).await; @@ -344,7 +344,7 @@ impl Tributary { self.network.blockchain.write().await.add_transaction::>( false, tx, - self.network.signature_scheme(), + &self.network.signature_scheme(), ); log::debug!("received transaction message. 
valid new transaction: {res:?}"); res == Ok(true) diff --git a/coordinator/tributary/src/mempool.rs b/coordinator/tributary/src/mempool.rs index a723529ce..344d45436 100644 --- a/coordinator/tributary/src/mempool.rs +++ b/coordinator/tributary/src/mempool.rs @@ -112,7 +112,7 @@ impl Mempool { blockchain_next_nonce: F, internal: bool, tx: Transaction, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, unsigned_in_chain: impl Fn([u8; 32]) -> bool, commit: impl Fn(u32) -> Option>, ) -> Result { diff --git a/coordinator/tributary/src/merkle.rs b/coordinator/tributary/src/merkle.rs index e9322b70e..2a3ee3a12 100644 --- a/coordinator/tributary/src/merkle.rs +++ b/coordinator/tributary/src/merkle.rs @@ -17,13 +17,7 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] { [ b"branch_hash".as_ref(), hashes[i].as_ref(), - hashes - .get(i + 1) - .map(|hash| { - let res: &[u8] = hash.as_ref(); - res - }) - .unwrap_or(zero.as_ref()), + hashes.get(i + 1).map_or(zero.as_ref(), AsRef::as_ref), ] .concat(), )); @@ -33,5 +27,5 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] { hashes = interim; } - hashes.first().copied().map(Into::into).unwrap_or(zero) + hashes.first().copied().map_or(zero, Into::into) } diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 42e1e6ae6..103286afb 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -103,17 +103,11 @@ impl ProvidedTransactions { // get local and on-chain tx numbers let local_key = Self::locally_provided_quantity_key(&self.genesis, order); - let mut local_quantity = self - .db - .get(&local_key) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0); + let mut local_quantity = + self.db.get(&local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); - let on_chain_quantity = self - .db - .get(on_chain_key) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0); + let on_chain_quantity = + self.db.get(on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let current_provided_key = self.current_provided_key(); @@ -158,7 +152,7 @@ impl ProvidedTransactions { block: [u8; 32], tx: [u8; 32], ) { - if let Some(next_tx) = self.transactions.get_mut(order).and_then(|queue| queue.pop_front()) { + if let Some(next_tx) = self.transactions.get_mut(order).and_then(VecDeque::pop_front) { assert_eq!(next_tx.hash(), tx); let current_provided_key = self.current_provided_key(); @@ -184,11 +178,8 @@ impl ProvidedTransactions { // bump the on-chain tx number. 
let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); let block_order_key = Self::block_provided_quantity_key(&self.genesis, &block, order); - let mut on_chain_quantity = self - .db - .get(&on_chain_key) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0); + let mut on_chain_quantity = + self.db.get(&on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let this_provided_id = on_chain_quantity; txn.put(Self::on_chain_provided_key(&self.genesis, order, this_provided_id), tx); diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index 5662c1ed6..36f381c97 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -348,7 +348,7 @@ impl Network for TendermintNetwork if self.blockchain.write().await.add_transaction::( true, Transaction::Tendermint(tx), - self.signature_scheme(), + &self.signature_scheme(), ) == Ok(true) { self.p2p.broadcast(signer.genesis, to_broadcast).await; @@ -362,7 +362,7 @@ impl Network for TendermintNetwork .blockchain .read() .await - .verify_block::(&block, self.signature_scheme(), false) + .verify_block::(&block, &self.signature_scheme(), false) .map_err(|e| match e { BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, _ => { @@ -398,7 +398,7 @@ impl Network for TendermintNetwork let block_res = self.blockchain.write().await.add_block::( &block, encoded_commit.clone(), - self.signature_scheme(), + &self.signature_scheme(), ); match block_res { Ok(()) => { @@ -425,7 +425,7 @@ impl Network for TendermintNetwork *self.to_rebroadcast.write().await = vec![]; Some(TendermintBlock( - self.blockchain.write().await.build_block::(self.signature_scheme()).serialize(), + self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), )) } } diff --git a/coordinator/tributary/src/tendermint/tx.rs b/coordinator/tributary/src/tendermint/tx.rs index 99d6015d2..328ff3868 100644 --- a/coordinator/tributary/src/tendermint/tx.rs +++ b/coordinator/tributary/src/tendermint/tx.rs @@ -88,7 +88,7 @@ fn decode_and_verify_signed_message( // re-implements an entire foreign library's checks for malicious behavior). 
pub(crate) fn verify_tendermint_tx( tx: &TendermintTx, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, commit: impl Fn(u32) -> Option>, ) -> Result<(), TransactionError> { tx.verify()?; @@ -98,8 +98,8 @@ pub(crate) fn verify_tendermint_tx( TendermintTx::SlashEvidence(ev) => { match ev { Evidence::ConflictingMessages(first, second) => { - let first = decode_and_verify_signed_message::(first, &schema)?.msg; - let second = decode_and_verify_signed_message::(second, &schema)?.msg; + let first = decode_and_verify_signed_message::(first, schema)?.msg; + let second = decode_and_verify_signed_message::(second, schema)?.msg; // Make sure they're distinct messages, from the same sender, within the same block if (first == second) || (first.sender != second.sender) || (first.block != second.block) { @@ -112,8 +112,8 @@ pub(crate) fn verify_tendermint_tx( } } Evidence::ConflictingPrecommit(first, second) => { - let first = decode_and_verify_signed_message::(first, &schema)?.msg; - let second = decode_and_verify_signed_message::(second, &schema)?.msg; + let first = decode_and_verify_signed_message::(first, schema)?.msg; + let second = decode_and_verify_signed_message::(second, schema)?.msg; if (first.sender != second.sender) || (first.block != second.block) { Err(TransactionError::InvalidContent)?; @@ -136,7 +136,7 @@ pub(crate) fn verify_tendermint_tx( Err(TransactionError::InvalidContent)? } Evidence::InvalidPrecommit(msg) => { - let msg = decode_and_verify_signed_message::(msg, &schema)?.msg; + let msg = decode_and_verify_signed_message::(msg, schema)?.msg; let Data::Precommit(Some((id, sig))) = &msg.data else { Err(TransactionError::InvalidContent)? @@ -173,7 +173,7 @@ pub(crate) fn verify_tendermint_tx( } } Evidence::InvalidValidRound(msg) => { - let msg = decode_and_verify_signed_message::(msg, &schema)?.msg; + let msg = decode_and_verify_signed_message::(msg, schema)?.msg; let Data::Proposal(Some(vr), _) = &msg.data else { Err(TransactionError::InvalidContent)? 
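Note on the recurring pattern above: these hunks repeatedly switch parameters such as `schema: N::SignatureScheme` to `&N::SignatureScheme`, so call sites pass `&validators` or `&self.signature_scheme()` instead of cloning the scheme for every block or transaction verified. This is the `needless_pass_by_value` lint newly added to the workspace deny list, and the same borrow-instead-of-clone reasoning drives the other by-reference signatures in this patch (e.g. `&ThresholdKeys`, `&Vectors`). A minimal stand-alone sketch of the idea, using a hypothetical `VerifierState` type rather than code from this patch:

// Sketch only: `VerifierState` is a stand-in for any verifier state shared
// across many calls (like the signature scheme above); it is not a type from
// this repository.
#[derive(Clone)]
struct VerifierState {
  keys: Vec<[u8; 32]>,
}

// Before: taking the state by value forces every caller to clone it.
fn verify_by_value(state: VerifierState, msg: &[u8]) -> bool {
  !state.keys.is_empty() && !msg.is_empty()
}

// After: borrowing lets a single instance be reused for every item checked.
fn verify_by_ref(state: &VerifierState, msg: &[u8]) -> bool {
  !state.keys.is_empty() && !msg.is_empty()
}

fn main() {
  let state = VerifierState { keys: vec![[0; 32]] };
  let txs: Vec<Vec<u8>> = vec![vec![1], vec![2], vec![3]];
  // The by-value form clones the state once per transaction; the
  // by-reference form borrows the same instance for all of them.
  assert!(txs.iter().all(|tx| verify_by_value(state.clone(), tx)));
  assert!(txs.iter().all(|tx| verify_by_ref(&state, tx)));
}
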
diff --git a/coordinator/tributary/src/tests/block.rs b/coordinator/tributary/src/tests/block.rs index 2e16f6605..0df72e6da 100644 --- a/coordinator/tributary/src/tests/block.rs +++ b/coordinator/tributary/src/tests/block.rs @@ -89,7 +89,7 @@ fn empty_block() { LAST, HashMap::new(), &mut |_, _| None, - validators, + &validators, commit, unsigned_in_chain, provided_in_chain, @@ -129,7 +129,7 @@ fn duplicate_nonces() { last_nonce += 1; Some(res) }, - validators.clone(), + &validators, commit, unsigned_in_chain, provided_in_chain, diff --git a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary/src/tests/blockchain.rs index a7ef1e877..137ed222d 100644 --- a/coordinator/tributary/src/tests/blockchain.rs +++ b/coordinator/tributary/src/tests/blockchain.rs @@ -44,12 +44,12 @@ fn block_addition() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (db, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(block.header.parent, genesis); assert_eq!(block.header.transactions, [0; 32]); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); assert_eq!(blockchain.block_number(), 1); assert_eq!( @@ -64,21 +64,21 @@ fn invalid_block() { let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (_, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); // Mutate parent { #[allow(clippy::redundant_clone)] // False positive let mut block = block.clone(); block.header.parent = Blake2s256::digest(block.header.parent).into(); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } // Mutate tranactions merkle { let mut block = block; block.header.transactions = Blake2s256::digest(block.header.transactions).into(); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } let key = Zeroizing::new(::F::random(&mut OsRng)); @@ -89,7 +89,7 @@ fn invalid_block() { // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } // Run the rest of the tests with them as a participant @@ -99,22 +99,22 @@ fn invalid_block() { { let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); } { // Add a valid transaction let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); blockchain - .add_transaction::(true, Transaction::Application(tx.clone()), validators.clone()) + .add_transaction::(true, 
Transaction::Application(tx.clone()), &validators) .unwrap(); - let mut block = blockchain.build_block::(validators.clone()); + let mut block = blockchain.build_block::(&validators); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // And verify mutating the transactions merkle now causes a failure block.header.transactions = merkle(&[]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { @@ -122,24 +122,22 @@ fn invalid_block() { let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5); // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { // Invalid signature let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); - blockchain - .add_transaction::(true, Transaction::Application(tx), validators.clone()) - .unwrap(); - let mut block = blockchain.build_block::(validators.clone()); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); + let mut block = blockchain.build_block::(&validators); + blockchain.verify_block::(&block, &validators, false).unwrap(); match &mut block.transactions[0] { Transaction::Application(tx) => { tx.1.signature.s += ::F::ONE; } _ => panic!("non-signed tx found"), } - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); // Make sure this isn't because the merkle changed due to the transaction hash including the // signature (which it explicitly isn't allowed to anyways) @@ -166,12 +164,10 @@ fn signed_transaction() { panic!("tendermint tx found"); }; let next_nonce = blockchain.next_nonce(&signer, &[]).unwrap(); - blockchain - .add_transaction::(true, Transaction::Application(tx), validators.clone()) - .unwrap(); + blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); assert_eq!(next_nonce + 1, blockchain.next_nonce(&signer, &[]).unwrap()); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(block, Block::new(blockchain.tip(), vec![], mempool.clone())); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -185,8 +181,8 @@ fn signed_transaction() { ); // Verify and add the block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -233,21 +229,21 @@ fn provided_transaction() { { // Non-provided transactions should fail verification because we don't have them locally. 
let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); // Provided transactions should pass verification blockchain.provide_transaction(tx.clone()).unwrap(); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // add_block should work for verified blocks - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); // The provided transaction should no longer considered provided but added to chain, // causing this error assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false), + blockchain.verify_block::(&block, &validators, false), Err(BlockError::ProvidedAlreadyIncluded) ); } @@ -262,11 +258,11 @@ fn provided_transaction() { // add_block DOES NOT fail for unverified provided transactions if told to add them, // since now we can have them later. let block1 = Block::new(blockchain.tip(), vec![tx1.clone(), tx3.clone()], vec![]); - assert!(blockchain.add_block::(&block1, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block1, vec![], &validators).is_ok()); // in fact, we can have many blocks that have provided txs that we don't have locally. let block2 = Block::new(blockchain.tip(), vec![tx2.clone(), tx4.clone()], vec![]); - assert!(blockchain.add_block::(&block2, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block2, vec![], &validators).is_ok()); // make sure we won't return ok for the block before we actually got the txs let TransactionKind::Provided(order) = tx1.kind() else { panic!("tx wasn't provided") }; @@ -357,11 +353,9 @@ async fn tendermint_evidence_tx() { let Transaction::Tendermint(tx) = tx else { panic!("non-tendermint tx found"); }; - blockchain - .add_transaction::(true, Transaction::Tendermint(tx), validators.clone()) - .unwrap(); + blockchain.add_transaction::(true, Transaction::Tendermint(tx), &validators).unwrap(); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -371,8 +365,8 @@ async fn tendermint_evidence_tx() { } // Verify and add the block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -467,7 +461,7 @@ async fn block_tx_ordering() { let signed_tx = Transaction::Application(SignedTx::Signed(Box::new( crate::tests::signed_transaction(&mut OsRng, genesis, &key, i), ))); - blockchain.add_transaction::(true, signed_tx.clone(), validators.clone()).unwrap(); + blockchain.add_transaction::(true, signed_tx.clone(), &validators).unwrap(); mempool.push(signed_tx); let unsigned_tx = Transaction::Tendermint( @@ -477,7 +471,7 @@ async fn block_tx_ordering() { ) .await, ); - blockchain.add_transaction::(true, unsigned_tx.clone(), validators.clone()).unwrap(); + blockchain.add_transaction::(true, unsigned_tx.clone(), &validators).unwrap(); mempool.push(unsigned_tx); let provided_tx = 
@@ -485,7 +479,7 @@ async fn block_tx_ordering() { blockchain.provide_transaction(provided_tx.clone()).unwrap(); provided_txs.push(provided_tx); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -509,7 +503,7 @@ async fn block_tx_ordering() { } // should be a valid block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // Unsigned before Provided { @@ -518,7 +512,7 @@ async fn block_tx_ordering() { let unsigned = block.transactions.remove(128); block.transactions.insert(0, unsigned); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -529,7 +523,7 @@ async fn block_tx_ordering() { let signed = block.transactions.remove(256); block.transactions.insert(0, signed); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -539,7 +533,7 @@ async fn block_tx_ordering() { let mut block = block; block.transactions.swap(128, 256); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } diff --git a/coordinator/tributary/src/tests/mempool.rs b/coordinator/tributary/src/tests/mempool.rs index 9d8590c2d..34ed4cf98 100644 --- a/coordinator/tributary/src/tests/mempool.rs +++ b/coordinator/tributary/src/tests/mempool.rs @@ -47,7 +47,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -63,7 +63,7 @@ async fn mempool_addition() { &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -78,7 +78,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -89,7 +89,7 @@ async fn mempool_addition() { &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -103,7 +103,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -115,7 +115,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -133,7 +133,7 @@ async fn mempool_addition() { &|_, _| Some(2), true, Transaction::Application(tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit ) @@ -173,7 +173,7 @@ fn too_many_mempool() { &|_, _| Some(0), false, Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -190,7 +190,7 @@ fn too_many_mempool() { &key, ACCOUNT_MEMPOOL_LIMIT )), - validators.clone(), + &validators, unsigned_in_chain, commit, ), diff --git a/coordinator/tributary/src/tests/transaction/tendermint.rs b/coordinator/tributary/src/tests/transaction/tendermint.rs index 
aba077676..e701f1361 100644 --- a/coordinator/tributary/src/tests/transaction/tendermint.rs +++ b/coordinator/tributary/src/tests/transaction/tendermint.rs @@ -57,13 +57,13 @@ async fn invalid_valid_round() { // This should be invalid evidence if a valid valid round is specified let (_, tx) = valid_round_tx(None).await; - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // If an invalid valid round is specified (>= current), this should be invalid evidence let (mut signed, tx) = valid_round_tx(Some(RoundNumber(0))).await; // should pass - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // change the signature let mut random_sig = [0u8; 64]; @@ -72,7 +72,7 @@ async fn invalid_valid_round() { let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())); // should fail - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } #[tokio::test] @@ -94,7 +94,7 @@ async fn invalid_precommit_signature() { }; // Empty Precommit should fail. - assert!(verify_tendermint_tx::(&precommit(None).await.1, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&precommit(None).await.1, &validators, commit).is_err()); // valid precommit signature should fail. let block_id = [0x22u8; 32]; @@ -105,7 +105,7 @@ async fn invalid_precommit_signature() { assert!(verify_tendermint_tx::( &precommit(Some((block_id, signer.clone().sign(&commit_msg).await))).await.1, - validators.clone(), + &validators, commit ) .is_err()); @@ -113,14 +113,14 @@ async fn invalid_precommit_signature() { // any other signature can be used as evidence. 
{ let (mut signed, tx) = precommit(Some((block_id, signer.sign(&[]).await))).await; - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // So long as we can authenticate where it came from let mut random_sig = [0u8; 64]; OsRng.fill_bytes(&mut random_sig); signed.sig = random_sig; let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())); - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } @@ -170,10 +170,10 @@ async fn evidence_with_prevote() { // No prevote message alone should be valid as slash evidence at this time for prevote in prevote(None).await { - assert!(verify_tendermint_tx::(&prevote, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } for prevote in prevote(Some([0x22u8; 32])).await { - assert!(verify_tendermint_tx::(&prevote, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } } @@ -199,7 +199,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; @@ -207,7 +207,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) @@ -216,7 +216,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; @@ -224,7 +224,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // Prevote @@ -235,7 +235,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await; @@ -243,7 +243,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) @@ -252,7 +252,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, 
Data::Prevote(Some([0x22; 32]))).await; @@ -260,7 +260,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // Precommit @@ -272,7 +272,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // For precommit, the round number is ignored let signed_2 = signed_for_b_r(0, 1, Data::Precommit(Some(([0x22; 32], sig)))).await; @@ -280,7 +280,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Yet the block number isn't let signed_2 = signed_for_b_r(1, 0, Data::Precommit(Some(([0x22; 32], sig)))).await; @@ -288,7 +288,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } // msgs from different senders should fail @@ -320,7 +320,7 @@ async fn conflicting_msgs_evidence_tx() { let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap()); - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } // msgs with different steps should fail @@ -331,6 +331,6 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 1aa37c59e..a773daa7a 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -191,8 +191,7 @@ pub(crate) fn verify_transaction( tx.verify()?; match tx.kind() { - TransactionKind::Provided(_) => {} - TransactionKind::Unsigned => {} + TransactionKind::Provided(_) | TransactionKind::Unsigned => {} TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { if let Some(next_nonce) = get_and_increment_nonce(signer, &order) { if *nonce != next_nonce { diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 188849e89..c54160997 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -543,8 +543,7 @@ impl TendermintMachine { self.slash(sender, slash).await } - Err(TendermintError::Temporal) => (), - Err(TendermintError::AlreadyHandled) => (), + Err(TendermintError::Temporal | TendermintError::AlreadyHandled) => (), } } } @@ -627,7 +626,7 @@ impl TendermintMachine { // Uses a junk signature since message equality disregards the signature if self.block.log.has_consensus( msg.round, - Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), + &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), ) { // If msg.round is in the future, these Precommits won't have their inner signatures // verified @@ -714,7 +713,7 @@ impl TendermintMachine { // of the round map if (self.block.round().step == Step::Prevote) && matches!(msg.data, 
Data::Prevote(_)) { let (participation, weight) = - self.block.log.message_instances(self.block.round().number, Data::Prevote(None)); + self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); // 34-35 if participation >= self.weights.threshold() { self.block.round_mut().set_timeout(Step::Prevote); @@ -767,7 +766,7 @@ impl TendermintMachine { // 23 and 29. If it's some, both are satisfied if they're for the same ID. If it's some // with different IDs, the function on 22 rejects yet the function on 28 has one other // condition - let locked = self.block.locked.as_ref().map(|(_, id)| id == &block.id()).unwrap_or(true); + let locked = self.block.locked.as_ref().map_or(true, |(_, id)| id == &block.id()); let mut vote = raw_vote.filter(|_| locked); if let Some(vr) = vr { @@ -780,7 +779,7 @@ impl TendermintMachine { ))?; } - if self.block.log.has_consensus(*vr, Data::Prevote(Some(block.id()))) { + if self.block.log.has_consensus(*vr, &Data::Prevote(Some(block.id()))) { // Allow differing locked values if the proposal has a newer valid round // This is the other condition described above if let Some((locked_round, _)) = self.block.locked.as_ref() { @@ -798,25 +797,18 @@ impl TendermintMachine { return Ok(None); } - if self - .block - .valid - .as_ref() - .map(|(round, _)| round != &self.block.round().number) - .unwrap_or(true) - { + if self.block.valid.as_ref().map_or(true, |(round, _)| round != &self.block.round().number) { // 36-43 // The run once condition is implemented above. Since valid will always be set by this, it // not being set, or only being set historically, means this has yet to be run - if self.block.log.has_consensus(self.block.round().number, Data::Prevote(Some(block.id()))) { + if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { match self.network.validate(block).await { - Ok(()) => (), // BlockError::Temporal is due to a temporal error we have, yet a supermajority of the // network does not, Because we do not believe this block to be fatally invalid, and // because a supermajority deems it valid, accept it. - Err(BlockError::Temporal) => (), + Ok(()) | Err(BlockError::Temporal) => (), Err(BlockError::Fatal) => { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); // TODO: Produce evidence of this for the higher level code to decide what to do with diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index f663dfc8f..85f4cf926 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -64,14 +64,14 @@ impl MessageLog { // For a given round, return the participating weight for this step, and the weight agreeing with // the data. 
- pub(crate) fn message_instances(&self, round: RoundNumber, data: DataFor) -> (u64, u64) { + pub(crate) fn message_instances(&self, round: RoundNumber, data: &DataFor) -> (u64, u64) { let mut participating = 0; let mut weight = 0; for (participant, msgs) in &self.log[&round] { if let Some(msg) = msgs.get(&data.step()) { let validator_weight = self.weights.weight(*participant); participating += validator_weight; - if data == msg.msg.data { + if data == &msg.msg.data { weight += validator_weight; } } @@ -102,7 +102,7 @@ impl MessageLog { } // Check if consensus has been reached on a specific piece of data - pub(crate) fn has_consensus(&self, round: RoundNumber, data: DataFor) -> bool { + pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor) -> bool { let (_, weight) = self.message_instances(round, data); weight >= self.weights.threshold() } diff --git a/crypto/dkg/src/promote.rs b/crypto/dkg/src/promote.rs index ac94beb6d..010abf80f 100644 --- a/crypto/dkg/src/promote.rs +++ b/crypto/dkg/src/promote.rs @@ -19,7 +19,7 @@ pub trait CiphersuitePromote { fn promote(self) -> ThresholdKeys; } -fn transcript(key: G, i: Participant) -> RecommendedTranscript { +fn transcript(key: &G, i: Participant) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2"); transcript.append_message(b"group_key", key.to_bytes()); transcript.append_message(b"participant", i.to_bytes()); @@ -79,7 +79,7 @@ where share: C2::generator() * base.secret_share().deref(), proof: DLEqProof::prove( rng, - &mut transcript(base.core.group_key(), base.params().i), + &mut transcript(&base.core.group_key(), base.params().i), &[C1::generator(), C2::generator()], base.secret_share(), ), @@ -105,7 +105,7 @@ where proof .proof .verify( - &mut transcript(self.base.core.group_key(), i), + &mut transcript(&self.base.core.group_key(), i), &[C1::generator(), C2::generator()], &[original_shares[&i], proof.share], ) diff --git a/crypto/dkg/src/tests/frost.rs b/crypto/dkg/src/tests/frost.rs index 92f687c44..01af35626 100644 --- a/crypto/dkg/src/tests/frost.rs +++ b/crypto/dkg/src/tests/frost.rs @@ -135,10 +135,10 @@ mod literal { const TWO: Participant = Participant(2); fn test_blame( - commitment_msgs: HashMap>>, + commitment_msgs: &HashMap>>, machines: Vec>, - msg: FrostEncryptedMessage, - blame: Option>, + msg: &FrostEncryptedMessage, + blame: &Option>, ) { for machine in machines { let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone()); @@ -188,7 +188,7 @@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] @@ -228,7 +228,7 @@ mod literal { .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_key(); - test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } // This should be largely equivalent to the prior test @@ -263,7 +263,7 @@ mod literal { .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq(); - test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } #[test] @@ -296,7 +296,7 @@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, 
secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] @@ -329,6 +329,6 @@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } } diff --git a/crypto/dleq/src/cross_group/aos.rs b/crypto/dleq/src/cross_group/aos.rs index 5a32bf136..dac3356ac 100644 --- a/crypto/dleq/src/cross_group/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -102,7 +102,7 @@ where #[allow(non_snake_case)] pub(crate) fn prove( rng: &mut R, - transcript: T, + transcript: &T, generators: (Generators, Generators), ring: &[(G0, G1)], mut actual: usize, @@ -122,7 +122,7 @@ where #[allow(non_snake_case)] let mut R = original_R; - for i in ((actual + 1) .. (actual + RING_LEN + 1)).map(|i| i % RING_LEN) { + for i in ((actual + 1) ..= (actual + RING_LEN)).map(|i| i % RING_LEN) { let e = Self::nonces(transcript.clone(), R); if i == 0 { match Re_0 { @@ -144,11 +144,10 @@ where r.0.zeroize(); r.1.zeroize(); break; - // Generate a decoy response - } else { - s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); } + // Generate a decoy response + s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); R = Self::R(generators, s[i], ring[i], e); } @@ -159,7 +158,7 @@ where pub(crate) fn verify( &self, rng: &mut R, - transcript: T, + transcript: &T, generators: (Generators, Generators), batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>), ring: &[(G0, G1)], diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index d818abced..a5de897a0 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -47,10 +47,8 @@ impl BitSignature { pub(crate) const fn bits(&self) -> u8 { match self { - BitSignature::ClassicLinear => 1, - BitSignature::ConciseLinear => 2, - BitSignature::EfficientLinear => 1, - BitSignature::CompromiseLinear => 2, + BitSignature::ClassicLinear | BitSignature::EfficientLinear => 1, + BitSignature::ConciseLinear | BitSignature::CompromiseLinear => 2, } } @@ -60,10 +58,8 @@ impl BitSignature { fn aos_form(&self) -> Re { match self { - BitSignature::ClassicLinear => Re::e_default(), - BitSignature::ConciseLinear => Re::e_default(), - BitSignature::EfficientLinear => Re::R_default(), - BitSignature::CompromiseLinear => Re::R_default(), + BitSignature::ClassicLinear | BitSignature::ConciseLinear => Re::e_default(), + BitSignature::EfficientLinear | BitSignature::CompromiseLinear => Re::R_default(), } } } @@ -129,7 +125,7 @@ where let signature = Aos::prove( rng, - transcript.clone(), + transcript, generators, &Self::ring(*pow_2, commitments), usize::from(bits), @@ -155,7 +151,7 @@ where self.signature.verify( rng, - transcript.clone(), + transcript, generators, batch, &Self::ring(*pow_2, self.commitments), diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index de8256cf0..77569c7ca 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -408,10 +408,8 @@ where Self::transcript(transcript, generators, keys); let batch_capacity = match BitSignature::from(SIGNATURE) { - BitSignature::ClassicLinear => 3, - BitSignature::ConciseLinear => 3, - BitSignature::EfficientLinear => (self.bits.len() + 1) * 3, - BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3, + 
BitSignature::ClassicLinear | BitSignature::ConciseLinear => 3, + BitSignature::EfficientLinear | BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3, }; let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity)); diff --git a/crypto/dleq/src/tests/cross_group/aos.rs b/crypto/dleq/src/tests/cross_group/aos.rs index 69139a93b..cf3182890 100644 --- a/crypto/dleq/src/tests/cross_group/aos.rs +++ b/crypto/dleq/src/tests/cross_group/aos.rs @@ -11,14 +11,14 @@ use crate::{ #[allow(non_snake_case)] #[cfg(feature = "serialize")] -fn test_aos_serialization(proof: Aos, Re_0: Re) { +fn test_aos_serialization(proof: &Aos, Re_0: Re) { let mut buf = vec![]; proof.write(&mut buf).unwrap(); let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap(); - assert_eq!(proof, deserialized); + assert_eq!(proof, &deserialized); } -fn test_aos(default: Re) { +fn test_aos(default: &Re) { let generators = generators(); let mut ring_keys = [(::Scalar::ZERO, ::Scalar::ZERO); RING_LEN]; @@ -34,7 +34,7 @@ fn test_aos(default: Re) { for (actual, key) in ring_keys.iter_mut().enumerate() { let proof = Aos::<_, _, RING_LEN>::prove( &mut OsRng, - transcript(), + &transcript(), generators, &ring, actual, @@ -43,25 +43,25 @@ fn test_aos(default: Re) { ); let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0)); - proof.verify(&mut OsRng, transcript(), generators, &mut batch, &ring).unwrap(); + proof.verify(&mut OsRng, &transcript(), generators, &mut batch, &ring).unwrap(); // For e, these should have nothing. For R, these should have 6 elements each which sum to 0 assert!(batch.0.verify_vartime()); assert!(batch.1.verify_vartime()); #[cfg(feature = "serialize")] - test_aos_serialization(proof, default.clone()); + test_aos_serialization(&proof, default.clone()); } } #[test] fn test_aos_e() { - test_aos::<2>(Re::e_default()); - test_aos::<4>(Re::e_default()); + test_aos::<2>(&Re::e_default()); + test_aos::<4>(&Re::e_default()); } #[allow(non_snake_case)] #[test] fn test_aos_R() { // Batch verification appreciates the longer vectors, which means not batching bits - test_aos::<2>(Re::R_default()); + test_aos::<2>(&Re::R_default()); } diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs index c80115dee..412dfcaf3 100644 --- a/crypto/dleq/src/tests/mod.rs +++ b/crypto/dleq/src/tests/mod.rs @@ -117,7 +117,7 @@ fn test_multi_dleq() { // 0: 0 // 1: 1, 2 // 2: 2, 3, 4 - let key_generators = generators[i .. 
(i + i + 1)].to_vec(); + let key_generators = generators[i ..= (i + i)].to_vec(); let mut these_pub_keys = vec![]; for generator in &key_generators { these_pub_keys.push(generator * key.deref()); diff --git a/crypto/ff-group-tests/src/field.rs b/crypto/ff-group-tests/src/field.rs index e34f4c813..cece37a01 100644 --- a/crypto/ff-group-tests/src/field.rs +++ b/crypto/ff-group-tests/src/field.rs @@ -130,8 +130,8 @@ pub fn test_sqrt() { assert_eq!(root * root, has_root, "sqrt(x)^2 != x"); let check = |value: (_, _), expected: (_, F), msg| { - assert_eq!(bool::from(value.0), bool::from(expected.0), "{}", msg); - assert!((value.1 == expected.1) || (value.1 == -expected.1), "{}", msg); + assert_eq!(bool::from(value.0), bool::from(expected.0), "{msg}"); + assert!((value.1 == expected.1) || (value.1 == -expected.1), "{msg}"); }; check( F::sqrt_ratio(&has_root, &F::ONE), diff --git a/crypto/frost/src/tests/literal/dalek.rs b/crypto/frost/src/tests/literal/dalek.rs index 9a11c5d2d..e9f5a0f4a 100644 --- a/crypto/frost/src/tests/literal/dalek.rs +++ b/crypto/frost/src/tests/literal/dalek.rs @@ -10,7 +10,7 @@ use crate::{ fn ristretto_vectors() { test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-ristretto255-sha512.json" )) @@ -24,7 +24,7 @@ fn ristretto_vectors() { fn ed25519_vectors() { test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed25519-sha512.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/literal/ed448.rs b/crypto/frost/src/tests/literal/ed448.rs index 79b7679b6..95a315058 100644 --- a/crypto/frost/src/tests/literal/ed448.rs +++ b/crypto/frost/src/tests/literal/ed448.rs @@ -57,7 +57,7 @@ fn ed448_8032_vector() { fn ed448_vectors() { test_with_vectors::<_, Ed448, IetfEd448Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed448-shake256.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 175039e4d..99bdc1570 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -13,7 +13,7 @@ use crate::curve::{P256, IetfP256Hram}; fn secp256k1_vectors() { test_with_vectors::<_, Secp256k1, IetfSecp256k1Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-secp256k1-sha256.json" )) @@ -27,7 +27,7 @@ fn secp256k1_vectors() { fn p256_vectors() { test_with_vectors::<_, P256, IetfP256Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-p256-sha256.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index 2659c1c7b..e457c7037 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -39,7 +39,7 @@ pub fn clone_without( /// Spawn algorithm machines for a random selection of signers, each executing the given algorithm. pub fn algorithm_machines>( rng: &mut R, - algorithm: A, + algorithm: &A, keys: &HashMap>, ) -> HashMap> { let mut included = vec![]; @@ -167,7 +167,7 @@ pub fn sign_without_caching( /// successfully. pub fn sign( rng: &mut R, - params: >::Params, + params: &>::Params, mut keys: HashMap>::Keys>, machines: HashMap, msg: &[u8], @@ -195,12 +195,12 @@ pub fn sign( /// Test a basic Schnorr signature with the provided keys. 
pub fn test_schnorr_with_keys>( rng: &mut R, - keys: HashMap>, + keys: &HashMap>, ) { const MSG: &[u8] = b"Hello, World!"; - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); - let sig = sign(&mut *rng, IetfSchnorr::::ietf(), keys.clone(), machines, MSG); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), keys); + let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(group_key, H::hram(&sig.R, &group_key, MSG))); } @@ -208,13 +208,13 @@ pub fn test_schnorr_with_keys>( /// Test a basic Schnorr signature. pub fn test_schnorr>(rng: &mut R) { let keys = key_gen(&mut *rng); - test_schnorr_with_keys::<_, _, H>(&mut *rng, keys) + test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys) } /// Test a basic Schnorr signature, yet with MuSig. pub fn test_musig_schnorr>(rng: &mut R) { let keys = musig_key_gen(&mut *rng); - test_schnorr_with_keys::<_, _, H>(&mut *rng, keys) + test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys) } /// Test an offset Schnorr signature. @@ -231,8 +231,8 @@ pub fn test_offset_schnorr>(rng: &m assert_eq!(keys.group_key(), offset_key); } - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); - let sig = sign(&mut *rng, IetfSchnorr::::ietf(), keys.clone(), machines, MSG); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); + let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(offset_key, H::hram(&sig.R, &group_key, MSG))); } @@ -242,7 +242,7 @@ pub fn test_schnorr_blame>(rng: &mu const MSG: &[u8] = b"Hello, World!"; let keys = key_gen(&mut *rng); - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); let (mut machines, shares) = preprocess_and_shares(&mut *rng, machines, |_, _| {}, MSG); diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index 99be83ff7..ee060befd 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -154,14 +154,14 @@ impl Algorithm for MultiNonce { // 3) Provide algorithms with nonces which match the group nonces pub fn test_multi_nonce(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); - sign(&mut *rng, MultiNonce::::new(), keys.clone(), machines, &[]); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); + sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } /// Test malleating a commitment for a nonce across generators causes the preprocess to error. pub fn test_invalid_commitment(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); // Select a random participant to give an invalid commitment @@ -193,7 +193,7 @@ pub fn test_invalid_commitment(rng: &mut R) { /// Test malleating the DLEq proof for a preprocess causes it to error. 
pub fn test_invalid_dleq_proof(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); // Select a random participant to give an invalid DLEq proof diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index f653513ec..275e36f52 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -143,12 +143,12 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap>( rng: &mut R, - vectors: Vectors, + vectors: &Vectors, ) { test_ciphersuite::(rng); // Test against the vectors - let keys = vectors_to_multisig_keys::(&vectors); + let keys = vectors_to_multisig_keys::(vectors); { let group_key = ::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref()) diff --git a/crypto/schnorr/src/lib.rs b/crypto/schnorr/src/lib.rs index 77d033d05..282f7c18e 100644 --- a/crypto/schnorr/src/lib.rs +++ b/crypto/schnorr/src/lib.rs @@ -69,6 +69,7 @@ impl SchnorrSignature { /// This challenge must be properly crafted, which means being binding to the public key, nonce, /// and any message. Failure to do so will let a malicious adversary to forge signatures for /// different keys/messages. + #[allow(clippy::needless_pass_by_value)] // Prevents further-use of this single-use value pub fn sign( private_key: &Zeroizing, nonce: Zeroizing, diff --git a/crypto/schnorrkel/src/tests.rs b/crypto/schnorrkel/src/tests.rs index 2b01ad43f..2f3c758b3 100644 --- a/crypto/schnorrkel/src/tests.rs +++ b/crypto/schnorrkel/src/tests.rs @@ -17,8 +17,9 @@ fn test() { let keys = key_gen(&mut OsRng); let key = keys[&Participant::new(1).unwrap()].group_key(); - let machines = algorithm_machines(&mut OsRng, Schnorrkel::new(CONTEXT), &keys); - let signature = sign(&mut OsRng, Schnorrkel::new(CONTEXT), keys, machines, MSG); + let algorithm = Schnorrkel::new(CONTEXT); + let machines = algorithm_machines(&mut OsRng, &algorithm, &keys); + let signature = sign(&mut OsRng, &algorithm, keys, machines, MSG); let key = PublicKey::from_bytes(key.to_bytes().as_ref()).unwrap(); key.verify(&mut SigningContext::new(CONTEXT).bytes(MSG), &signature).unwrap() diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 66d5d7cfb..3956f51d9 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -50,6 +50,7 @@ pub trait Transcript: Send + Clone { fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } +#[derive(Clone, Copy)] enum DigestTranscriptMember { Name, Domain, diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs index 94e89a3ff..754528af3 100644 --- a/message-queue/src/main.rs +++ b/message-queue/src/main.rs @@ -54,7 +54,7 @@ pub(crate) use self::clippy::*; */ pub(crate) fn queue_message( db: &mut Db, - meta: Metadata, + meta: &Metadata, msg: Vec, sig: SchnorrSignature, ) { @@ -115,7 +115,7 @@ pub(crate) fn queue_message( pub(crate) fn get_next_message(from: Service, to: Service) -> Option { let queue_outer = QUEUES.read().unwrap(); let queue = queue_outer[&(from, to)].read().unwrap(); - let next = queue.last_acknowledged().map(|i| i + 1).unwrap_or(0); + let next = queue.last_acknowledged().map_or(0, |i| i + 1); queue.get_message(next) } @@ -246,7 +246,7 @@ async fn main() { MessageQueueRequest::Queue { meta, msg, sig } => { queue_message( &mut db, - meta, + &meta, msg, 
SchnorrSignature::::read(&mut sig.as_slice()).unwrap(), ); diff --git a/message-queue/src/queue.rs b/message-queue/src/queue.rs index 46148d414..d8d6ca79c 100644 --- a/message-queue/src/queue.rs +++ b/message-queue/src/queue.rs @@ -16,8 +16,7 @@ impl Queue { self .0 .get(self.message_count_key()) - .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0) + .map_or(0, |bytes| u64::from_le_bytes(bytes.try_into().unwrap())) } fn last_acknowledged_key(&self) -> Vec { diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 828145acd..2983fc235 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -130,8 +130,8 @@ pub mod sign { pub fn session(&self) -> Session { match self { - CoordinatorMessage::Preprocesses { id, .. } => id.session, - CoordinatorMessage::Shares { id, .. } => id.session, + CoordinatorMessage::Preprocesses { id, .. } | + CoordinatorMessage::Shares { id, .. } | CoordinatorMessage::Reattempt { id } => id.session, CoordinatorMessage::Completed { session, .. } => *session, } @@ -193,12 +193,7 @@ pub mod coordinator { // network *and the local node* // This synchrony obtained lets us ignore the synchrony requirement offered here pub fn required_block(&self) -> Option { - match self { - CoordinatorMessage::CosignSubstrateBlock { .. } => None, - CoordinatorMessage::SubstratePreprocesses { .. } => None, - CoordinatorMessage::SubstrateShares { .. } => None, - CoordinatorMessage::BatchReattempt { .. } => None, - } + None } } @@ -240,7 +235,7 @@ pub mod substrate { impl CoordinatorMessage { pub fn required_block(&self) -> Option { let context = match self { - CoordinatorMessage::ConfirmKeyPair { context, .. } => context, + CoordinatorMessage::ConfirmKeyPair { context, .. } | CoordinatorMessage::SubstrateBlock { context, .. 
} => context, }; Some(context.network_latest_finalized_block) diff --git a/processor/src/batch_signer.rs b/processor/src/batch_signer.rs index d4f662b1c..6110b84fe 100644 --- a/processor/src/batch_signer.rs +++ b/processor/src/batch_signer.rs @@ -111,7 +111,7 @@ impl BatchSigner { } #[must_use] - async fn attempt( + fn attempt( &mut self, txn: &mut D::Transaction<'_>, id: u32, @@ -189,11 +189,7 @@ impl BatchSigner { } #[must_use] - pub async fn sign( - &mut self, - txn: &mut D::Transaction<'_>, - batch: Batch, - ) -> Option { + pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { debug_assert_eq!(self.network, batch.network); let id = batch.id; if CompletedDb::get(txn, id).is_some() { @@ -203,11 +199,11 @@ impl BatchSigner { } self.signable.insert(id, batch); - self.attempt(txn, id, 0).await + self.attempt(txn, id, 0) } #[must_use] - pub async fn handle( + pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, @@ -394,7 +390,7 @@ impl BatchSigner { let SubstrateSignableId::Batch(batch_id) = id.id else { panic!("BatchReattempt passed non-Batch ID") }; - self.attempt(txn, batch_id, id.attempt).await.map(Into::into) + self.attempt(txn, batch_id, id.attempt).map(Into::into) } } } diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs index c5c495672..a324da776 100644 --- a/processor/src/cosigner.rs +++ b/processor/src/cosigner.rs @@ -114,7 +114,7 @@ impl Cosigner { } #[must_use] - pub async fn handle( + pub fn handle( &mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage, diff --git a/processor/src/db.rs b/processor/src/db.rs index e02051d8e..ffd7c43ad 100644 --- a/processor/src/db.rs +++ b/processor/src/db.rs @@ -32,7 +32,7 @@ impl PendingActivationsDb { } pub fn set_pending_activation( txn: &mut impl DbTxn, - block_before_queue_block: >::Id, + block_before_queue_block: &>::Id, session: Session, key_pair: KeyPair, ) { diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index b81ba7b55..8185eb4fa 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -89,7 +89,7 @@ impl KeysDb { fn confirm_keys( txn: &mut impl DbTxn, session: Session, - key_pair: KeyPair, + key_pair: &KeyPair, ) -> (Vec>, Vec>) { let (keys_vec, keys) = GeneratedKeysDb::read_keys::( txn, @@ -175,7 +175,7 @@ impl KeyGen { KeysDb::substrate_keys_by_session::(&self.db, session) } - pub async fn handle( + pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, @@ -582,11 +582,13 @@ impl KeyGen { } } - pub async fn confirm( + // This should only be called if we're participating, hence taking our instance + #[allow(clippy::unused_self)] + pub fn confirm( &mut self, txn: &mut D::Transaction<'_>, session: Session, - key_pair: KeyPair, + key_pair: &KeyPair, ) -> KeyConfirmed { info!( "Confirmed key pair {} {} for {:?}", diff --git a/processor/src/main.rs b/processor/src/main.rs index a80f93dc0..7eb0fcd70 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -199,7 +199,7 @@ async fn handle_coordinator_msg( if tributary_mutable.key_gen.in_set(&session) { // See TributaryMutable's struct definition for why this block is safe let KeyConfirmed { substrate_keys, network_keys } = - tributary_mutable.key_gen.confirm(txn, session, key_pair.clone()).await; + tributary_mutable.key_gen.confirm(txn, session, &key_pair); if session.0 == 0 { tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); @@ -214,7 +214,7 @@ async fn handle_coordinator_msg( match msg.msg.clone() { 
CoordinatorMessage::KeyGen(msg) => { - coordinator.send(tributary_mutable.key_gen.handle(txn, msg).await).await; + coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await; } CoordinatorMessage::Sign(msg) => { @@ -232,9 +232,7 @@ async fn handle_coordinator_msg( CoordinatorMessage::Coordinator(msg) => { let is_batch = match msg { CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } => false, - CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } => { - matches!(&id.id, SubstrateSignableId::Batch(_)) - } + CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => { matches!(&id.id, SubstrateSignableId::Batch(_)) } @@ -248,7 +246,6 @@ async fn handle_coordinator_msg( "coordinator told us to sign a batch when we don't currently have a Substrate signer", ) .handle(txn, msg) - .await { coordinator.send(msg).await; } @@ -272,7 +269,7 @@ async fn handle_coordinator_msg( } _ => { if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { - if let Some(msg) = cosigner.handle(txn, msg).await { + if let Some(msg) = cosigner.handle(txn, msg) { coordinator.send(msg).await; } } else { @@ -355,7 +352,7 @@ async fn handle_coordinator_msg( // Set this variable so when we get the next Batch event, we can handle it PendingActivationsDb::set_pending_activation::( txn, - block_before_queue_block, + &block_before_queue_block, session, key_pair, ); @@ -429,7 +426,7 @@ async fn handle_coordinator_msg( for (key, id, tx, eventuality) in to_sign { if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { let signer = signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await { + if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { coordinator.send(msg).await; } } @@ -521,7 +518,7 @@ async fn boot( if plan.key == network_key { let mut txn = raw_db.txn(); if let Some(msg) = - signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality.clone()).await + signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await { coordinator.send(msg).await; } @@ -622,7 +619,7 @@ async fn run(mut raw_db: D, network: N, mut ).await; if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - if let Some(msg) = batch_signer.sign(&mut txn, batch).await { + if let Some(msg) = batch_signer.sign(&mut txn, batch) { coordinator.send(msg).await; } } @@ -644,7 +641,7 @@ async fn run(mut raw_db: D, network: N, mut MultisigEvent::Completed(key, id, tx) => { if let Some(session) = SessionDb::get(&txn, &key) { let signer = tributary_mutable.signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.completed(&mut txn, id, tx) { + if let Some(msg) = signer.completed(&mut txn, id, &tx) { coordinator.send(msg).await; } } diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index b5bb643b8..51287a0e8 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -102,7 +102,7 @@ impl ResolvedDb { txn: &mut impl DbTxn, key: &[u8], plan: [u8; 32], - resolution: >::Id, + resolution: &>::Id, ) { let mut signing = SigningDb::get(txn, key).unwrap_or_default(); assert_eq!(signing.len() % 32, 0); @@ -160,7 +160,7 @@ impl PlansFromScanningDb { } impl ForwardedOutputDb { - pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) { + pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { let mut existing = 
Self::get(txn, instruction.balance).unwrap_or_default(); existing.extend(instruction.encode()); Self::set(txn, instruction.balance, &existing); @@ -184,7 +184,7 @@ impl ForwardedOutputDb { } impl DelayedOutputDb { - pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) { + pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { let mut existing = Self::get(txn).unwrap_or_default(); existing.extend(instruction.encode()); Self::set(txn, &existing); diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index 8af386467..a6e8bbc9d 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ - primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash}, + primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, @@ -316,7 +316,7 @@ impl MultisigManager { assert_eq!(balance.coin.network(), N::NETWORK); if let Ok(address) = N::Address::try_from(address.consume()) { - payments.push(Payment { address, data: data.map(|data| data.consume()), balance }); + payments.push(Payment { address, data: data.map(Data::consume), balance }); } } @@ -513,7 +513,7 @@ impl MultisigManager { let mut plans = vec![]; existing_outputs.retain(|output| { match output.kind() { - OutputType::External => false, + OutputType::External | OutputType::Forwarded => false, OutputType::Branch => { let scheduler = &mut self.existing.as_mut().unwrap().scheduler; // There *would* be a race condition here due to the fact we only mark a `Branch` output @@ -576,7 +576,6 @@ impl MultisigManager { } false } - OutputType::Forwarded => false, } }); plans @@ -873,7 +872,7 @@ impl MultisigManager { // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); - ForwardedOutputDb::save_forwarded_output(txn, instruction); + ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { @@ -907,9 +906,7 @@ impl MultisigManager { } let (refund_to, instruction) = instruction_from_output::(&output); - let instruction = if let Some(instruction) = instruction { - instruction - } else { + let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { plans.push(Self::refund_plan(output.clone(), refund_to)); @@ -922,7 +919,7 @@ impl MultisigManager { if Some(output.key()) == self.new.as_ref().map(|new| new.key) { match step { RotationStep::UseExisting => { - DelayedOutputDb::save_delayed_output(txn, instruction); + DelayedOutputDb::save_delayed_output(txn, &instruction); continue; } RotationStep::NewAsChange | @@ -1003,7 +1000,7 @@ impl MultisigManager { // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however. 
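// The hunk above replaces a manual `if let`/`else` binding with the `let ... else` form
// that clippy::manual_let_else asks for. A minimal, self-contained sketch of the same
// pattern, under illustrative names that are not from this codebase:
fn first_even(values: &[u64]) -> Option<u64> {
  let candidate = values.iter().copied().find(|&value| value % 2 == 0);
  // Before: let candidate = if let Some(candidate) = candidate { candidate } else { return None };
  let Some(candidate) = candidate else { return None };
  Some(candidate * 2)
}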
ScannerEvent::Completed(key, block_number, id, tx) => { - ResolvedDb::resolve_plan::(txn, &key, id, tx.id()); + ResolvedDb::resolve_plan::(txn, &key, id, &tx.id()); (block_number, MultisigEvent::Completed(key, id, tx)) } }; diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index 20f1ced16..1a13ba3d2 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -415,7 +415,7 @@ impl Scanner { ) } - async fn emit(&mut self, event: ScannerEvent) -> bool { + fn emit(&mut self, event: ScannerEvent) -> bool { if self.events.send(event).is_err() { info!("Scanner handler was dropped. Shutting down?"); return false; @@ -496,12 +496,9 @@ impl Scanner { } } - let block = match network.get_block(block_being_scanned).await { - Ok(block) => block, - Err(_) => { - warn!("couldn't get block {block_being_scanned}"); - break; - } + let Ok(block) = network.get_block(block_being_scanned).await else { + warn!("couldn't get block {block_being_scanned}"); + break; }; let block_id = block.id(); @@ -570,7 +567,7 @@ impl Scanner { completion_block_numbers.push(block_number); // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)).await { + if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) { return; } } @@ -687,10 +684,7 @@ impl Scanner { txn.commit(); // Send all outputs - if !scanner - .emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) - .await - { + if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) { return; } diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler.rs index 41c371b9b..cd1795852 100644 --- a/processor/src/multisigs/scheduler.rs +++ b/processor/src/multisigs/scheduler.rs @@ -335,7 +335,7 @@ impl Scheduler { // Since we do multiple aggregation TXs at once, this will execute in logarithmic time let utxos = self.utxos.drain(..).collect::>(); let mut utxo_chunks = - utxos.chunks(N::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::>(); + utxos.chunks(N::MAX_INPUTS).map(<[::Output]>::to_vec).collect::>(); // Use the first chunk for any scheduled payments, since it has the most value let utxos = utxo_chunks.remove(0); @@ -456,10 +456,7 @@ impl Scheduler { } // If we didn't actually create this output, return, dropping the child payments - let actual = match actual { - Some(actual) => actual, - None => return, - }; + let Some(actual) = actual else { return }; // Amortize the fee amongst all payments underneath this branch { diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index bae189f95..a04c5d1e0 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -427,7 +427,7 @@ impl Bitcoin { match BSignableTransaction::new( inputs.iter().map(|input| input.output.clone()).collect(), &payments, - change.as_ref().map(|change| change.0.clone()), + change.as_ref().map(|change| &change.0), None, fee.0, ) { @@ -435,16 +435,14 @@ impl Bitcoin { Err(TransactionError::NoInputs) => { panic!("trying to create a bitcoin transaction without inputs") } - // No outputs left and the change isn't worth enough - Err(TransactionError::NoOutputs) => Ok(None), + // No outputs left and the change isn't worth enough/not even enough funds to pay the fee + Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds) => Ok(None), // amortize_fee removes payments which 
fall below the dust threshold Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), Err(TransactionError::TooMuchData) => panic!("too much data despite not specifying data"), Err(TransactionError::TooLowFee) => { panic!("created a transaction whose fee is below the minimum") } - // Mot even enough funds to pay the fee - Err(TransactionError::NotEnoughFunds) => Ok(None), Err(TransactionError::TooLargeTransaction) => { panic!("created a too large transaction despite limiting inputs/outputs") } @@ -637,7 +635,7 @@ impl Network for Bitcoin { return res; } - async fn check_block( + fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, res: &mut HashMap<[u8; 32], (usize, Transaction)>, @@ -678,11 +676,11 @@ impl Network for Bitcoin { block.unwrap() }; - check_block(eventualities, &block, &mut res).await; + check_block(eventualities, &block, &mut res); } // Also check the current block - check_block(eventualities, block, &mut res).await; + check_block(eventualities, block, &mut res); assert_eq!(eventualities.block_number, this_block_num); res @@ -733,7 +731,7 @@ impl Network for Bitcoin { transaction .actual .clone() - .multisig(keys.clone(), transaction.transcript) + .multisig(&keys, transaction.transcript) .expect("used the wrong keys"), ) } diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index 92b502fca..9251dca70 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -229,6 +229,7 @@ impl PartialEq for Monero { } impl Eq for Monero {} +#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations fn map_rpc_err(err: RpcError) -> NetworkError { if let RpcError::InvalidNode(reason) = &err { log::error!("Monero RpcError::InvalidNode({reason})"); @@ -384,7 +385,7 @@ impl Monero { Some(Zeroizing::new(*plan_id)), inputs.clone(), payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), + &Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), vec![], fee_rate, ) { @@ -657,7 +658,7 @@ impl Network for Monero { keys: ThresholdKeys, transaction: SignableTransaction, ) -> Result { - match transaction.actual.clone().multisig(keys, transaction.transcript) { + match transaction.actual.clone().multisig(&keys, transaction.transcript) { Ok(machine) => Ok(machine), Err(e) => panic!("failed to create a multisig machine for TX: {e}"), } @@ -753,7 +754,7 @@ impl Network for Monero { None, inputs, vec![(address.into(), amount - fee)], - Change::fingerprintable(Some(Self::test_address().into())), + &Change::fingerprintable(Some(Self::test_address().into())), vec![], self.rpc.get_fee(protocol, FeePriority::Low).await.unwrap(), ) diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 35146a9cb..b25d50be0 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -99,7 +99,7 @@ impl core::fmt::Debug for Plan { .field("key", &hex::encode(self.key.to_bytes())) .field("inputs", &self.inputs) .field("payments", &self.payments) - .field("change", &self.change.as_ref().map(|change| change.to_string())) + .field("change", &self.change.as_ref().map(ToString::to_string)) .finish() } } diff --git a/processor/src/signer.rs b/processor/src/signer.rs index a1c4303b5..7a4fcbedb 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -97,7 +97,11 @@ impl CompletionsDb { } impl EventualityDb { - fn save_eventuality(txn: &mut impl DbTxn, id: [u8; 32], eventuality: N::Eventuality) { + fn save_eventuality( + txn: &mut 
impl DbTxn, + id: [u8; 32], + eventuality: &N::Eventuality, + ) { txn.put(Self::key(id), eventuality.serialize()); } @@ -113,7 +117,7 @@ impl TransactionDb { fn transaction( getter: &impl Get, - id: >::Id, + id: &>::Id, ) -> Option { Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap()) } @@ -164,7 +168,7 @@ impl Signer { log::info!("rebroadcasting {}", hex::encode(&completion)); // TODO: Don't drop the error entirely. Check for invariants let _ = network - .publish_transaction(&TransactionDb::transaction::(&db, completion).unwrap()) + .publish_transaction(&TransactionDb::transaction::(&db, &completion).unwrap()) .await; } } @@ -221,7 +225,7 @@ impl Signer { } #[must_use] - fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { + fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { if !CompletionsDb::completions::(txn, id).is_empty() { debug!( "SignTransaction/Reattempt order for {}, which we've already completed signing", @@ -238,7 +242,7 @@ impl Signer { fn complete( &mut self, id: [u8; 32], - tx_id: >::Id, + tx_id: &>::Id, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); @@ -260,16 +264,16 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: N::Transaction, + tx: &N::Transaction, ) -> Option { - let first_completion = !self.already_completed(txn, id); + let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, &tx); + CompletionsDb::complete::(txn, id, tx); if first_completion { - Some(self.complete(id, tx.id())) + Some(self.complete(id, &tx.id())) } else { None } @@ -302,13 +306,13 @@ impl Signer { if self.network.confirm_completion(&eventuality, &tx) { info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); - let first_completion = !self.already_completed(txn, id); + let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletionsDb::complete::(txn, id, &tx); if first_completion { - return Some(self.complete(id, tx.id())); + return Some(self.complete(id, &tx.id())); } } else { warn!( @@ -337,7 +341,7 @@ impl Signer { id: [u8; 32], attempt: u32, ) -> Option { - if self.already_completed(txn, id) { + if Self::already_completed(txn, id) { return None; } @@ -427,13 +431,13 @@ impl Signer { txn: &mut D::Transaction<'_>, id: [u8; 32], tx: N::SignableTransaction, - eventuality: N::Eventuality, + eventuality: &N::Eventuality, ) -> Option { // The caller is expected to re-issue sign orders on reboot // This is solely used by the rebroadcast task ActiveSignsDb::add_active_sign(txn, &id); - if self.already_completed(txn, id) { + if Self::already_completed(txn, id) { return None; } @@ -596,7 +600,7 @@ impl Signer { } // Stop trying to sign for this TX - Some(self.complete(id.id, tx_id)) + Some(self.complete(id.id, &tx_id)) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, diff --git a/processor/src/tests/batch_signer.rs b/processor/src/tests/batch_signer.rs index eb9e33597..dc45ff312 100644 --- a/processor/src/tests/batch_signer.rs +++ b/processor/src/tests/batch_signer.rs @@ -23,8 +23,8 @@ use messages::{ }; use crate::batch_signer::BatchSigner; -#[tokio::test] -async fn test_batch_signer() { +#[test] +fn test_batch_signer() { let keys = key_gen::<_, 
Ristretto>(&mut OsRng); let participant_one = Participant::new(1).unwrap(); @@ -74,7 +74,7 @@ async fn test_batch_signer() { let mut db = MemDb::new(); let mut txn = db.txn(); - match signer.sign(&mut txn, batch.clone()).await.unwrap() { + match signer.sign(&mut txn, batch.clone()).unwrap() { // All participants should emit a preprocess coordinator::ProcessorMessage::BatchPreprocess { id, @@ -109,7 +109,6 @@ async fn test_batch_signer() { preprocesses: clone_without(&preprocesses, i), }, ) - .await .unwrap() { ProcessorMessage::Coordinator(coordinator::ProcessorMessage::SubstrateShare { @@ -137,7 +136,6 @@ async fn test_batch_signer() { shares: clone_without(&shares, i), }, ) - .await .unwrap() { ProcessorMessage::Substrate(substrate::ProcessorMessage::SignedBatch { diff --git a/processor/src/tests/cosigner.rs b/processor/src/tests/cosigner.rs index b7cc1a80a..a66161bf7 100644 --- a/processor/src/tests/cosigner.rs +++ b/processor/src/tests/cosigner.rs @@ -18,8 +18,8 @@ use serai_client::{primitives::*, validator_sets::primitives::Session}; use messages::coordinator::*; use crate::cosigner::Cosigner; -#[tokio::test] -async fn test_cosigner() { +#[test] +fn test_cosigner() { let keys = key_gen::<_, Ristretto>(&mut OsRng); let participant_one = Participant::new(1).unwrap(); @@ -88,7 +88,6 @@ async fn test_cosigner() { preprocesses: clone_without(&preprocesses, i), }, ) - .await .unwrap() { ProcessorMessage::SubstrateShare { id, shares: mut these_shares } => { @@ -113,7 +112,6 @@ async fn test_cosigner() { shares: clone_without(&shares, i), }, ) - .await .unwrap() { ProcessorMessage::CosignedBlock { block_number, block: signed_block, signature } => { diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs index beb158da4..047e006ac 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/src/tests/key_gen.rs @@ -20,7 +20,7 @@ use crate::{ const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 }; -pub async fn test_key_gen() { +pub fn test_key_gen() { let mut entropies = HashMap::new(); let mut dbs = HashMap::new(); let mut key_gens = HashMap::new(); @@ -37,18 +37,15 @@ pub async fn test_key_gen() { for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - if let ProcessorMessage::Commitments { id, mut commitments } = key_gen - .handle( - &mut txn, - CoordinatorMessage::GenerateKey { - id: ID, - params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) - .unwrap(), - shares: 1, - }, - ) - .await - { + if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle( + &mut txn, + CoordinatorMessage::GenerateKey { + id: ID, + params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) + .unwrap(), + shares: 1, + }, + ) { assert_eq!(id, ID); assert_eq!(commitments.len(), 1); all_commitments @@ -74,16 +71,10 @@ pub async fn test_key_gen() { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::Shares { id, mut shares } = key_gen - .handle( - &mut txn, - CoordinatorMessage::Commitments { - id: ID, - commitments: clone_without(&all_commitments, &i), - }, - ) - .await - { + if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle( + &mut txn, + CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) }, + ) { assert_eq!(id, ID); assert_eq!(shares.len(), 1); 
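// The key-gen and signer handlers above drop their `async` qualifier because they never
// await anything (clippy::unused_async), which is why these tests can also move from
// #[tokio::test] back to plain #[test] and lose their `.await`s. A minimal sketch of the
// same change, with hypothetical names not taken from this codebase:
//
// Before:
//   async fn handle(msg: u32) -> u32 { msg + 1 } // no .await inside, so async buys nothing
//   #[tokio::test]
//   async fn test_handle() { assert_eq!(handle(1).await, 2); }
//
// After:
fn handle(msg: u32) -> u32 {
  msg + 1
}

#[test]
fn test_handle() {
  assert_eq!(handle(1), 2);
}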
all_shares.insert(i, shares.swap_remove(0)); @@ -102,19 +93,16 @@ pub async fn test_key_gen() { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen - .handle( - &mut txn, - CoordinatorMessage::Shares { - id: ID, - shares: vec![all_shares - .iter() - .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) - .collect()], - }, - ) - .await - { + if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle( + &mut txn, + CoordinatorMessage::Shares { + id: ID, + shares: vec![all_shares + .iter() + .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) + .collect()], + }, + ) { assert_eq!(id, ID); if res.is_none() { res = Some((substrate_key, network_key.clone())); @@ -134,13 +122,11 @@ pub async fn test_key_gen() { for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen - .confirm( - &mut txn, - ID.session, - KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), - ) - .await; + let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm( + &mut txn, + ID.session, + &KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), + ); txn.commit(); assert_eq!(substrate_keys.len(), 1); diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 2454acbbd..974be10b5 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -46,7 +46,7 @@ macro_rules! test_network { #[tokio::test] async fn $key_gen() { init_logger(); - test_key_gen::<$N>().await; + test_key_gen::<$N>(); } #[test] diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 8eacb9aec..89d57bf39 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -72,7 +72,7 @@ pub async fn sign( match signers .get_mut(&i) .unwrap() - .sign_transaction(&mut txn, actual_id.id, tx, eventuality) + .sign_transaction(&mut txn, actual_id.id, tx, &eventuality) .await { // All participants should emit a preprocess diff --git a/substrate/client/tests/common/in_instructions.rs b/substrate/client/tests/common/in_instructions.rs index 0435b6194..b4c248980 100644 --- a/substrate/client/tests/common/in_instructions.rs +++ b/substrate/client/tests/common/in_instructions.rs @@ -24,7 +24,7 @@ use crate::common::{tx::publish_tx, validator_sets::set_keys}; pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] { // TODO: Get the latest session let set = ValidatorSet { session: Session(0), network: batch.network }; - let pair = insecure_pair_from_name(&format!("ValidatorSet {:?}", set)); + let pair = insecure_pair_from_name(&format!("ValidatorSet {set:?}")); let keys = if let Some(keys) = serai.as_of_latest_finalized_block().await.unwrap().validator_sets().keys(set).await.unwrap() { diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 9c29d7765..22d0c005d 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -38,7 +38,7 @@ pub async fn set_keys(serai: &Serai, set: ValidatorSet, key_pair: KeyPair) -> [u &mut OsRng, frost::tests::algorithm_machines( &mut OsRng, - Schnorrkel::new(b"substrate"), + 
&Schnorrkel::new(b"substrate"), &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]), ), &set_keys_message(&set, &[], &key_pair), diff --git a/substrate/coins/pallet/src/lib.rs b/substrate/coins/pallet/src/lib.rs index 32d75bc0d..178fc9357 100644 --- a/substrate/coins/pallet/src/lib.rs +++ b/substrate/coins/pallet/src/lib.rs @@ -12,7 +12,8 @@ impl AllowMint for () { } } -#[allow(clippy::cast_possible_truncation)] // TODO: Investigate why Substrate generates this +// TODO: Investigate why Substrate generates this +#[allow(clippy::cast_possible_truncation)] #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/substrate/dex/pallet/src/lib.rs b/substrate/dex/pallet/src/lib.rs index e079a58f3..352205029 100644 --- a/substrate/dex/pallet/src/lib.rs +++ b/substrate/dex/pallet/src/lib.rs @@ -94,7 +94,8 @@ use sp_std::prelude::*; pub use types::*; pub use weights::WeightInfo; -#[allow(clippy::cast_possible_truncation)] // TODO: Investigate why Substrate generates this +// TODO: Investigate why Substrate generates these +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] #[frame_support::pallet] pub mod pallet { use super::*; diff --git a/substrate/dex/pallet/src/tests.rs b/substrate/dex/pallet/src/tests.rs index 8e75ef804..a1809b738 100644 --- a/substrate/dex/pallet/src/tests.rs +++ b/substrate/dex/pallet/src/tests.rs @@ -74,7 +74,7 @@ fn check_pool_accounts_dont_collide() { for coin in coins() { let account = Dex::get_pool_account(coin); if map.contains(&account) { - panic!("Collision at {:?}", coin); + panic!("Collision at {coin:?}"); } map.insert(account); } diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index c9b339a3e..3ec63ae58 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -9,7 +9,8 @@ use serai_primitives::{BlockHash, NetworkId}; pub use in_instructions_primitives as primitives; use primitives::*; -#[allow(clippy::cast_possible_truncation)] // TODO: Investigate why Substrate generates this +// TODO: Investigate why Substrate generates these +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] #[frame_support::pallet] pub mod pallet { use sp_std::vec; diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index a61411801..91df761b5 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -100,7 +100,7 @@ pub fn run() -> sc_cli::Result<()> { if config.role.is_authority() { config.state_pruning = Some(PruningMode::ArchiveAll); } - service::new_full(config).await.map_err(sc_cli::Error::Service) + service::new_full(config).map_err(sc_cli::Error::Service) }), } } diff --git a/substrate/node/src/service.rs b/substrate/node/src/service.rs index 694455433..4ae4a450b 100644 --- a/substrate/node/src/service.rs +++ b/substrate/node/src/service.rs @@ -149,7 +149,7 @@ pub fn new_partial(config: &Configuration) -> Result Result { +pub fn new_full(config: Configuration) -> Result { let sc_service::PartialComponents { client, backend, diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index 51ba7e547..94ea6a7a1 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -65,8 +65,7 @@ impl Coin { match self { Coin::Serai => NetworkId::Serai, Coin::Bitcoin => NetworkId::Bitcoin, - Coin::Ether => NetworkId::Ethereum, - Coin::Dai => NetworkId::Ethereum, + Coin::Ether | Coin::Dai => 
NetworkId::Ethereum, Coin::Monero => NetworkId::Monero, } } @@ -93,11 +92,8 @@ impl Coin { pub fn decimals(&self) -> u32 { match self { - Coin::Serai => 8, - Coin::Bitcoin => 8, // Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s - Coin::Ether => 8, - Coin::Dai => 8, + Coin::Serai | Coin::Bitcoin | Coin::Ether | Coin::Dai => 8, Coin::Monero => 12, } } diff --git a/substrate/primitives/src/tx.rs b/substrate/primitives/src/tx.rs index bb3de1f4f..2b1e1e84a 100644 --- a/substrate/primitives/src/tx.rs +++ b/substrate/primitives/src/tx.rs @@ -46,7 +46,7 @@ mod _serde { fn deserialize>(de: D) -> Result { let bytes = sp_core::bytes::deserialize(de)?; scale::Decode::decode(&mut &bytes[..]) - .map_err(|e| serde::de::Error::custom(format!("invalid transaction: {}", e))) + .map_err(|e| serde::de::Error::custom(format!("invalid transaction: {e}"))) } } } diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index 26f385181..b17f4b434 100644 --- a/substrate/runtime/src/lib.rs +++ b/substrate/runtime/src/lib.rs @@ -159,10 +159,8 @@ impl Contains for CallFilter { // All of these pallets are our own, and all of their written calls are intended to be called RuntimeCall::Coins(call) => !matches!(call, coins::Call::__Ignore(_, _)), RuntimeCall::LiquidityTokens(call) => match call { - coins::Call::transfer { .. } => true, - coins::Call::burn { .. } => true, - coins::Call::burn_with_instruction { .. } => false, - coins::Call::__Ignore(_, _) => false, + coins::Call::transfer { .. } | coins::Call::burn { .. } => true, + coins::Call::burn_with_instruction { .. } | coins::Call::__Ignore(_, _) => false, }, RuntimeCall::Dex(call) => !matches!(call, dex::Call::__Ignore(_, _)), RuntimeCall::ValidatorSets(call) => !matches!(call, validator_sets::Call::__Ignore(_, _)), @@ -170,17 +168,15 @@ impl Contains for CallFilter { RuntimeCall::Signals(call) => !matches!(call, signals::Call::__Ignore(_, _)), RuntimeCall::Babe(call) => match call { - babe::Call::report_equivocation { .. } => true, + babe::Call::report_equivocation { .. } | babe::Call::report_equivocation_unsigned { .. } => true, - babe::Call::plan_config_change { .. } => false, - babe::Call::__Ignore(_, _) => false, + babe::Call::plan_config_change { .. } | babe::Call::__Ignore(_, _) => false, }, RuntimeCall::Grandpa(call) => match call { - grandpa::Call::report_equivocation { .. } => true, + grandpa::Call::report_equivocation { .. } | grandpa::Call::report_equivocation_unsigned { .. } => true, - grandpa::Call::note_stalled { .. } => false, - grandpa::Call::__Ignore(_, _) => false, + grandpa::Call::note_stalled { .. } | grandpa::Call::__Ignore(_, _) => false, }, } } diff --git a/substrate/signals/pallet/src/lib.rs b/substrate/signals/pallet/src/lib.rs index 60fb35b17..575bc6479 100644 --- a/substrate/signals/pallet/src/lib.rs +++ b/substrate/signals/pallet/src/lib.rs @@ -149,7 +149,7 @@ pub mod pallet { // Returns true if this network's current set is in favor of the signal. // // Must only be called for networks which have a set decided. 
- fn tally_for_network(signal_id: SignalId, network: NetworkId) -> Result> { + fn tally_for_network(signal_id: SignalId, network: NetworkId) -> bool { let this_network_session = VsPallet::::latest_decided_session(network).unwrap(); let this_set = ValidatorSet { network, session: this_network_session }; @@ -197,18 +197,18 @@ pub mod pallet { SetsInFavor::::set((signal_id, this_set), Some(())); Self::deposit_event(Event::SetInFavor { signal_id, set: this_set }); } - Ok(true) + true } else { if SetsInFavor::::contains_key((signal_id, this_set)) { // This should no longer be under the current tally SetsInFavor::::remove((signal_id, this_set)); Self::deposit_event(Event::SetNoLongerInFavor { signal_id, set: this_set }); } - Ok(false) + false } } - fn tally_for_all_networks(signal_id: SignalId) -> Result> { + fn tally_for_all_networks(signal_id: SignalId) -> bool { let mut total_in_favor_stake = 0; let mut total_allocated_stake = 0; for network in serai_primitives::NETWORKS { @@ -226,10 +226,8 @@ pub mod pallet { total_allocated_stake += network_stake.0; } - Ok( - total_in_favor_stake >= - (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR), - ) + total_in_favor_stake >= + (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR) } fn revoke_favor_internal( @@ -247,7 +245,7 @@ pub mod pallet { // Technically, this tally may make the network in favor and justify re-tallying for all // networks // Its assumed not to - Self::tally_for_network(signal_id, for_network)?; + Self::tally_for_network(signal_id, for_network); Ok(()) } } @@ -378,7 +376,7 @@ pub mod pallet { // Check if the network is in favor // tally_for_network expects the network to be active, which is implied by being in the // latest decided set - let network_in_favor = Self::tally_for_network(signal_id, for_network)?; + let network_in_favor = Self::tally_for_network(signal_id, for_network); // If this network is in favor, check if enough networks are // We could optimize this by only running the following code when the network is *newly* in @@ -387,7 +385,7 @@ pub mod pallet { // to each other, any new votes will cause a re-tally if network_in_favor { // If enough are, lock in the signal - if Self::tally_for_all_networks(signal_id)? { + if Self::tally_for_all_networks(signal_id) { match signal_id { SignalId::Retirement(signal_id) => { LockedInRetirement::::set(Some(( diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 8707fbe1a..73cc6263c 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -351,9 +351,8 @@ pub mod pallet { // Update CurrentSession let session = { - let new_session = CurrentSession::::get(network) - .map(|session| Session(session.0 + 1)) - .unwrap_or(Session(0)); + let new_session = + CurrentSession::::get(network).map_or(Session(0), |session| Session(session.0 + 1)); CurrentSession::::set(network, Some(new_session)); new_session }; diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index 316c8d4bc..df4f93761 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -307,12 +307,11 @@ impl Processor { block_number, }, ) => { - let block = match id { - SubstrateSignId { - id: SubstrateSignableId::CosigningSubstrateBlock(block), - .. - } => block, - _ => panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID"), + let SubstrateSignId { + id: SubstrateSignableId::CosigningSubstrateBlock(block), .. 
+ } = id + else { + panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") }; let new_cosign = CurrentCosign { block_number, block }; diff --git a/tests/coordinator/src/tests/batch.rs b/tests/coordinator/src/tests/batch.rs index ea4555e6e..c5f3c52c5 100644 --- a/tests/coordinator/src/tests/batch.rs +++ b/tests/coordinator/src/tests/batch.rs @@ -99,7 +99,7 @@ pub async fn batch( participants.insert(known_signer_i); participants } - other => panic!("coordinator didn't send back SubstratePreprocesses: {:?}", other), + other => panic!("coordinator didn't send back SubstratePreprocesses: {other:?}"), }; for i in participants.clone() { diff --git a/tests/docker/src/lib.rs b/tests/docker/src/lib.rs index 572df4563..c25009daa 100644 --- a/tests/docker/src/lib.rs +++ b/tests/docker/src/lib.rs @@ -70,8 +70,7 @@ pub fn build(name: String) { // Check any additionally specified paths let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { - "bitcoin" => vec![], - "monero" => vec![], + "bitcoin" | "monero" => vec![], "message-queue" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), @@ -94,12 +93,7 @@ pub fn build(name: String) { meta(repo_path.join("message-queue")), meta(repo_path.join("coordinator")), ], - "runtime" => vec![ - meta(repo_path.join("common")), - meta(repo_path.join("crypto")), - meta(repo_path.join("substrate")), - ], - "serai" => vec![ + "runtime" | "serai" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), meta(repo_path.join("substrate")), @@ -132,7 +126,7 @@ pub fn build(name: String) { if let Some(last_modified) = last_modified { if last_modified < created_time { - println!("{} was built after the most recent source code edits, assuming built.", name); + println!("{name} was built after the most recent source code edits, assuming built."); built_lock.insert(name, true); return; } diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index 856834fa9..c90ed5dfc 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -395,7 +395,7 @@ async fn mint_and_burn_test() { ), 1_100_000_000_000, )], - Change::new(&view_pair, false), + &Change::new(&view_pair, false), vec![Shorthand::transfer(None, serai_addr).encode()], rpc.get_fee(Protocol::v16, FeePriority::Low).await.unwrap(), ) diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index a318851eb..511382abb 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -314,7 +314,7 @@ impl Coordinator { let res: Option = rpc.rpc_call("submitblock", serde_json::json!([hex::encode(block)])).await.unwrap(); if let Some(err) = res { - panic!("submitblock failed: {}", err); + panic!("submitblock failed: {err}"); } } NetworkId::Ethereum => todo!(), diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index db02686e5..a54703ce9 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -361,7 +361,7 @@ impl Wallet { None, these_inputs.drain(..).zip(decoys.drain(..)).collect(), vec![(to_addr, AMOUNT)], - Change::new(view_pair, false), + &Change::new(view_pair, false), data, rpc.get_fee(Protocol::v16, FeePriority::Low).await.unwrap(), ) diff --git a/tests/reproducible-runtime/src/lib.rs b/tests/reproducible-runtime/src/lib.rs index 2a7f7f51d..3421026db 100644 --- a/tests/reproducible-runtime/src/lib.rs +++ b/tests/reproducible-runtime/src/lib.rs @@ -96,6 +96,6 
@@ pub fn reproducibly_builds() {
     for res in res.clone() {
       identical.insert(res.unwrap());
     }
-    assert_eq!(identical.len(), 1, "got different runtime hashes {:?}", res);
+    assert_eq!(identical.len(), 1, "got different runtime hashes {res:?}");
   });
 }
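// A recurring change in this patch is inlining positional format arguments into the format
// string (clippy::uninlined_format_args), as in the final hunk above. A minimal sketch of
// that rewrite, using a hypothetical helper rather than anything from the test itself:
fn report(res: &[u8; 32]) -> String {
  // Before: format!("got different runtime hashes {:?}", res)
  format!("got different runtime hashes {res:?}")
}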