diff --git a/.buildkite/scripts/build-downstream-projects.sh b/.buildkite/scripts/build-downstream-projects.sh
index c1e7405e1325d6..c7291a3fc3037e 100755
--- a/.buildkite/scripts/build-downstream-projects.sh
+++ b/.buildkite/scripts/build-downstream-projects.sh
@@ -9,6 +9,6 @@ source "$here"/common.sh
 agent="${1-solana}"

 group "downstream projects" \
-  '{ "name": "example-helloworld", "command": "./ci/downstream-projects/run-example-helloworld.sh", "timeout_in_minutes": 30, "agent": "'"$agent"'" }'
-#  '{ "name": "spl", "command": "./ci/downstream-projects/run-spl.sh", "timeout_in_minutes": 30, "agent": "'"$agent"'" }' \
+  '{ "name": "example-helloworld", "command": "./ci/downstream-projects/run-example-helloworld.sh", "timeout_in_minutes": 30, "agent": "'"$agent"'" }' \
+  '{ "name": "spl", "command": "./ci/downstream-projects/run-spl.sh", "timeout_in_minutes": 30, "agent": "'"$agent"'" }'
 #  '{ "name": "openbook-dex", "command": "./ci/downstream-projects/run-openbook-dex.sh", "timeout_in_minutes": 30, "agent": "'"$agent"'" }' \
diff --git a/Cargo.lock b/Cargo.lock
index a2ca8f9d15fc4a..fba1d9ed570e0e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -218,7 +218,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565"
 dependencies = [
  "num-bigint 0.4.3",
  "num-traits",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -254,7 +254,7 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -315,7 +315,7 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
  "synstructure",
@@ -327,7 +327,7 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -354,9 +354,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"

 [[package]]
 name = "async-channel"
-version = "1.8.0"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833"
+checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
 dependencies = [
  "concurrent-queue",
  "event-listener",
@@ -402,7 +402,7 @@ version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -413,9 +413,9 @@ version = "0.1.71"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
- "syn 2.0.23",
+ "syn 2.0.25",
 ]
@@ -550,12 +550,12 @@ dependencies = [
  "lazycell",
  "peeking_take_while",
  "prettyplease 0.2.4",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "regex",
  "rustc-hash",
  "shlex",
- "syn 2.0.23",
+ "syn 2.0.25",
 ]
@@ -683,7 +683,7 @@ dependencies = [
  "borsh-derive-internal 0.9.3",
  "borsh-schema-derive-internal 0.9.3",
  "proc-macro-crate 0.1.5",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "syn 1.0.109",
 ]
@@ -696,7 +696,7 @@ dependencies = [
  "borsh-derive-internal 0.10.3",
  "borsh-schema-derive-internal 0.10.3",
  "proc-macro-crate 0.1.5",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "syn 1.0.109",
 ]
@@ -706,7 +706,7 @@ version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -717,7 +717,7 @@ version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -728,7 +728,7 @@ version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -739,7 +739,7 @@ version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -845,7 +845,7 @@ version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -1061,7 +1061,7 @@ checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
 dependencies = [
  "heck 0.4.0",
  "proc-macro-error",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -1151,7 +1151,7 @@ version = "0.2.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "unicode-xid 0.2.2",
 ]
@@ -1403,10 +1403,10 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb"
 dependencies = [
  "fnv",
  "ident_case",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "strsim 0.10.0",
- "syn 2.0.23",
+ "syn 2.0.25",
 ]
@@ -1417,7 +1417,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a"
 dependencies = [
  "darling_core",
  "quote 1.0.29",
- "syn 2.0.23",
+ "syn 2.0.25",
 ]
@@ -1483,7 +1483,7 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
 dependencies = [
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "syn 1.0.109",
 ]
@@ -1495,7 +1495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df"
 dependencies = [
  "convert_case",
- "proc-macro2 1.0.63",
+ "proc-macro2 1.0.64",
  "quote 1.0.29",
  "rustc_version 0.3.3",
  "syn 1.0.109",
@@ -1584,7 +1584,7 @@ version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886"
 dependencies = [
- "proc-macro2 1.0.63",
"proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1666,7 +1666,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1707,9 +1707,9 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -1720,7 +1720,7 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" dependencies = [ "num-bigint 0.4.3", "num-traits", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1984,9 +1984,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -2294,9 +2294,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hidapi" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f841dbb77615e116fb2ca38044b42310370f0d093c774a72361670ff2ae431b" +checksum = "794c84123f70c5ac93c45f0fed6edd39db80aab71a62097982e4ab13b43e8bb7" dependencies = [ "cc", "libc", @@ -2689,7 +2689,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3087,7 +3087,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3218,7 +3218,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3301,7 +3301,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3313,9 +3313,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -3387,7 +3387,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3463,7 +3463,7 @@ checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 
1.0.29", "syn 1.0.109", ] @@ -3617,7 +3617,7 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3668,7 +3668,7 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3802,7 +3802,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "syn 1.0.109", ] @@ -3812,8 +3812,8 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - "proc-macro2 1.0.63", - "syn 2.0.23", + "proc-macro2 1.0.64", + "syn 2.0.25", ] [[package]] @@ -3842,7 +3842,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", "version_check", @@ -3854,7 +3854,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "version_check", ] @@ -3870,9 +3870,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" dependencies = [ "unicode-ident", ] @@ -3967,7 +3967,7 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3980,7 +3980,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -4101,7 +4101,7 @@ version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", ] [[package]] @@ -4319,13 +4319,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-automata 0.3.0", + "regex-automata 0.3.2", "regex-syntax 0.7.3", ] @@ -4337,9 +4337,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" +checksum = 
"83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4642,7 +4642,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -4719,9 +4719,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "d614f89548720367ded108b3c843be93f3a341e22d5674ca0dd5cd57f34926af" dependencies = [ "serde_derive", ] @@ -4747,13 +4747,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "d4fe589678c688e44177da4f27152ee2d190757271dc7f1d5b6b9f68d869d641" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -4796,9 +4796,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -4847,7 +4847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b6f5d1c3087fb119617cff2966fe3808a80e5eb59a8c1601d5994d66f4346a5" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -5620,6 +5620,7 @@ dependencies = [ "base64 0.21.2", "bincode", "bs58", + "bytes", "chrono", "crossbeam-channel", "dashmap 4.0.2", @@ -5634,6 +5635,7 @@ dependencies = [ "matches", "min-max-heap", "num_enum 0.6.1", + "quinn", "rand 0.7.3", "rand_chacha 0.2.2", "raptorq", @@ -5848,10 +5850,10 @@ dependencies = [ name = "solana-frozen-abi-macro" version = "1.17.0" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustc_version 0.4.0", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -6842,10 +6844,10 @@ name = "solana-sdk-macro" version = "1.17.0" dependencies = [ "bs58", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustversion", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -7170,7 +7172,6 @@ dependencies = [ "rayon", "rcgen", "rustls 0.20.8", - "solana-client", "solana-entry", "solana-gossip", "solana-ledger", @@ -7558,7 +7559,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustversion", "syn 1.0.109", @@ -7593,18 +7594,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.23" +version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = 
"15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "unicode-ident", ] @@ -7621,7 +7622,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.2", @@ -7705,7 +7706,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -7756,7 +7757,7 @@ checksum = "e45b7bf6e19353ddd832745c8fcf77a17a93171df7151187f26623f2b75b5b26" dependencies = [ "cfg-if 1.0.0", "proc-macro-error", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -7791,9 +7792,9 @@ version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -7940,7 +7941,7 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -8131,7 +8132,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "prost-build 0.9.0", "quote 1.0.29", "syn 1.0.109", @@ -8144,7 +8145,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ "prettyplease 0.1.9", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "prost-build 0.11.4", "quote 1.0.29", "syn 1.0.109", @@ -8220,7 +8221,7 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -8533,9 +8534,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", "wasm-bindgen-shared", ] @@ -8567,9 +8568,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8932,9 +8933,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 15b14e0de819d6..0fbe1223ff9c19 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,7 +135,7 @@ array-bytes = "=1.4.1" arrayref = "0.3.7" assert_cmd = "2.0" assert_matches = "1.5.0" -async-channel = 
"1.8.0" +async-channel = "1.9.0" async-mutex = "1.4.0" async-trait = "0.1.71" atty = "0.2.11" @@ -200,7 +200,7 @@ gethostname = "0.2.3" getrandom = "0.1.14" goauth = "0.13.1" hex = "0.4.3" -hidapi = { version = "2.3.3", default-features = false } +hidapi = { version = "2.4.0", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.9" @@ -253,7 +253,7 @@ pickledb = { version = "0.5.1", default-features = false } pkcs8 = "0.8.0" predicates = "2.1" pretty-hex = "0.3.0" -proc-macro2 = "1.0.63" +proc-macro2 = "1.0.64" proptest = "1.2" prost = "0.11.9" prost-types = "0.11.9" @@ -270,7 +270,7 @@ raptorq = "1.7.0" rayon = "1.7.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" -regex = "1.9.0" +regex = "1.9.1" rolling-file = "0.2.0" reqwest = { version = "0.11.17", default-features = false } rpassword = "7.2" @@ -279,7 +279,7 @@ rustls = { version = "0.20.8", default-features = false } rustversion = "1.0.13" scopeguard = "1.1.0" semver = "1.0.17" -serde = "1.0.166" +serde = "1.0.168" serde_bytes = "0.11.11" serde_derive = "1.0.103" serde_json = "1.0.100" diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index 55d5833bf13943..f3d1e440426a1f 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -733,14 +733,11 @@ impl<'b, T: Clone + Copy + 'static> Bucket { pub fn insert(&mut self, key: &Pubkey, value: (&[T], RefCount)) { let (new, refct) = value; loop { - let rv = self.try_write(key, new.iter(), new.len(), refct); - match rv { - Ok(_) => return, - Err(err) => { - self.grow(err); - self.handle_delayed_grows(); - } - } + let Err(err) = self.try_write(key, new.iter(), new.len(), refct) else { + return; + }; + self.grow(err); + self.handle_delayed_grows(); } } diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 8464e797b55fb4..44b7481bc67bce 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -326,7 +326,7 @@ pull_or_push_steps() { # Run the full test suite by default, skipping only if modifications are local # to some particular areas of the tree - if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^.gitbook; then + if affects_other_than ^.mergify .md$ ^docs/ ^.gitbook; then all_test_steps fi diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml index 7dfa2a701c7967..83ebca7d9471a8 100644 --- a/ci/buildkite-secondary.yml +++ b/ci/buildkite-secondary.yml @@ -28,6 +28,9 @@ steps: command: "ci/publish-crate.sh" agents: queue: "release-build" + retry: + manual: + permit_on_passed: true timeout_in_minutes: 240 branches: "!master" - name: "publish tarball (aarch64-apple-darwin)" diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 2e4299f768cd7f..57a3d3de3b2049 100644 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -284,7 +284,7 @@ pull_or_push_steps() { # Run the full test suite by default, skipping only if modifications are local # to some particular areas of the tree - if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^.gitbook; then + if affects_other_than ^.mergify .md$ ^docs/ ^.gitbook; then all_test_steps fi diff --git a/ci/order-crates-for-publishing.py b/ci/order-crates-for-publishing.py index 05f572b2a912a8..855f0e89d17407 100755 --- a/ci/order-crates-for-publishing.py +++ b/ci/order-crates-for-publishing.py @@ -21,6 +21,56 @@ def load_metadata(): return json.loads(subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]) +# Consider a situation where a crate now wants to use already existing +# 
developing-oriented library code for their integration tests and benchmarks, +# like creating malformed data or omitting signature verifications. Ideally, +# the code should have been guarded under the special feature +# `dev-context-only-utils` to avoid accidental misuse for production code path. +# +# In this case, the feature needs to be defined then activated for the crate +# itself. To that end, the crate actually needs to depend on itself as a +# dev-dependency with `dev-context-only-utils` activated, so that the feature +# is conditionally activated only for integration tests and benchmarks. In this +# way, other crates won't see the feature activated even if they normal-depend +# on the crate. +# +# This self-referencing dev-dependency can be thought of a variant of +# dev-dependency cycles and it's well supported by cargo. The only exception is +# when publishing. In general, cyclic dev-dependency doesn't work nicely with +# publishing: https://github.com/rust-lang/cargo/issues/4242 . +# +# However, there's a work around supported by cargo. Namely, it will ignore and +# strip these cyclic dev-dependencies when publishing, if explicit version +# isn't specified: https://github.com/rust-lang/cargo/pull/7333 (Released in +# rust 1.40.0: https://releases.rs/docs/1.40.0/#cargo ) +# +# This script follows the same safe discarding logic to exclude these +# special-cased dev dependencies from its `dependency_graph` and further +# processing. +def is_self_dev_dep_with_dev_context_only_utils(package, dependency, wrong_self_dev_dependencies): + no_explicit_version = '*' + + is_special_cased = False + if (dependency['kind'] == 'dev' and + dependency['name'] == package['name'] and + 'dev-context-only-utils' in dependency['features'] and + 'path' in dependency): + is_special_cased = True + if dependency['req'] != no_explicit_version: + # it's likely `{ workspace = true, ... }` is used, which implicitly pulls the + # version in... + wrong_self_dev_dependencies.append(dependency) + + return is_special_cased + +def should_add(package, dependency, wrong_self_dev_dependencies): + related_to_solana = dependency['name'].startswith('solana') + self_dev_dep_with_dev_context_only_utils = is_self_dev_dep_with_dev_context_only_utils( + package, dependency, wrong_self_dev_dependencies + ) + + return related_to_solana and not self_dev_dep_with_dev_context_only_utils + def get_packages(): metadata = load_metadata() @@ -28,9 +78,13 @@ def get_packages(): # Build dictionary of packages and their immediate solana-only dependencies dependency_graph = dict() + wrong_self_dev_dependencies = list() + for pkg in metadata['packages']: manifest_path[pkg['name']] = pkg['manifest_path']; - dependency_graph[pkg['name']] = [x['name'] for x in pkg['dependencies'] if x['name'].startswith('solana')]; + dependency_graph[pkg['name']] = [ + x['name'] for x in pkg['dependencies'] if should_add(pkg, x, wrong_self_dev_dependencies) + ]; # Check for direct circular dependencies circular_dependencies = set() @@ -41,8 +95,13 @@ def get_packages(): for dependency in circular_dependencies: sys.stderr.write('Error: Circular dependency: {}\n'.format(dependency)) + for dependency in wrong_self_dev_dependencies: + sys.stderr.write('Error: wrong dev-context-only-utils circular dependency. 
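The feature the new comment describes is defined on the Cargo side later in this diff (`ledger/Cargo.toml` gains `dev-context-only-utils = []`). As a minimal, hypothetical sketch of what the Rust side of the pattern looks like — the helper name below is made up and not part of this PR:

```rust
// Compiled only when the crate's own tests/benches activate the feature
// through the self dev-dependency described above, e.g.:
//   [dev-dependencies]
//   solana-ledger = { path = ".", features = ["dev-context-only-utils"] }
#[cfg(feature = "dev-context-only-utils")]
pub fn make_malformed_payload_for_tests() -> Vec<u8> {
    // Deliberately too short for any production deserializer to accept.
    vec![0u8; 8]
}
```

Because the self dev-dependency leaves the version implicit (`path = "."` only), cargo strips the cycle at publish time — which is exactly the invariant the script above now enforces.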
diff --git a/ci/test-checks.sh b/ci/test-checks.sh
index 86fc49e0c12387..029df388bc5aa8 100755
--- a/ci/test-checks.sh
+++ b/ci/test-checks.sh
@@ -90,6 +90,7 @@ _ scripts/cargo-for-all-lock-files.sh -- "+${rust_nightly}" clippy --workspace \
   --deny=warnings \
   --deny=clippy::default_trait_access \
   --deny=clippy::integer_arithmetic \
+  --deny=clippy::manual_let_else \
   --deny=clippy::used_underscore_binding \
   "${nightly_clippy_allows[@]}"

@@ -103,6 +104,7 @@ _ scripts/cargo-for-all-lock-files.sh -- clippy --workspace --tests --bins --examples \
   --deny=warnings \
   --deny=clippy::default_trait_access \
   --deny=clippy::integer_arithmetic \
+  --deny=clippy::manual_let_else \
   --deny=clippy::used_underscore_binding

 if [[ -n $CI ]]; then
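The newly denied `clippy::manual_let_else` lint accounts for most of the mechanical Rust churn in the rest of this diff: `match`/`if let` arms that only destructure or bail early become `let ... else`. An illustrative sketch of the rewrite (not code from this PR):

```rust
fn first_even(xs: &[u32]) -> Option<u32> {
    // Before: `let x = match iter.find(..) { Some(x) => x, None => return None };`
    // After: the happy path stays unindented, and the else arm must diverge
    // (return, break, continue, or panic).
    let Some(x) = xs.iter().find(|x| **x % 2 == 0) else {
        return None;
    };
    Some(*x)
}
```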
diff --git a/cli/src/program.rs b/cli/src/program.rs
index 8080de818efdca..05ccc04256ccf4 100644
--- a/cli/src/program.rs
+++ b/cli/src/program.rs
@@ -2023,7 +2023,7 @@ fn read_and_verify_elf(program_location: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>>
     ) -> Result<()> {
-        let current_working_bank = match current_working_bank {
-            Some(current_working_bank) => current_working_bank,
-            None => {
-                // We are not the leader!
-                if let Some(bank_vote_sender_state) = bank_vote_sender_state_option {
-                    // This ensures we report the last slot's metrics
-                    bank_vote_sender_state.report_metrics();
-                    *bank_vote_sender_state_option = None;
-                }
-                return Ok(());
-            }
+        let Some(current_working_bank) = current_working_bank else {
+            // We are not the leader!
+            if let Some(bank_vote_sender_state) = bank_vote_sender_state_option {
+                // This ensures we report the last slot's metrics
+                bank_vote_sender_state.report_metrics();
+                *bank_vote_sender_state_option = None;
+            }
+            return Ok(());
         };

         // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS`
         if let Some(bank_vote_sender_state) = bank_vote_sender_state_option {
diff --git a/core/src/cluster_slots_service/cluster_slots.rs b/core/src/cluster_slots_service/cluster_slots.rs
index 10b0291dbced3f..137e153a7a9722 100644
--- a/core/src/cluster_slots_service/cluster_slots.rs
+++ b/core/src/cluster_slots_service/cluster_slots.rs
@@ -186,9 +186,8 @@ impl ClusterSlots {
                 })
                 .collect()
         };
-        let slot_peers = match self.lookup(slot) {
-            None => return stakes,
-            Some(slot_peers) => slot_peers,
+        let Some(slot_peers) = self.lookup(slot) else {
+            return stakes;
         };
         let slot_peers = slot_peers.read().unwrap();
         repair_peers
diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index db03bb7a491983..7b2622ac3fc438 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -746,9 +746,8 @@ impl Tower {
         latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
         heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
     ) -> SwitchForkDecision {
-        let (last_voted_slot, last_voted_hash) = match self.last_voted_slot_hash() {
-            None => return SwitchForkDecision::SameFork,
-            Some(slot_hash) => slot_hash,
+        let Some((last_voted_slot, last_voted_hash)) = self.last_voted_slot_hash() else {
+            return SwitchForkDecision::SameFork;
         };
         let root = self.root();
         let empty_ancestors = HashSet::default();
diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs
index a66544c28ce058..e5d24df75b160c 100644
--- a/core/src/repair/ancestor_hashes_service.rs
+++ b/core/src/repair/ancestor_hashes_service.rs
@@ -358,31 +358,22 @@ impl AncestorHashesService {
         ancestor_socket: &UdpSocket,
     ) -> Option {
         let from_addr = packet.meta().socket_addr();
-        let packet_data = match packet.data(..) {
-            Some(data) => data,
-            None => {
-                stats.invalid_packets += 1;
-                return None;
-            }
+        let Some(packet_data) = packet.data(..) else {
+            stats.invalid_packets += 1;
+            return None;
         };
         let mut cursor = Cursor::new(packet_data);
-        let response = match deserialize_from_with_limit(&mut cursor) {
-            Ok(response) => response,
-            Err(_) => {
-                stats.invalid_packets += 1;
-                return None;
-            }
+        let Ok(response) = deserialize_from_with_limit(&mut cursor) else {
+            stats.invalid_packets += 1;
+            return None;
         };
         match response {
             AncestorHashesResponse::Hashes(ref hashes) => {
                 // deserialize trailing nonce
-                let nonce = match deserialize_from_with_limit(&mut cursor) {
-                    Ok(nonce) => nonce,
-                    Err(_) => {
-                        stats.invalid_packets += 1;
-                        return None;
-                    }
+                let Ok(nonce) = deserialize_from_with_limit(&mut cursor) else {
+                    stats.invalid_packets += 1;
+                    return None;
                 };

                 // verify that packet does not contain extraneous data
diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs
index 69b9b1f2eb9146..9d58a5c682f27d 100644
--- a/core/src/repair/duplicate_repair_status.rs
+++ b/core/src/repair/duplicate_repair_status.rs
@@ -884,10 +884,9 @@ pub mod tests {

         // We don't have the earlier ancestors because we just started up, however sample should
         // not be rejected as invalid.
-        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) = run_add_multiple_correct_and_incorrect_responses(
-            vec![],
-            &mut test_setup,
-        ) else {
+        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) =
+            run_add_multiple_correct_and_incorrect_responses(vec![], &mut test_setup)
+        else {
             panic!("Incorrect decision")
         };
         assert_eq!(
@@ -938,10 +937,12 @@ pub mod tests {
         )];

         // We have no entries in the blockstore, so all the ancestors will be missing
-        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) = run_add_multiple_correct_and_incorrect_responses(
-            desired_incorrect_responses,
-            &mut test_setup,
-        ) else {
+        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) =
+            run_add_multiple_correct_and_incorrect_responses(
+                desired_incorrect_responses,
+                &mut test_setup,
+            )
+        else {
             panic!("Incorrect decision")
         };
         assert_eq!(
@@ -1089,10 +1090,9 @@ pub mod tests {
         }

         // All the ancestors matched, only the requested slot should be dumped
-        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) = run_add_multiple_correct_and_incorrect_responses(
-            vec![],
-            &mut test_setup,
-        ) else {
+        let DuplicateAncestorDecision::EarliestMismatchFound(repair_status) =
+            run_add_multiple_correct_and_incorrect_responses(vec![], &mut test_setup)
+        else {
             panic!("Incorrect decision")
         };
         assert_eq!(
@@ -1133,10 +1133,9 @@ pub mod tests {
             .add_tree(tree, true, true, 2, Hash::default());

         // All the ancestors matched, only the requested slot should be dumped
-        let DuplicateAncestorDecision::EarliestPrunedMismatchFound(repair_status) = run_add_multiple_correct_and_incorrect_responses(
-            vec![],
-            &mut test_setup,
-        ) else {
+        let DuplicateAncestorDecision::EarliestPrunedMismatchFound(repair_status) =
+            run_add_multiple_correct_and_incorrect_responses(vec![], &mut test_setup)
+        else {
             panic!("Incorrect decision")
         };
         assert_eq!(
diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs
index 735804a91046ac..b7b48cdfc077ec 100644
--- a/core/src/repair/serve_repair.rs
+++ b/core/src/repair/serve_repair.rs
@@ -867,25 +867,19 @@ impl ServeRepair {
         if u128::from(time_diff_ms) > SIGNED_REPAIR_TIME_WINDOW.as_millis() {
             return Err(Error::from(RepairVerifyError::TimeSkew));
         }
-        let leading_buf = match packet.data(..4) {
-            Some(buf) => buf,
-            None => {
-                debug_assert!(
-                    false,
-                    "request should have failed deserialization: {request:?}",
-                );
-                return Err(Error::from(RepairVerifyError::Malformed));
-            }
+        let Some(leading_buf) = packet.data(..4) else {
+            debug_assert!(
+                false,
+                "request should have failed deserialization: {request:?}",
+            );
+            return Err(Error::from(RepairVerifyError::Malformed));
         };
-        let trailing_buf = match packet.data(4 + SIGNATURE_BYTES..) {
-            Some(buf) => buf,
-            None => {
-                debug_assert!(
-                    false,
-                    "request should have failed deserialization: {request:?}",
-                );
-                return Err(Error::from(RepairVerifyError::Malformed));
-            }
+        let Some(trailing_buf) = packet.data(4 + SIGNATURE_BYTES..) else {
+            debug_assert!(
+                false,
+                "request should have failed deserialization: {request:?}",
+            );
+            return Err(Error::from(RepairVerifyError::Malformed));
         };
         let from_id = request.sender();
         let signed_data = [leading_buf, trailing_buf].concat();
@@ -978,11 +972,10 @@ impl ServeRepair {
             }
             stats.processed += 1;
-            let rsp = match Self::handle_repair(
-                recycler, &from_addr, blockstore, request, stats, ping_cache,
-            ) {
-                None => continue,
-                Some(rsp) => rsp,
+            let Some(rsp) =
+                Self::handle_repair(recycler, &from_addr, blockstore, request, stats, ping_cache)
+            else {
+                continue;
             };
             let num_response_packets = rsp.len();
             let num_response_bytes = rsp.iter().map(|p| p.meta().size).sum();
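For context on the buffer splitting in the verification above: judging from the `4 + SIGNATURE_BYTES` arithmetic, a signed repair request is laid out as a 4-byte leading buffer, then the signature, then the trailing payload, and the signature covers the leading and trailing parts concatenated. A hedged, standalone sketch of that slicing (the helper name and the 64-byte constant are assumptions, not taken from this diff):

```rust
const SIGNATURE_BYTES: usize = 64; // assumed ed25519 signature size

/// Split a serialized repair request into its signed portion and signature,
/// mirroring the `packet.data(..4)` / `packet.data(4 + SIGNATURE_BYTES..)` guards.
fn split_signed_request(bytes: &[u8]) -> Option<(Vec<u8>, &[u8])> {
    let leading = bytes.get(..4)?;
    let signature = bytes.get(4..4 + SIGNATURE_BYTES)?;
    let trailing = bytes.get(4 + SIGNATURE_BYTES..)?;
    // The signature is verified against leading + trailing concatenated.
    Some(([leading, trailing].concat(), signature))
}
```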
Unable to vote", + vote_account_pubkey, + bank.epoch() + ); + return None; + }; let authorized_voter_keypair = match authorized_voter_keypairs .iter() diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index eb3e1b5640aa0b..bc6b19520c082f 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -2,19 +2,23 @@ use { crate::repair::serve_repair::ServeRepair, - crossbeam_channel::{unbounded, Sender}, - solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol}, + bytes::Bytes, + crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, + itertools::Itertools, + solana_gossip::cluster_info::ClusterInfo, solana_ledger::shred::{should_discard_shred, ShredFetchStats}, - solana_perf::packet::{PacketBatch, PacketBatchRecycler, PacketFlags}, + solana_perf::packet::{PacketBatch, PacketBatchRecycler, PacketFlags, PACKETS_PER_BATCH}, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ clock::{Slot, DEFAULT_MS_PER_SLOT}, feature_set, + packet::PACKET_DATA_SIZE, + pubkey::Pubkey, }, - solana_streamer::streamer::{self, PacketBatchReceiver, StakedNodes, StreamerReceiveStats}, + solana_streamer::streamer::{self, PacketBatchReceiver, StreamerReceiveStats}, solana_turbine::cluster_nodes::check_feature_activation, std::{ - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -24,11 +28,7 @@ use { }, }; -const MAX_QUIC_CONNECTIONS_PER_PEER: usize = 8; -const MAX_STAKED_QUIC_CONNECTIONS: usize = 4000; -const MAX_UNSTAKED_QUIC_CONNECTIONS: usize = 2000; -const QUIC_WAIT_FOR_CHUNK_TIMEOUT: Duration = Duration::from_secs(5); -const QUIC_COALESCE_WAIT: Duration = Duration::from_millis(10); +const PACKET_COALESCE_DURATION: Duration = Duration::from_millis(1); pub(crate) struct ShredFetchStage { thread_hdls: Vec>, @@ -141,9 +141,9 @@ impl ShredFetchStage { packet_sender.clone(), recycler.clone(), Arc::new(StreamerReceiveStats::new("packet_modifier")), - Duration::from_millis(1), // coalesce - true, - None, + PACKET_COALESCE_DURATION, + true, // use_pinned_memory + None, // in_vote_only_mode ) }) .collect(); @@ -171,13 +171,12 @@ impl ShredFetchStage { #[allow(clippy::too_many_arguments)] pub(crate) fn new( sockets: Vec>, - quic_socket: UdpSocket, + quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, repair_socket: Arc, sender: Sender, shred_version: u16, bank_forks: Arc>, cluster_info: Arc, - staked_nodes: Arc>, turbine_disabled: Arc, exit: Arc, ) -> Self { @@ -200,12 +199,12 @@ impl ShredFetchStage { vec![repair_socket.clone()], exit.clone(), sender.clone(), - recycler, + recycler.clone(), bank_forks.clone(), shred_version, "shred_fetch_repair", PacketFlags::REPAIR, - Some((repair_socket, cluster_info.clone())), + Some((repair_socket, cluster_info)), turbine_disabled.clone(), ); @@ -213,33 +212,16 @@ impl ShredFetchStage { tvu_threads.push(tvu_filter); tvu_threads.push(repair_handler); - let keypair = cluster_info.keypair().clone(); - let ip_addr = cluster_info - .my_contact_info() - .tvu(Protocol::QUIC) - .expect("Operator must spin up node with valid (QUIC) TVU address") - .ip(); let (packet_sender, packet_receiver) = unbounded(); - let (_endpoint, join_handle) = solana_streamer::quic::spawn_server( - "quic_streamer_tvu", - quic_socket, - &keypair, - ip_addr, - packet_sender, - exit, - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes, - MAX_STAKED_QUIC_CONNECTIONS, - MAX_UNSTAKED_QUIC_CONNECTIONS, - QUIC_WAIT_FOR_CHUNK_TIMEOUT, - QUIC_COALESCE_WAIT, - ) - .unwrap(); - 
tvu_threads.push(join_handle); - - tvu_threads.push( + tvu_threads.extend([ Builder::new() - .name("solTvuFetchPMod".to_string()) + .name("solTvuRecvQuic".to_string()) + .spawn(|| { + receive_quic_datagrams(quic_endpoint_receiver, packet_sender, recycler, exit) + }) + .unwrap(), + Builder::new() + .name("solTvuFetchQuic".to_string()) .spawn(move || { Self::modify_packets( packet_receiver, @@ -253,8 +235,7 @@ impl ShredFetchStage { ) }) .unwrap(), - ); - + ]); Self { thread_hdls: tvu_threads, } @@ -268,6 +249,48 @@ impl ShredFetchStage { } } +fn receive_quic_datagrams( + quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, + sender: Sender, + recycler: PacketBatchRecycler, + exit: Arc, +) { + const RECV_TIMEOUT: Duration = Duration::from_secs(1); + while !exit.load(Ordering::Relaxed) { + let entry = match quic_endpoint_receiver.recv_timeout(RECV_TIMEOUT) { + Ok(entry) => entry, + Err(RecvTimeoutError::Timeout) => continue, + Err(RecvTimeoutError::Disconnected) => return, + }; + let mut packet_batch = + PacketBatch::new_with_recycler(&recycler, PACKETS_PER_BATCH, "receive_quic_datagrams"); + unsafe { + packet_batch.set_len(PACKETS_PER_BATCH); + }; + let deadline = Instant::now() + PACKET_COALESCE_DURATION; + let entries = std::iter::once(entry).chain( + std::iter::repeat_with(|| quic_endpoint_receiver.recv_deadline(deadline).ok()) + .while_some(), + ); + let size = entries + .filter(|(_, _, bytes)| bytes.len() <= PACKET_DATA_SIZE) + .zip(packet_batch.iter_mut()) + .map(|((_pubkey, addr, bytes), packet)| { + packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); + let meta = packet.meta_mut(); + meta.size = bytes.len(); + meta.set_socket_addr(&addr); + }) + .count(); + if size > 0 { + packet_batch.truncate(size); + if sender.send(packet_batch).is_err() { + return; + } + } + } +} + #[must_use] fn should_drop_merkle_shreds(shred_slot: Slot, root_bank: &Bank) -> bool { check_feature_activation( diff --git a/core/src/tpu.rs b/core/src/tpu.rs index c6db13cc7a194f..cc45a0d0b38fb9 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -17,6 +17,7 @@ use { tpu_entry_notifier::TpuEntryNotifier, validator::GeneratorConfig, }, + bytes::Bytes, crossbeam_channel::{unbounded, Receiver}, solana_client::connection_cache::{ConnectionCache, Protocol}, solana_gossip::cluster_info::ClusterInfo, @@ -25,7 +26,6 @@ use { entry_notifier_service::EntryNotifierSender, }, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, - solana_quic_client::QuicConnectionCache, solana_rpc::{ optimistically_confirmed_bank_tracker::BankNotificationSender, rpc_subscriptions::RpcSubscriptions, @@ -44,11 +44,12 @@ use { solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType}, std::{ collections::HashMap, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{atomic::AtomicBool, Arc, RwLock}, thread, time::Duration, }, + tokio::sync::mpsc::Sender as AsyncSender, }; // allow multiple connections for NAT and any open/close overlap @@ -102,7 +103,7 @@ impl Tpu { tpu_coalesce: Duration, cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, connection_cache: &Arc, - turbine_quic_connection_cache: Arc, + turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, keypair: &Keypair, log_messages_bytes_limit: Option, staked_nodes: &Arc>, @@ -256,7 +257,7 @@ impl Tpu { blockstore.clone(), bank_forks, shred_version, - turbine_quic_connection_cache, + turbine_quic_endpoint_sender, ); Self { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 89b31e7dcb8fba..040bac55f6a0b1 100644 --- 
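A rough sketch of the producer side of the channel that `receive_quic_datagrams` drains; this helper is hypothetical and only illustrates the contract implied above — one `(Pubkey, SocketAddr, Bytes)` triple per datagram, with anything larger than `PACKET_DATA_SIZE` silently dropped by the filter in the batching loop:

```rust
use {
    bytes::Bytes,
    crossbeam_channel::Sender,
    solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey},
    std::net::SocketAddr,
};

// Hypothetical: forward one received QUIC datagram into the fetch stage.
fn forward_datagram(
    sender: &Sender<(Pubkey, SocketAddr, Bytes)>,
    peer: Pubkey,
    addr: SocketAddr,
    datagram: Bytes,
) {
    debug_assert!(datagram.len() <= PACKET_DATA_SIZE); // oversized ones are dropped downstream
    // A send error only means the receiving end (the validator) has shut down.
    let _ = sender.send((peer, addr, datagram));
}
```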
diff --git a/core/src/tpu.rs b/core/src/tpu.rs
index c6db13cc7a194f..cc45a0d0b38fb9 100644
--- a/core/src/tpu.rs
+++ b/core/src/tpu.rs
@@ -17,6 +17,7 @@ use {
         tpu_entry_notifier::TpuEntryNotifier,
         validator::GeneratorConfig,
     },
+    bytes::Bytes,
     crossbeam_channel::{unbounded, Receiver},
     solana_client::connection_cache::{ConnectionCache, Protocol},
     solana_gossip::cluster_info::ClusterInfo,
@@ -25,7 +26,6 @@ use {
         entry_notifier_service::EntryNotifierSender,
     },
     solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry},
-    solana_quic_client::QuicConnectionCache,
     solana_rpc::{
         optimistically_confirmed_bank_tracker::BankNotificationSender,
         rpc_subscriptions::RpcSubscriptions,
@@ -44,11 +44,12 @@ use {
     solana_turbine::broadcast_stage::{BroadcastStage, BroadcastStageType},
     std::{
         collections::HashMap,
-        net::UdpSocket,
+        net::{SocketAddr, UdpSocket},
         sync::{atomic::AtomicBool, Arc, RwLock},
         thread,
         time::Duration,
     },
+    tokio::sync::mpsc::Sender as AsyncSender,
 };

 // allow multiple connections for NAT and any open/close overlap
@@ -102,7 +103,7 @@ impl Tpu {
         tpu_coalesce: Duration,
         cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender,
         connection_cache: &Arc<ConnectionCache>,
-        turbine_quic_connection_cache: Arc<QuicConnectionCache>,
+        turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
         keypair: &Keypair,
         log_messages_bytes_limit: Option<usize>,
         staked_nodes: &Arc<RwLock<StakedNodes>>,
@@ -256,7 +257,7 @@ impl Tpu {
             blockstore.clone(),
             bank_forks,
             shred_version,
-            turbine_quic_connection_cache,
+            turbine_quic_endpoint_sender,
         );

         Self {
diff --git a/core/src/tvu.rs b/core/src/tvu.rs
index 89b31e7dcb8fba..040bac55f6a0b1 100644
--- a/core/src/tvu.rs
+++ b/core/src/tvu.rs
@@ -24,6 +24,7 @@ use {
         warm_quic_cache_service::WarmQuicCacheService,
         window_service::WindowService,
     },
+    bytes::Bytes,
     crossbeam_channel::{unbounded, Receiver, Sender},
     solana_client::connection_cache::ConnectionCache,
     solana_geyser_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock,
@@ -36,7 +37,6 @@ use {
         entry_notifier_service::EntryNotifierSender,
         leader_schedule_cache::LeaderScheduleCache,
     },
     solana_poh::poh_recorder::PohRecorder,
-    solana_quic_client::QuicConnectionCache,
     solana_rpc::{
         max_slots::MaxSlots, optimistically_confirmed_bank_tracker::BankNotificationSenderConfig,
         rpc_subscriptions::RpcSubscriptions,
@@ -47,14 +47,14 @@ use {
         vote_sender_types::ReplayVoteSender,
     },
     solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair},
-    solana_streamer::streamer::StakedNodes,
     solana_turbine::retransmit_stage::RetransmitStage,
     std::{
         collections::HashSet,
-        net::UdpSocket,
+        net::{SocketAddr, UdpSocket},
         sync::{atomic::AtomicBool, Arc, RwLock},
         thread::{self, JoinHandle},
     },
+    tokio::sync::mpsc::Sender as AsyncSender,
 };

 pub struct Tvu {
@@ -74,7 +74,6 @@ pub struct Tvu {

 pub struct TvuSockets {
     pub fetch: Vec<UdpSocket>,
-    pub(crate) fetch_quic: UdpSocket,
     pub repair: UdpSocket,
     pub retransmit: Vec<UdpSocket>,
     pub ancestor_hashes_requests: UdpSocket,
@@ -137,13 +136,12 @@ impl Tvu {
         connection_cache: &Arc<ConnectionCache>,
         prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
         banking_tracer: Arc<BankingTracer>,
-        staked_nodes: Arc<RwLock<StakedNodes>>,
-        quic_connection_cache: Arc<QuicConnectionCache>,
+        turbine_quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
+        turbine_quic_endpoint_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>,
     ) -> Result<Self, String> {
         let TvuSockets {
             repair: repair_socket,
             fetch: fetch_sockets,
-            fetch_quic: fetch_quic_socket,
             retransmit: retransmit_sockets,
             ancestor_hashes_requests: ancestor_hashes_socket,
         } = sockets;
@@ -155,13 +153,12 @@ impl Tvu {
         let fetch_sockets: Vec<Arc<UdpSocket>> = fetch_sockets.into_iter().map(Arc::new).collect();
         let fetch_stage = ShredFetchStage::new(
             fetch_sockets,
-            fetch_quic_socket,
+            turbine_quic_endpoint_receiver,
             repair_socket.clone(),
             fetch_sender,
             tvu_config.shred_version,
             bank_forks.clone(),
             cluster_info.clone(),
-            staked_nodes,
             turbine_disabled,
             exit.clone(),
         );
@@ -182,7 +179,7 @@ impl Tvu {
             leader_schedule_cache.clone(),
             cluster_info.clone(),
             Arc::new(retransmit_sockets),
-            quic_connection_cache,
+            turbine_quic_endpoint_sender,
             retransmit_receiver,
             max_slots.clone(),
             Some(rpc_subscriptions.clone()),
@@ -369,10 +366,7 @@ impl Tvu {
 pub mod tests {
     use {
         super::*,
-        crate::{
-            consensus::tower_storage::FileTowerStorage,
-            validator::TURBINE_QUIC_CONNECTION_POOL_SIZE,
-        },
+        crate::consensus::tower_storage::FileTowerStorage,
         serial_test::serial,
         solana_gossip::cluster_info::{ClusterInfo, Node},
         solana_ledger::{
@@ -386,10 +380,7 @@ pub mod tests {
         solana_runtime::bank::Bank,
         solana_sdk::signature::{Keypair, Signer},
         solana_streamer::socket::SocketAddrSpace,
-        std::{
-            net::{IpAddr, Ipv4Addr},
-            sync::atomic::{AtomicU64, Ordering},
-        },
+        std::sync::atomic::{AtomicU64, Ordering},
     };

     #[ignore]
@@ -407,16 +398,9 @@ pub mod tests {
         let bank_forks = BankForks::new(Bank::new_for_tests(&genesis_config));

         let keypair = Arc::new(Keypair::new());
-        let quic_connection_cache = Arc::new(
-            solana_quic_client::new_quic_connection_cache(
-                "connection_cache_test",
-                &keypair,
-                IpAddr::V4(Ipv4Addr::LOCALHOST),
-                &Arc::<RwLock<StakedNodes>>::default(),
-                TURBINE_QUIC_CONNECTION_POOL_SIZE,
-            )
-            .unwrap(),
-        );
+        let (turbine_quic_endpoint_sender, _turbine_quic_endpoint_receiver) =
+            tokio::sync::mpsc::channel(/*capacity:*/ 128);
+        let (_turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded();
         //start cluster_info1
         let cluster_info1 =
             ClusterInfo::new(target1.info.clone(), keypair, SocketAddrSpace::Unspecified);
@@ -457,7 +441,6 @@ pub mod tests {
                 repair: target1.sockets.repair,
                 retransmit: target1.sockets.retransmit_sockets,
                 fetch: target1.sockets.tvu,
-                fetch_quic: target1.sockets.tvu_quic,
                 ancestor_hashes_requests: target1.sockets.ancestor_hashes_requests,
             }
         },
@@ -499,8 +482,8 @@ pub mod tests {
             &Arc::new(ConnectionCache::new("connection_cache_test")),
             &ignored_prioritization_fee_cache,
             BankingTracer::new_disabled(),
-            Arc::<RwLock<StakedNodes>>::default(),
-            quic_connection_cache,
+            turbine_quic_endpoint_sender,
+            turbine_quic_endpoint_receiver,
         )
         .expect("assume success");
         exit.store(true, Ordering::Relaxed);
diff --git a/core/src/validator.rs b/core/src/validator.rs
index d105531da1373b..e0042cfc7d4301 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -30,6 +30,7 @@ use {
     },
     crossbeam_channel::{bounded, unbounded, Receiver},
     lazy_static::lazy_static,
+    quinn::Endpoint,
     rand::{thread_rng, Rng},
     solana_client::connection_cache::{ConnectionCache, Protocol},
     solana_entry::poh::compute_hash_time_ns,
@@ -113,7 +114,7 @@ use {
     },
     solana_send_transaction_service::send_transaction_service,
     solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes},
-    solana_turbine::broadcast_stage::BroadcastStageType,
+    solana_turbine::{self, broadcast_stage::BroadcastStageType},
     solana_vote_program::vote_state,
     std::{
         collections::{HashMap, HashSet},
@@ -133,8 +134,6 @@ use {
 const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
 const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80;

-pub const TURBINE_QUIC_CONNECTION_POOL_SIZE: usize = 4;
-
 #[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)]
 #[strum(serialize_all = "kebab-case")]
 pub enum BlockVerificationMethod {
@@ -461,6 +460,9 @@ pub struct Validator {
     ledger_metric_report_service: LedgerMetricReportService,
     accounts_background_service: AccountsBackgroundService,
     accounts_hash_verifier: AccountsHashVerifier,
+    turbine_quic_endpoint: Endpoint,
+    turbine_quic_endpoint_runtime: Option<tokio::runtime::Runtime>,
+    turbine_quic_endpoint_join_handle: solana_turbine::quic_endpoint::AsyncTryJoinHandle,
 }

 impl Validator {
@@ -1111,19 +1113,40 @@ impl Validator {
         let entry_notification_sender = entry_notifier_service
             .as_ref()
             .map(|service| service.sender_cloned());
-        let turbine_quic_connection_cache = Arc::new(
-            solana_quic_client::new_quic_connection_cache(
-                "connection_cache_tvu_quic",
-                &identity_keypair,
-                node.info
-                    .tvu(Protocol::QUIC)
-                    .expect("Operator must spin up node with valid TVU address")
-                    .ip(),
-                &staked_nodes,
-                TURBINE_QUIC_CONNECTION_POOL_SIZE,
-            )
-            .unwrap(),
-        );
+
+        // test-validator crate may start the validator in a tokio runtime
+        // context, which forces us to use the same runtime, because a nested
+        // runtime will cause a panic at drop.
+        // Outside the test-validator crate, we always need a tokio runtime (and
+        // the respective handle) to initialize the turbine QUIC endpoint.
+        let current_runtime_handle = tokio::runtime::Handle::try_current();
+        let turbine_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| {
+            tokio::runtime::Builder::new_multi_thread()
+                .enable_all()
+                .thread_name("solTurbineQuic")
+                .build()
+                .unwrap()
+        });
+        let (turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded();
+        let (
+            turbine_quic_endpoint,
+            turbine_quic_endpoint_sender,
+            turbine_quic_endpoint_join_handle,
+        ) = solana_turbine::quic_endpoint::new_quic_endpoint(
+            turbine_quic_endpoint_runtime
+                .as_ref()
+                .map(tokio::runtime::Runtime::handle)
+                .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()),
+            &identity_keypair,
+            node.sockets.tvu_quic,
+            node.info
+                .tvu(Protocol::QUIC)
+                .expect("Operator must spin up node with valid QUIC TVU address")
+                .ip(),
+            turbine_quic_endpoint_sender,
+        )
+        .unwrap();
+
         let (replay_vote_sender, replay_vote_receiver) = unbounded();
         let tvu = Tvu::new(
             vote_account,
@@ -1134,7 +1157,6 @@ impl Validator {
                 repair: node.sockets.repair,
                 retransmit: node.sockets.retransmit_sockets,
                 fetch: node.sockets.tvu,
-                fetch_quic: node.sockets.tvu_quic,
                 ancestor_hashes_requests: node.sockets.ancestor_hashes_requests,
             },
             blockstore.clone(),
@@ -1175,8 +1197,8 @@ impl Validator {
             &connection_cache,
             &prioritization_fee_cache,
             banking_tracer.clone(),
-            staked_nodes.clone(),
-            turbine_quic_connection_cache.clone(),
+            turbine_quic_endpoint_sender.clone(),
+            turbine_quic_endpoint_receiver,
         )?;

         let tpu = Tpu::new(
@@ -1209,7 +1231,7 @@ impl Validator {
             config.tpu_coalesce,
             cluster_confirmed_slot_sender,
             &connection_cache,
-            turbine_quic_connection_cache,
+            turbine_quic_endpoint_sender,
             &identity_keypair,
             config.runtime_config.log_messages_bytes_limit,
             &staked_nodes,
@@ -1258,6 +1280,9 @@ impl Validator {
             ledger_metric_report_service,
             accounts_background_service,
             accounts_hash_verifier,
+            turbine_quic_endpoint,
+            turbine_quic_endpoint_runtime,
+            turbine_quic_endpoint_join_handle,
         })
     }

@@ -1302,6 +1327,7 @@ impl Validator {
     pub fn join(self) {
         drop(self.bank_forks);
         drop(self.cluster_info);
+        solana_turbine::quic_endpoint::close_quic_endpoint(&self.turbine_quic_endpoint);

         self.poh_service.join().expect("poh_service");
         drop(self.poh_recorder);
@@ -1384,6 +1410,10 @@ impl Validator {
             .expect("accounts_hash_verifier");

         self.tpu.join().expect("tpu");
         self.tvu.join().expect("tvu");
+        self.turbine_quic_endpoint_runtime
+            .map(|runtime| runtime.block_on(self.turbine_quic_endpoint_join_handle))
+            .transpose()
+            .unwrap();
         self.completed_data_sets_service
             .join()
             .expect("completed_data_sets_service");
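The runtime selection above can be condensed into a standalone sketch of the idiom (a hypothetical helper, same reasoning: reuse the ambient tokio runtime when one exists, e.g. under test-validator, and never nest runtimes):

```rust
// Returns a handle to the caller's tokio runtime if one is already running,
// otherwise lazily builds a dedicated runtime and stores it in `owned`.
fn runtime_handle(owned: &mut Option<tokio::runtime::Runtime>) -> tokio::runtime::Handle {
    match tokio::runtime::Handle::try_current() {
        Ok(handle) => handle,
        Err(_) => owned
            .get_or_insert_with(|| {
                tokio::runtime::Builder::new_multi_thread()
                    .enable_all()
                    .build()
                    .unwrap()
            })
            .handle()
            .clone(),
    }
}
```

Keeping the owned runtime alive in the `Validator` struct (as the diff does) matters: dropping it would tear down the endpoint's background tasks, and `join()` later uses it to `block_on` the endpoint's join handle.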
identity_pubkey, + let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() else { + break; }; let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index abff856a15a901..f3bccbf3a8ef4d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1412,9 +1412,8 @@ impl ClusterInfo { const THROTTLE_DELAY: u64 = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2; let entrypoint = { let mut entrypoints = self.entrypoints.write().unwrap(); - let entrypoint = match entrypoints.choose_mut(&mut rand::thread_rng()) { - Some(entrypoint) => entrypoint, - None => return, + let Some(entrypoint) = entrypoints.choose_mut(&mut rand::thread_rng()) else { + return; }; if !pulls.is_empty() { let now = timestamp(); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 42a079ab10ef1f..617328abe5c959 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -428,13 +428,11 @@ impl TryFrom for ContactInfo { let mut port = 0u16; for &SocketEntry { key, index, offset } in &node.sockets { port += offset; - let entry = match node.cache.get_mut(usize::from(key)) { - None => continue, - Some(entry) => entry, + let Some(entry) = node.cache.get_mut(usize::from(key)) else { + continue; }; - let addr = match node.addrs.get(usize::from(index)) { - None => continue, - Some(&addr) => addr, + let Some(&addr) = node.addrs.get(usize::from(index)) else { + continue; }; let socket = SocketAddr::new(addr, port); if sanitize_socket(&socket).is_ok() { diff --git a/gossip/src/crds.rs b/gossip/src/crds.rs index 429cd88045e561..a4ab600e167401 100644 --- a/gossip/src/crds.rs +++ b/gossip/src/crds.rs @@ -518,9 +518,8 @@ impl Crds { } pub fn remove(&mut self, key: &CrdsValueLabel, now: u64) { - let (index, _ /*label*/, value) = match self.table.swap_remove_full(key) { - Some(entry) => entry, - None => return, + let Some((index, _ /*label*/, value)) = self.table.swap_remove_full(key) else { + return; }; self.purged.push_back((value.value_hash, now)); self.shards.remove(index, &value); diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index e064c0a5ca6359..6ab52edd2687f2 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -405,9 +405,8 @@ pub(crate) fn maybe_ping_gossip_addresses( nodes .into_iter() .filter(|node| { - let node_gossip = match node.gossip() { - Err(_) => return false, - Ok(addr) => addr, + let Ok(node_gossip) = node.gossip() else { + return false; }; let (check, ping) = { let node = (*node.pubkey(), node_gossip); diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index d907f1cb933151..82e53d32fa259d 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -462,9 +462,8 @@ impl NodeInstance { // the same owner. Otherwise returns true if self has more recent timestamp // than other, and so overrides it. 
pub(crate) fn overrides(&self, other: &CrdsValue) -> Option { - let other = match &other.data { - CrdsData::NodeInstance(other) => other, - _ => return None, + let CrdsData::NodeInstance(other) = &other.data else { + return None; }; if self.token == other.token || self.from != other.from { return None; diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 5a63c537e267ed..6843ddeeb28e76 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -3016,10 +3016,9 @@ fn main() { let mut bootstrap_validator_pubkeys_iter = bootstrap_validator_pubkeys.iter(); loop { - let identity_pubkey = match bootstrap_validator_pubkeys_iter.next() - { - None => break, - Some(identity_pubkey) => identity_pubkey, + let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() + else { + break; }; let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 8dc314ae6556f1..fbdef0bae1f686 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -82,6 +82,9 @@ test-case = { workspace = true } [build-dependencies] rustc_version = { workspace = true } +[features] +dev-context-only-utils = [] + [lib] crate-type = ["lib"] name = "solana_ledger" diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index ca975c56a3dcdf..741c3dd3096392 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -268,7 +268,19 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .expect("load bank from snapshot archives"); + .unwrap_or_else(|err| { + error!( + "Failed to load bank: {err} \ + \nfull snapshot archive: {} \ + \nincremental snapshot archive: {}", + full_snapshot_archive_info.path().display(), + incremental_snapshot_archive_info + .as_ref() + .map(|archive| archive.path().display().to_string()) + .unwrap_or("none".to_string()), + ); + process::exit(1); + }); bank } else { let Some(bank_snapshot) = latest_bank_snapshot else { @@ -313,7 +325,14 @@ fn bank_forks_from_snapshot( accounts_update_notifier, exit, ) - .expect("load bank from local state"); + .unwrap_or_else(|err| { + error!( + "Failed to load bank: {err} \ + \nsnapshot: {}", + bank_snapshot.snapshot_path().display(), + ); + process::exit(1); + }); bank }; diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index dfa2e58e682efc..7af0b66428d1d2 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -1636,14 +1636,16 @@ impl Blockstore { }) } - pub fn get_data_shreds_for_slot( - &self, - slot: Slot, - start_index: u64, - ) -> std::result::Result, shred::Error> { + pub fn get_data_shreds_for_slot(&self, slot: Slot, start_index: u64) -> Result> { self.slot_data_iterator(slot, start_index) .expect("blockstore couldn't fetch iterator") - .map(|data| Shred::new_from_serialized_shred(data.1.to_vec())) + .map(|(_, bytes)| { + Shred::new_from_serialized_shred(bytes.to_vec()).map_err(|err| { + BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom( + format!("Could not reconstruct shred from shred payload: {err:?}"), + ))) + }) + }) .collect() } @@ -1966,12 +1968,9 @@ impl Blockstore { require_previous_blockhash: bool, ) -> Result { let slot_meta_cf = self.db.column::(); - let slot_meta = match slot_meta_cf.get(slot)? { - Some(slot_meta) => slot_meta, - None => { - info!("SlotMeta not found for slot {}", slot); - return Err(BlockstoreError::SlotUnavailable); - } + let Some(slot_meta) = slot_meta_cf.get(slot)? 
else { + info!("SlotMeta not found for slot {}", slot); + return Err(BlockstoreError::SlotUnavailable); }; if slot_meta.is_full() { let slot_entries = self.get_slot_entries(slot, 0)?; @@ -4508,6 +4507,7 @@ pub mod tests { solana_entry::entry::{next_entry, next_entry_mut}, solana_runtime::bank::{Bank, RewardType}, solana_sdk::{ + clock::{DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, hash::{self, hash, Hash}, instruction::CompiledInstruction, message::v0::LoadedAddresses, @@ -6296,6 +6296,13 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + // Blockstore::find_missing_data_indexes() compares timestamps, so + // set a small value for defer_threshold_ticks to avoid flakiness. + let defer_threshold_ticks = DEFAULT_TICKS_PER_SLOT / 16; + let start_index = 0; + let end_index = 50; + let max_missing = 9; + // Write entries let gap: u64 = 10; let shreds: Vec<_> = (0..64) @@ -6319,10 +6326,10 @@ pub mod tests { blockstore.find_missing_data_indexes( slot, timestamp(), // first_timestamp - 0, // defer_threshold_ticks - 0, // start_index - 50, // end_index - 1, // max_missing + defer_threshold_ticks, + start_index, + end_index, + max_missing, ), empty ); @@ -6330,11 +6337,11 @@ pub mod tests { assert_eq!( blockstore.find_missing_data_indexes( slot, - timestamp() - 400, // first_timestamp - 0, // defer_threshold_ticks - 0, // start_index - 50, // end_index - 9, // max_missing + timestamp() - DEFAULT_MS_PER_SLOT, // first_timestamp + defer_threshold_ticks, + start_index, + end_index, + max_missing, ), expected ); diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index f43e227ad65ac8..304af391cf9962 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -21,8 +21,9 @@ use { compaction_filter::CompactionFilter, compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory}, properties as RocksProperties, ColumnFamily, ColumnFamilyDescriptor, CompactionDecision, - DBCompactionStyle, DBIterator, DBPinnableSlice, DBRawIterator, FifoCompactOptions, - IteratorMode as RocksIteratorMode, LiveFile, Options, WriteBatch as RWriteBatch, DB, + DBCompactionStyle, DBCompressionType, DBIterator, DBPinnableSlice, DBRawIterator, + FifoCompactOptions, IteratorMode as RocksIteratorMode, LiveFile, Options, + WriteBatch as RWriteBatch, DB, }, serde::{de::DeserializeOwned, Serialize}, solana_runtime::hardened_unpack::UnpackError, @@ -1709,6 +1710,10 @@ fn process_cf_options_advanced( cf_options: &mut Options, column_options: &LedgerColumnOptions, ) { + // Explicitly disable compression on all columns by default + // See https://docs.rs/rocksdb/0.21.0/rocksdb/struct.Options.html#method.set_compression_type + cf_options.set_compression_type(DBCompressionType::None); + if should_enable_compression::() { cf_options.set_compression_type( column_options diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 65a5c2c2ed0757..b638a8b448eaa5 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -289,7 +289,8 @@ impl SlotMeta { self.is_connected() } - /// Dangerous. Currently only needed for a local-cluster test + /// Dangerous. + #[cfg(feature = "dev-context-only-utils")] pub fn unset_parent(&mut self) { self.parent_slot = None; } @@ -344,9 +345,8 @@ impl ErasureMeta { // Returns true if the erasure fields on the shred // are consistent with the erasure-meta. 
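// Aside: the blockstore_db.rs hunk above makes "no compression" the explicit
// default before any per-column override is applied. A rough sketch of the
// same idea against the rocksdb crate directly; the Lz4 override and the
// column name are illustrative assumptions, not taken from the patch.
use rocksdb::{ColumnFamilyDescriptor, DBCompressionType, Options};

fn column_descriptor(name: &str, enable_compression: bool) -> ColumnFamilyDescriptor {
    let mut cf_options = Options::default();
    // Disable compression explicitly so no column silently inherits
    // RocksDB's built-in default (Snappy).
    cf_options.set_compression_type(DBCompressionType::None);
    if enable_compression {
        cf_options.set_compression_type(DBCompressionType::Lz4);
    }
    ColumnFamilyDescriptor::new(name, cf_options)
}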
pub(crate) fn check_coding_shred(&self, shred: &Shred) -> bool { - let mut other = match Self::from_coding_shred(shred) { - Some(erasure_meta) => erasure_meta, - None => return false, + let Some(mut other) = Self::from_coding_shred(shred) else { + return false; }; other.__unused_size = self.__unused_size; self == &other diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 04b213e598edee..29e5724bb0939c 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -4250,92 +4250,6 @@ pub mod tests { ) } - #[test] - fn test_confirm_slot_entries_without_fix() { - const HASHES_PER_TICK: u64 = 10; - const TICKS_PER_SLOT: u64 = 2; - - let collector_id = Pubkey::new_unique(); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config(10_000); - genesis_config.poh_config.hashes_per_tick = Some(HASHES_PER_TICK); - genesis_config.ticks_per_slot = TICKS_PER_SLOT; - let genesis_hash = genesis_config.hash(); - - let mut slot_0_bank = Bank::new_for_tests(&genesis_config); - slot_0_bank.deactivate_feature(&feature_set::fix_recent_blockhashes::id()); - let slot_0_bank = Arc::new(slot_0_bank); - assert_eq!(slot_0_bank.slot(), 0); - assert_eq!(slot_0_bank.tick_height(), 0); - assert_eq!(slot_0_bank.max_tick_height(), 2); - assert_eq!(slot_0_bank.last_blockhash(), genesis_hash); - assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(0)); - - let slot_0_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, genesis_hash); - let slot_0_hash = slot_0_entries.last().unwrap().hash; - confirm_slot_entries_for_tests(&slot_0_bank, slot_0_entries, true, genesis_hash).unwrap(); - assert_eq!(slot_0_bank.tick_height(), slot_0_bank.max_tick_height()); - assert_eq!(slot_0_bank.last_blockhash(), slot_0_hash); - assert_eq!(slot_0_bank.get_hash_age(&genesis_hash), Some(1)); - assert_eq!(slot_0_bank.get_hash_age(&slot_0_hash), Some(0)); - - let slot_2_bank = Arc::new(Bank::new_from_parent(&slot_0_bank, &collector_id, 2)); - assert_eq!(slot_2_bank.slot(), 2); - assert_eq!(slot_2_bank.tick_height(), 2); - assert_eq!(slot_2_bank.max_tick_height(), 6); - assert_eq!(slot_2_bank.last_blockhash(), slot_0_hash); - - let slot_1_entries = entry::create_ticks(TICKS_PER_SLOT, HASHES_PER_TICK, slot_0_hash); - let slot_1_hash = slot_1_entries.last().unwrap().hash; - confirm_slot_entries_for_tests(&slot_2_bank, slot_1_entries, false, slot_0_hash).unwrap(); - assert_eq!(slot_2_bank.tick_height(), 4); - assert_eq!(slot_2_bank.last_blockhash(), slot_1_hash); - assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(2)); - assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(1)); - assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(0)); - - // Check that slot 2 transactions can use any previous slot hash, including the - // hash for slot 1 which is just ticks. 
- let slot_2_entries = { - let to_pubkey = Pubkey::new_unique(); - let mut prev_entry_hash = slot_1_hash; - let mut remaining_entry_hashes = HASHES_PER_TICK; - let mut entries: Vec = [genesis_hash, slot_0_hash, slot_1_hash] - .into_iter() - .map(|recent_hash| { - let tx = - system_transaction::transfer(&mint_keypair, &to_pubkey, 1, recent_hash); - remaining_entry_hashes = remaining_entry_hashes.checked_sub(1).unwrap(); - next_entry_mut(&mut prev_entry_hash, 1, vec![tx]) - }) - .collect(); - - entries.push(next_entry_mut( - &mut prev_entry_hash, - remaining_entry_hashes, - vec![], - )); - entries.push(next_entry_mut( - &mut prev_entry_hash, - HASHES_PER_TICK, - vec![], - )); - entries - }; - let slot_2_hash = slot_2_entries.last().unwrap().hash; - confirm_slot_entries_for_tests(&slot_2_bank, slot_2_entries, true, slot_1_hash).unwrap(); - assert_eq!(slot_2_bank.tick_height(), slot_2_bank.max_tick_height()); - assert_eq!(slot_2_bank.last_blockhash(), slot_2_hash); - assert_eq!(slot_2_bank.get_hash_age(&genesis_hash), Some(3)); - assert_eq!(slot_2_bank.get_hash_age(&slot_0_hash), Some(2)); - assert_eq!(slot_2_bank.get_hash_age(&slot_1_hash), Some(1)); - assert_eq!(slot_2_bank.get_hash_age(&slot_2_hash), Some(0)); - } - #[test] fn test_confirm_slot_entries_progress_num_txs_indexes() { let GenesisConfigInfo { diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index ca6bd55837c1e4..479a0e3a469f9f 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -586,9 +586,8 @@ pub mod layout { } pub(super) fn get_shred_variant(shred: &[u8]) -> Result { - let shred_variant = match shred.get(OFFSET_OF_SHRED_VARIANT) { - None => return Err(Error::InvalidPayloadSize(shred.len())), - Some(shred_variant) => *shred_variant, + let Some(&shred_variant) = shred.get(OFFSET_OF_SHRED_VARIANT) else { + return Err(Error::InvalidPayloadSize(shred.len())); }; ShredVariant::try_from(shred_variant).map_err(|_| Error::InvalidShredVariant) } @@ -673,9 +672,8 @@ pub mod layout { if get_shred_type(shred)? 
!= ShredType::Data { return Err(Error::InvalidShredType); } - let flags = match shred.get(85) { - None => return Err(Error::InvalidPayloadSize(shred.len())), - Some(flags) => flags, + let Some(flags) = shred.get(85) else { + return Err(Error::InvalidPayloadSize(shred.len())); }; Ok(flags & ShredFlags::SHRED_TICK_REFERENCE_MASK.bits()) } @@ -915,12 +913,9 @@ pub fn should_discard_shred( } } } - let shred_variant = match layout::get_shred_variant(shred) { - Ok(shred_variant) => shred_variant, - Err(_) => { - stats.bad_shred_type += 1; - return true; - } + let Ok(shred_variant) = layout::get_shred_variant(shred) else { + stats.bad_shred_type += 1; + return true; }; let slot = match layout::get_slot(shred) { Some(slot) => { @@ -935,12 +930,9 @@ pub fn should_discard_shred( return true; } }; - let index = match layout::get_index(shred) { - Some(index) => index, - None => { - stats.index_bad_deserialize += 1; - return true; - } + let Some(index) = layout::get_index(shred) else { + stats.index_bad_deserialize += 1; + return true; }; match ShredType::from(shred_variant) { ShredType::Code => { @@ -958,19 +950,13 @@ pub fn should_discard_shred( stats.index_out_of_bounds += 1; return true; } - let parent_offset = match layout::get_parent_offset(shred) { - Some(parent_offset) => parent_offset, - None => { - stats.bad_parent_offset += 1; - return true; - } + let Some(parent_offset) = layout::get_parent_offset(shred) else { + stats.bad_parent_offset += 1; + return true; }; - let parent = match slot.checked_sub(Slot::from(parent_offset)) { - Some(parent) => parent, - None => { - stats.bad_parent_offset += 1; - return true; - } + let Some(parent) = slot.checked_sub(Slot::from(parent_offset)) else { + stats.bad_parent_offset += 1; + return true; }; if !blockstore::verify_shred_slots(slot, parent, root) { stats.slot_out_of_range += 1; diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 10646edd39fae8..046e81adec6d3d 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -186,9 +186,8 @@ impl ShredData { // Deserialize headers. let mut cursor = Cursor::new(&shard[..]); let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - let proof_size = match common_header.shred_variant { - ShredVariant::MerkleData(proof_size) => proof_size, - _ => return Err(Error::InvalidShredVariant), + let ShredVariant::MerkleData(proof_size) = common_header.shred_variant else { + return Err(Error::InvalidShredVariant); }; if ShredCode::capacity(proof_size)? != shard_size { return Err(Error::InvalidShardSize(shard_size)); @@ -293,9 +292,8 @@ impl ShredCode { coding_header: CodingShredHeader, mut shard: Vec, ) -> Result { - let proof_size = match common_header.shred_variant { - ShredVariant::MerkleCode(proof_size) => proof_size, - _ => return Err(Error::InvalidShredVariant), + let ShredVariant::MerkleCode(proof_size) = common_header.shred_variant else { + return Err(Error::InvalidShredVariant); }; let shard_size = shard.len(); if Self::capacity(proof_size)? != shard_size { @@ -630,9 +628,8 @@ pub(super) fn recover( ) -> Result, Error> { // Grab {common, coding} headers from first coding shred. 
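// Aside: most hunks in this patch are one mechanical refactor: a match whose
// failure arm diverges becomes a let-else (stable since Rust 1.65). A
// self-contained before/after sketch; `lookup` is a hypothetical callback,
// not an API from this codebase.
fn before(key: &str, lookup: impl Fn(&str) -> Option<u64>) -> u64 {
    let value = match lookup(key) {
        Some(value) => value,
        None => return 0,
    };
    value + 1
}

fn after(key: &str, lookup: impl Fn(&str) -> Option<u64>) -> u64 {
    // let-else binds on the happy path; the else block must diverge
    // (return, break, continue, or panic).
    let Some(value) = lookup(key) else {
        return 0;
    };
    value + 1
}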
let headers = shreds.iter().find_map(|shred| { - let shred = match shred { - Shred::ShredCode(shred) => shred, - Shred::ShredData(_) => return None, + let Shred::ShredCode(shred) = shred else { + return None; }; let position = u32::from(shred.coding_header.position); let common_header = ShredCommonHeader { diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 5f315c0670495f..a73969c1bb1a8b 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -34,27 +34,22 @@ pub fn verify_shred_cpu( if packet.meta().discard() { return false; } - let shred = match shred::layout::get_shred(packet) { - None => return false, - Some(shred) => shred, + let Some(shred) = shred::layout::get_shred(packet) else { + return false; }; - let slot = match shred::layout::get_slot(shred) { - None => return false, - Some(slot) => slot, + let Some(slot) = shred::layout::get_slot(shred) else { + return false; }; trace!("slot {}", slot); - let pubkey = match slot_leaders.get(&slot) { - None => return false, - Some(pubkey) => pubkey, + let Some(pubkey) = slot_leaders.get(&slot) else { + return false; }; - let signature = match shred::layout::get_signature(shred) { - None => return false, - Some(signature) => signature, + let Some(signature) = shred::layout::get_signature(shred) else { + return false; }; trace!("signature {}", signature); - let data = match shred::layout::get_signed_data(shred) { - None => return false, - Some(data) => data, + let Some(data) = shred::layout::get_signed_data(shred) else { + return false; }; signature.verify(pubkey, data.as_ref()) } @@ -255,9 +250,8 @@ pub fn verify_shreds_gpu( slot_leaders: &HashMap, recycler_cache: &RecyclerCache, ) -> Vec> { - let api = match perf_libs::api() { - None => return verify_shreds_cpu(thread_pool, batches, slot_leaders), - Some(api) => api, + let Some(api) = perf_libs::api() else { + return verify_shreds_cpu(thread_pool, batches, slot_leaders); }; let (pubkeys, pubkey_offsets) = slot_key_data_for_gpu(thread_pool, batches, slot_leaders, recycler_cache); @@ -378,9 +372,8 @@ pub fn sign_shreds_gpu( if packet_count < SIGN_SHRED_GPU_MIN || pinned_keypair.is_none() { return sign_shreds_cpu(thread_pool, keypair, batches); } - let api = match perf_libs::api() { - None => return sign_shreds_cpu(thread_pool, keypair, batches), - Some(api) => api, + let Some(api) = perf_libs::api() else { + return sign_shreds_cpu(thread_pool, keypair, batches); }; let pinned_keypair = pinned_keypair.as_ref().unwrap(); diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 9c856882f8aa40..05e0772a7073b1 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -41,6 +41,7 @@ fs_extra = { workspace = true } gag = { workspace = true } serial_test = { workspace = true } solana-download-utils = { workspace = true } +solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } [package.metadata.docs.rs] diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index b7f04f7b9e250e..4cdb6c5f8b585e 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -131,21 +131,17 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { for _ in 0..packet_offsets.sig_len { let pubkey_end = pubkey_start.saturating_add(size_of::()); - let sig_end = match sig_start.checked_add(size_of::()) { - Some(sig_end) => sig_end, - None => return false, + let Some(sig_end) = sig_start.checked_add(size_of::()) else { + return false; }; - let signature = match 
packet.data(sig_start..sig_end) { - Some(signature) => Signature::new(signature), - None => return false, + let Some(signature) = packet.data(sig_start..sig_end).map(Signature::new) else { + return false; }; - let pubkey = match packet.data(pubkey_start..pubkey_end) { - Some(pubkey) => pubkey, - None => return false, + let Some(pubkey) = packet.data(pubkey_start..pubkey_end) else { + return false; }; - let message = match packet.data(msg_start..) { - Some(message) => message, - None => return false, + let Some(message) = packet.data(msg_start..) else { + return false; }; if !signature.verify(pubkey, message) { return false; @@ -317,9 +313,8 @@ fn do_get_packet_offsets( pub fn check_for_tracer_packet(packet: &mut Packet) -> bool { let first_pubkey_start: usize = TRACER_KEY_OFFSET_IN_TRANSACTION; - let first_pubkey_end = match first_pubkey_start.checked_add(size_of::()) { - Some(offset) => offset, - None => return false, + let Some(first_pubkey_end) = first_pubkey_start.checked_add(size_of::()) else { + return false; }; // Check for tracer pubkey match packet.data(first_pubkey_start..first_pubkey_end) { @@ -610,9 +605,8 @@ pub fn ed25519_verify( reject_non_vote: bool, valid_packet_count: usize, ) { - let api = match perf_libs::api() { - None => return ed25519_verify_cpu(batches, reject_non_vote, valid_packet_count), - Some(api) => api, + let Some(api) = perf_libs::api() else { + return ed25519_verify_cpu(batches, reject_non_vote, valid_packet_count); }; let total_packet_count = count_packets_in_batches(batches); // micro-benchmarks show GPU time for smallest batch around 15-20ms diff --git a/program-runtime/src/log_collector.rs b/program-runtime/src/log_collector.rs index 57ac1cede62df7..ead575ddfb81d2 100644 --- a/program-runtime/src/log_collector.rs +++ b/program-runtime/src/log_collector.rs @@ -23,12 +23,9 @@ impl Default for LogCollector { impl LogCollector { pub fn log(&mut self, message: &str) { - let limit = match self.bytes_limit { - Some(limit) => limit, - None => { - self.messages.push(message.to_string()); - return; - } + let Some(limit) = self.bytes_limit else { + self.messages.push(message.to_string()); + return; }; let bytes_written = self.bytes_written.saturating_add(message.len()); diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 1a4aea1564e633..246b03e9d24250 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -549,11 +549,8 @@ declare_syscall!( } else { align_of::() }; - let layout = match Layout::from_size_align(size as usize, align) { - Ok(layout) => layout, - Err(_) => { - return Ok(0); - } + let Ok(layout) = Layout::from_size_align(size as usize, align) else { + return Ok(0); }; let allocator = &mut invoke_context.get_syscall_context_mut()?.allocator; if free_addr == 0 { @@ -631,11 +628,8 @@ declare_syscall!( invoke_context.get_check_size(), )?; - let new_address = match Pubkey::create_program_address(&seeds, program_id) { - Ok(address) => address, - Err(_) => { - return Ok(1); - } + let Ok(new_address) = Pubkey::create_program_address(&seeds, program_id) else { + return Ok(1); }; let address = translate_slice_mut::( memory_mapping, @@ -879,23 +873,14 @@ declare_syscall!( invoke_context.get_check_size(), )?; - let message = match libsecp256k1::Message::parse_slice(hash) { - Ok(msg) => msg, - Err(_) => { - return Ok(Secp256k1RecoverError::InvalidHash.into()); - } + let Ok(message) = libsecp256k1::Message::parse_slice(hash) else { + return 
Ok(Secp256k1RecoverError::InvalidHash.into()); }; - let adjusted_recover_id_val = match recovery_id_val.try_into() { - Ok(adjusted_recover_id_val) => adjusted_recover_id_val, - Err(_) => { - return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); - } + let Ok(adjusted_recover_id_val) = recovery_id_val.try_into() else { + return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); }; - let recovery_id = match libsecp256k1::RecoveryId::parse(adjusted_recover_id_val) { - Ok(id) => id, - Err(_) => { - return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); - } + let Ok(recovery_id) = libsecp256k1::RecoveryId::parse(adjusted_recover_id_val) else { + return Ok(Secp256k1RecoverError::InvalidRecoveryId.into()); }; let sig_parse_result = if invoke_context .feature_set @@ -906,11 +891,8 @@ declare_syscall!( libsecp256k1::Signature::parse_overflowing_slice(signature) }; - let signature = match sig_parse_result { - Ok(sig) => sig, - Err(_) => { - return Ok(Secp256k1RecoverError::InvalidSignature.into()); - } + let Ok(signature) = sig_parse_result else { + return Ok(Secp256k1RecoverError::InvalidSignature.into()); }; let public_key = match libsecp256k1::recover(&message, &signature, &recovery_id) { diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index 3851d815247830..628e77cb93af43 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -206,13 +206,13 @@ mod tests { let instructions = config_instruction::create_account::(&from_pubkey, &config_pubkey, 1, keys); let system_instruction = limited_deserialize(&instructions[0].data).unwrap(); - let space = match system_instruction { - SystemInstruction::CreateAccount { - lamports: _, - space, - owner: _, - } => space, - _ => panic!("Not a CreateAccount system instruction"), + let SystemInstruction::CreateAccount { + lamports: _, + space, + owner: _, + } = system_instruction + else { + panic!("Not a CreateAccount system instruction") }; let config_account = AccountSharedData::new(0, space as usize, &id()); let accounts = process_instruction( diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index c818e690cee014..3bb78b1b142f2c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -218,7 +218,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint 0.4.3", "num-traits", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -254,7 +254,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -315,7 +315,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", "synstructure", @@ -327,7 +327,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -340,9 +340,9 @@ checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] name = "async-channel" -version = "1.8.0" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", "event-listener", @@ -388,7 +388,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -399,9 +399,9 @@ version = "0.1.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -536,12 +536,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.4", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "regex", "rustc-hash", "shlex", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -654,7 +654,7 @@ dependencies = [ "borsh-derive-internal 0.9.3", "borsh-schema-derive-internal 0.9.3", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "syn 1.0.109", ] @@ -667,7 +667,7 @@ dependencies = [ "borsh-derive-internal 0.10.3", "borsh-schema-derive-internal 0.10.3", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "syn 1.0.109", ] @@ -677,7 +677,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -688,7 +688,7 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -699,7 +699,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -710,7 +710,7 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -788,7 +788,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1177,10 +1177,10 @@ checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "strsim 0.10.0", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -1191,7 +1191,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -1246,7 +1246,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - 
"proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1258,7 +1258,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustc_version", "syn 1.0.109", @@ -1341,7 +1341,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1417,7 +1417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1467,9 +1467,9 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -1480,7 +1480,7 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" dependencies = [ "num-bigint 0.4.3", "num-traits", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -1718,9 +1718,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -2359,7 +2359,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -2790,7 +2790,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -2914,7 +2914,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c8b15b261814f992e33760b1fca9fe8b693d8a65299f20c9901688636cfb746" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -2996,7 +2996,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e" dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3008,9 +3008,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -3076,7 +3076,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3155,7 +3155,7 @@ 
checksum = "5f7d21ccd03305a674437ee1248f3ab5d4b1db095cf1caf49f1713ddf61956b7" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3309,7 +3309,7 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3350,7 +3350,7 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3426,7 +3426,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "syn 1.0.109", ] @@ -3436,8 +3436,8 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ - "proc-macro2 1.0.63", - "syn 2.0.23", + "proc-macro2 1.0.64", + "syn 2.0.25", ] [[package]] @@ -3466,7 +3466,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", "version_check", @@ -3478,7 +3478,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "version_check", ] @@ -3494,9 +3494,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da" dependencies = [ "unicode-ident", ] @@ -3571,7 +3571,7 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3584,7 +3584,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -3691,7 +3691,7 @@ version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", ] [[package]] @@ -3859,9 +3859,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" +checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -3871,9 +3871,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" +checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4149,7 +4149,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -4205,9 +4205,9 @@ checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "d614f89548720367ded108b3c843be93f3a341e22d5674ca0dd5cd57f34926af" dependencies = [ "serde_derive", ] @@ -4223,13 +4223,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "d4fe589678c688e44177da4f27152ee2d190757271dc7f1d5b6b9f68d869d641" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -4272,9 +4272,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" dependencies = [ "darling", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -4765,6 +4765,7 @@ dependencies = [ "base64 0.21.2", "bincode", "bs58", + "bytes", "chrono", "crossbeam-channel", "dashmap", @@ -4777,6 +4778,7 @@ dependencies = [ "lru", "min-max-heap", "num_enum 0.6.1", + "quinn", "rand 0.7.3", "rand_chacha 0.2.2", "rayon", @@ -4935,10 +4937,10 @@ dependencies = [ name = "solana-frozen-abi-macro" version = "1.17.0" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustc_version", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -6010,10 +6012,10 @@ name = "solana-sdk-macro" version = "1.17.0" dependencies = [ "bs58", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustversion", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -6239,7 +6241,6 @@ dependencies = [ "rayon", "rcgen", "rustls 0.20.8", - "solana-client", "solana-entry", "solana-gossip", "solana-ledger", @@ -6550,7 +6551,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "rustversion", "syn 1.0.109", @@ -6585,18 +6586,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.23" +version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "unicode-ident", ] @@ -6613,7 +6614,7 @@ version = "0.12.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.3", @@ -6683,7 +6684,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -6741,9 +6742,9 @@ version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] @@ -6874,7 +6875,7 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -7065,7 +7066,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "prost-build 0.9.0", "quote 1.0.29", "syn 1.0.109", @@ -7078,7 +7079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" dependencies = [ "prettyplease 0.1.9", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "prost-build 0.11.4", "quote 1.0.29", "syn 1.0.109", @@ -7154,7 +7155,7 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", "syn 1.0.109", ] @@ -7453,9 +7454,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", "wasm-bindgen-shared", ] @@ -7487,9 +7488,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7843,9 +7844,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.63", + "proc-macro2 1.0.64", "quote 1.0.29", - "syn 2.0.23", + "syn 2.0.25", ] [[package]] diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 7894ee4ba898d2..373c7c3e1f78a8 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -2464,11 +2464,11 @@ fn test_program_sbf_upgrade_via_cpi() { .advance_slot(1, &Pubkey::default()) .expect("Failed to advance the slot"); let program_account = bank_client.get_account(&program_id).unwrap().unwrap(); - let programdata_address = match program_account.state() { - Ok(bpf_loader_upgradeable::UpgradeableLoaderState::Program { - programdata_address, - }) => programdata_address, - _ => unreachable!(), + let Ok(bpf_loader_upgradeable::UpgradeableLoaderState::Program { + 
programdata_address, + }) = program_account.state() + else { + unreachable!() }; let original_programdata = bank_client .get_account_data(&programdata_address) diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 48324ad0c12984..e177a82c97dba1 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -552,15 +552,13 @@ impl JsonRpcRequestProcessor { slot: first_slot_in_epoch, })?; - let first_confirmed_block = if let Ok(Some(first_confirmed_block)) = self + let Ok(Some(first_confirmed_block)) = self .get_block( first_confirmed_block_in_epoch, Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), ) .await - { - first_confirmed_block - } else { + else { return Err(RpcCustomError::BlockNotAvailable { slot: first_confirmed_block_in_epoch, } diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 2dd7e20b86bffb..99eab0a2353670 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -407,13 +407,10 @@ async fn handle_connection( } } } - let data_str = match str::from_utf8(&data) { - Ok(str) => str, - Err(_) => { - // Old implementation just closes the connection, so we preserve that behavior - // for now. It would be more correct to respond with an error. - break; - } + let Ok(data_str) = str::from_utf8(&data) else { + // Old implementation just closes the connection, so we preserve that behavior + // for now. It would be more correct to respond with an error. + break; }; if let Some(response) = json_rpc_handler.handle_request(data_str).await { diff --git a/runtime/src/account_storage/meta.rs b/runtime/src/account_storage/meta.rs index c80bfaacfd5f79..dba672292310f5 100644 --- a/runtime/src/account_storage/meta.rs +++ b/runtime/src/account_storage/meta.rs @@ -98,18 +98,18 @@ impl<'a: 'b, 'b, T: ReadableAccount + Sync + 'b, U: StorableAccounts<'a, T>, V: /// References to account data stored elsewhere. Getting an `Account` requires cloning /// (see `StoredAccountMeta::clone_account()`). 
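// Aside: as the upgrade-via-CPI test above shows, let-else also destructures
// struct-like variants of multi-variant enums, not just Option and Result. A
// toy example; this enum is a stand-in, not the real loader state type.
enum LoaderState {
    Uninitialized,
    Program { programdata_address: u64 },
}

fn programdata(state: LoaderState) -> Option<u64> {
    // Any variant other than Program falls into the diverging else block.
    let LoaderState::Program { programdata_address } = state else {
        return None;
    };
    Some(programdata_address)
}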
#[derive(PartialEq, Eq, Debug)] -pub enum StoredAccountMeta<'a> { - AppendVec(AppendVecStoredAccountMeta<'a>), +pub enum StoredAccountMeta<'storage> { + AppendVec(AppendVecStoredAccountMeta<'storage>), } -impl<'a> StoredAccountMeta<'a> { - pub fn pubkey(&self) -> &'a Pubkey { +impl<'storage> StoredAccountMeta<'storage> { + pub fn pubkey(&self) -> &'storage Pubkey { match self { Self::AppendVec(av) => av.pubkey(), } } - pub fn hash(&self) -> &'a Hash { + pub fn hash(&self) -> &'storage Hash { match self { Self::AppendVec(av) => av.hash(), } @@ -127,7 +127,7 @@ impl<'a> StoredAccountMeta<'a> { } } - pub fn data(&self) -> &'a [u8] { + pub fn data(&self) -> &'storage [u8] { match self { Self::AppendVec(av) => av.data(), } @@ -151,7 +151,7 @@ impl<'a> StoredAccountMeta<'a> { } } - pub fn set_meta(&mut self, meta: &'a StoredMeta) { + pub fn set_meta(&mut self, meta: &'storage StoredMeta) { match self { Self::AppendVec(av) => av.set_meta(meta), } @@ -164,7 +164,7 @@ impl<'a> StoredAccountMeta<'a> { } } -impl<'a> ReadableAccount for StoredAccountMeta<'a> { +impl<'storage> ReadableAccount for StoredAccountMeta<'storage> { fn lamports(&self) -> u64 { match self { Self::AppendVec(av) => av.lamports(), diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 25ee74e877133a..a9c3b22fc86313 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -85,7 +85,6 @@ pub struct AccountLocks { readonly_locks: HashMap, } -#[allow(dead_code)] #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub(crate) enum RewardInterval { /// the slot within the epoch is INSIDE the reward distribution interval diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 28450dfbc22e56..1b7be720573c16 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1504,7 +1504,6 @@ pub struct AccountsDb { /// this will live here until the feature for partitioned epoch rewards is activated. /// At that point, this and other code can be deleted. - #[allow(dead_code)] pub(crate) partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig, /// the full accounts hash calculation as of a predetermined block height 'N' @@ -4474,16 +4473,13 @@ impl AccountsDb { let len = sorted_slots.len(); for slot in sorted_slots { - let old_storage = match self.get_storage_to_move_to_ancient_append_vec( + let Some(old_storage) = self.get_storage_to_move_to_ancient_append_vec( slot, &mut current_ancient, can_randomly_shrink, - ) { - Some(old_storages) => old_storages, - None => { - // nothing to squash for this slot - continue; - } + ) else { + // nothing to squash for this slot + continue; }; if guard.is_none() { diff --git a/runtime/src/ancient_append_vecs.rs b/runtime/src/ancient_append_vecs.rs index f435e508931aaf..6d9c7f8a624677 100644 --- a/runtime/src/ancient_append_vecs.rs +++ b/runtime/src/ancient_append_vecs.rs @@ -285,6 +285,9 @@ impl AccountsDb { tuning: PackedAncientStorageTuning, metrics: &mut ShrinkStatsSub, ) { + self.shrink_ancient_stats + .slots_considered + .fetch_add(sorted_slots.len() as u64, Ordering::Relaxed); let ancient_slot_infos = self.collect_sort_filter_ancient_slots(sorted_slots, &tuning); if ancient_slot_infos.all_infos.is_empty() { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 38163122141406..119f1991a373c1 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -845,7 +845,6 @@ struct VoteReward { } type VoteRewards = DashMap; -#[allow(dead_code)] #[derive(Debug, Default)] struct VoteRewardsAccounts { /// reward info for each vote account pubkey. 
@@ -864,7 +863,6 @@ struct EpochRewardCalculateParamInfo<'a> { cached_vote_accounts: &'a VoteAccounts, } -#[allow(dead_code)] /// Hold all results from calculating the rewards for partitioned distribution. /// This struct exists so we can have a function which does all the calculation with no /// side effects. @@ -879,7 +877,6 @@ struct PartitionedRewardsCalculation { capitalization: u64, } -#[allow(dead_code)] /// result of calculating the stake rewards at beginning of new epoch struct StakeRewardCalculationPartitioned { /// each individual stake account to reward, grouped by partition @@ -888,7 +885,6 @@ struct CalculateRewardsAndDistributeVoteRewardsResult { /// total rewards for the epoch (including both vote rewards and stake rewards) total_rewards: u64, @@ -958,7 +954,6 @@ impl WorkingSlot for Bank { } } -#[allow(dead_code)] #[derive(Debug, Default)] /// result of calculating the stake rewards at end of epoch struct StakeRewardCalculation { @@ -1231,13 +1226,11 @@ impl Bank { rent_collector.clone_with_epoch(epoch) } - #[allow(dead_code)] fn is_partitioned_rewards_feature_enabled(&self) -> bool { self.feature_set .is_active(&feature_set::enable_partitioned_epoch_reward::id()) } - #[allow(dead_code)] pub(crate) fn set_epoch_reward_status_active( &mut self, stake_rewards_by_partition: Vec, @@ -1257,7 +1250,6 @@ impl Bank { } /// # stake accounts to store in one block during partitioned reward interval - #[allow(dead_code)] fn partitioned_rewards_stake_account_stores_per_block(&self) -> u64 { self.partitioned_epoch_rewards_config() .stake_account_stores_per_block @@ -1265,36 +1257,31 @@ impl Bank { /// reward calculation happens synchronously during the first block of the epoch boundary. /// So, # blocks for reward calculation is 1. - #[allow(dead_code)] fn get_reward_calculation_num_blocks(&self) -> Slot { self.partitioned_epoch_rewards_config() .reward_calculation_num_blocks } - #[allow(dead_code)] /// Calculate the number of blocks required to distribute rewards to all stake accounts. fn get_reward_distribution_num_blocks(&self, rewards: &StakeRewards) -> u64 { let total_stake_accounts = rewards.len(); if self.epoch_schedule.warmup && self.epoch < self.first_normal_epoch() { 1 } else { + const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10; let num_chunks = crate::accounts_hash::AccountsHasher::div_ceil( total_stake_accounts, self.partitioned_rewards_stake_account_stores_per_block() as usize, ) as u64; - // Limit the reward credit interval to 5% of the total number of slots in a epoch - num_chunks.clamp(1, (self.epoch_schedule.slots_per_epoch / 20).max(1)) + // Limit the reward credit interval to 10% of the total number of slots in an epoch + num_chunks.clamp( + 1, + (self.epoch_schedule.slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1), + ) } } - #[allow(dead_code)] - /// Return the total number of blocks in reward interval (including both calculation and crediting). - fn get_reward_total_num_blocks(&self, rewards: &StakeRewards) -> u64 { - self.get_reward_calculation_num_blocks() + self.get_reward_distribution_num_blocks(rewards) - } - - #[allow(dead_code)] /// Return `RewardInterval` enum for current bank fn get_reward_interval(&self) -> RewardInterval { if matches!(self.epoch_reward_status, EpochRewardStatus::Active(_)) { @@ -1608,7 +1595,6 @@ impl Bank { ); } - #[allow(dead_code)] /// partitioned reward distribution is complete.
/// So, deactivate the epoch rewards sysvar. fn deactivate_epoch_reward_status(&mut self) { @@ -1630,7 +1616,6 @@ impl Bank { } } - #[allow(dead_code)] /// Begin the process of calculating and distributing rewards. /// This process can take multiple slots. fn begin_partitioned_rewards( @@ -2385,7 +2370,6 @@ impl Bank { } } - #[allow(dead_code)] /// Calculate rewards from previous epoch to prepare for partitioned distribution. fn calculate_rewards_for_partitioning( &self, @@ -2436,7 +2420,6 @@ impl Bank { } } - #[allow(dead_code)] // Calculate rewards from previous epoch and distribute vote rewards fn calculate_rewards_and_distribute_vote_rewards( &self, @@ -2526,7 +2509,6 @@ impl Bank { } } - #[allow(dead_code)] fn assert_validator_rewards_paid(&self, validator_rewards_paid: u64) { assert_eq!( validator_rewards_paid, @@ -2667,15 +2649,12 @@ impl Bank { if invalid_vote_keys.contains_key(vote_pubkey) { return; } - let stake_account = match self.get_account_with_fixed_root(stake_pubkey) { - Some(stake_account) => stake_account, - None => { + let Some(stake_account) = self.get_account_with_fixed_root(stake_pubkey) else { invalid_stake_keys .insert(*stake_pubkey, InvalidCacheEntryReason::Missing); invalid_cached_stake_accounts.fetch_add(1, Relaxed); return; - } - }; + }; if cached_stake_account.account() != &stake_account { if self.rc.accounts.accounts_db.assert_stakes_cache_consistency { panic!( @@ -2886,23 +2865,17 @@ impl Bank { }; let invalid_vote_keys = DashMap::::new(); let make_vote_delegations_entry = |vote_pubkey| { - let vote_account = match get_vote_account(&vote_pubkey) { - Some(vote_account) => vote_account, - None => { - invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::Missing); - return None; - } + let Some(vote_account) = get_vote_account(&vote_pubkey) else { + invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::Missing); + return None; }; if vote_account.owner() != &solana_vote_program { invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::WrongOwner); return None; } - let vote_state = match vote_account.vote_state().cloned() { - Ok(vote_state) => vote_state, - Err(_) => { - invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::BadState); - return None; - } + let Ok(vote_state) = vote_account.vote_state().cloned() else { + invalid_vote_keys.insert(vote_pubkey, InvalidCacheEntryReason::BadState); + return None; }; let vote_with_stake_delegations = VoteWithStakeDelegations { vote_state: Arc::new(vote_state), @@ -2921,11 +2894,11 @@ impl Bank { // Join stake accounts with vote-accounts. let push_stake_delegation = |(stake_pubkey, stake_account): (&Pubkey, &StakeAccount<_>)| { let delegation = stake_account.delegation(); - let mut vote_delegations = - match vote_with_stake_delegations_map.get_mut(&delegation.voter_pubkey) { - Some(vote_delegations) => vote_delegations, - None => return, - }; + let Some(mut vote_delegations) = + vote_with_stake_delegations_map.get_mut(&delegation.voter_pubkey) + else { + return; + }; if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() { let delegation = InflationPointCalculationEvent::Delegation(delegation, solana_vote_program); @@ -2952,7 +2925,6 @@ impl Bank { } } - #[allow(dead_code)] /// calculate and return some reward calc info to avoid recalculation across functions fn get_epoch_reward_calculate_param_info<'a>( &self, @@ -2971,7 +2943,6 @@ impl Bank { } } - #[allow(dead_code)] /// Calculate epoch reward and return vote and stake rewards. 
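// Aside: a worked example of the clamp added to
// get_reward_distribution_num_blocks in the hunks above. All numbers here
// are illustrative, not taken from the patch.
fn reward_distribution_num_blocks(
    total_stake_accounts: u64,
    stores_per_block: u64,
    slots_per_epoch: u64,
) -> u64 {
    const MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH: u64 = 10;
    // Ceiling division: blocks needed to store every stake account.
    let num_chunks = (total_stake_accounts + stores_per_block - 1) / stores_per_block;
    // At least one block, at most 10% of the slots in the epoch.
    num_chunks.clamp(1, (slots_per_epoch / MAX_FACTOR_OF_REWARD_BLOCKS_IN_EPOCH).max(1))
}

fn main() {
    // 1_000_000 accounts at 4_096 per block need ceil(1_000_000 / 4_096) = 245
    // blocks; the cap for a 432_000-slot epoch is 43_200, so 245 stands.
    assert_eq!(reward_distribution_num_blocks(1_000_000, 4_096, 432_000), 245);
}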
fn calculate_validator_rewards( &self, @@ -3060,7 +3031,6 @@ impl Bank { } } - #[allow(dead_code)] /// compare the vote and stake accounts between the normal rewards calculation code /// and the partitioned rewards calculation code /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code @@ -3129,7 +3099,6 @@ impl Bank { ); } - #[allow(dead_code)] /// compare the vote and stake accounts between the normal rewards calculation code /// and the partitioned rewards calculation code /// `stake_rewards_expected` and `vote_rewards_expected` are the results of the normal rewards calculation code @@ -3198,7 +3167,6 @@ impl Bank { /// Calculates epoch reward points from stake/vote accounts. /// Returns reward lamports and points for the epoch or none if points == 0. - #[allow(dead_code)] fn calculate_reward_points_partitioned( &self, reward_calculate_params: &EpochRewardCalculateParamInfo, @@ -3233,20 +3201,14 @@ impl Bank { let delegation = stake_account.delegation(); let vote_pubkey = delegation.voter_pubkey; - let vote_account = match get_vote_account(&vote_pubkey) { - Some(vote_account) => vote_account, - None => { - return 0; - } + let Some(vote_account) = get_vote_account(&vote_pubkey) else { + return 0; }; if vote_account.owner() != &solana_vote_program { return 0; } - let vote_state = match vote_account.vote_state() { - Ok(vote_state) => vote_state, - Err(_) => { - return 0; - } + let Ok(vote_state) = vote_account.vote_state() else { + return 0; }; stake_state::calculate_points( @@ -3302,7 +3264,6 @@ impl Bank { (points > 0).then_some(PointValue { rewards, points }) } - #[allow(dead_code)] /// Calculates epoch rewards for stake/vote accounts /// Returns vote rewards, stake rewards, and the sum of all stake rewards in lamports fn calculate_stake_vote_rewards( @@ -3355,20 +3316,14 @@ impl Bank { let (mut stake_account, stake_state) = <(AccountSharedData, StakeState)>::from(stake_account); let vote_pubkey = delegation.voter_pubkey; - let vote_account = match get_vote_account(&vote_pubkey) { - Some(vote_account) => vote_account, - None => { - return None; - } + let Some(vote_account) = get_vote_account(&vote_pubkey) else { + return None; }; if vote_account.owner() != &solana_vote_program { return None; } - let vote_state = match vote_account.vote_state().cloned() { - Ok(vote_state) => vote_state, - Err(_) => { - return None; - } + let Ok(vote_state) = vote_account.vote_state().cloned() else { + return None; }; let pre_lamport = stake_account.lamports(); @@ -3548,7 +3503,6 @@ impl Bank { .fetch_add(measure.as_us(), Relaxed); } - #[allow(dead_code)] /// store stake rewards in partition /// return the sum of all the stored rewards /// @@ -3582,7 +3536,6 @@ impl Bank { .sum::() as u64 } - #[allow(dead_code)] fn store_vote_accounts_partitioned( &self, vote_account_rewards: VoteRewardsAccounts, @@ -3654,7 +3607,6 @@ impl Bank { vote_rewards } - #[allow(dead_code)] /// return reward info for each vote account /// return account data for each vote account that needs to be stored /// This return value is a little awkward at the moment so that downstream existing code in the non-partitioned rewards code path can be re-used without duplication or modification. 
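// Aside: the distribute_epoch_rewards_in_partition hunk below pairs each
// capitalization increase with an equal decrease of the EpochRewards sysvar
// account balance. A minimal model of that bookkeeping, matching the new
// bank test further down; these types and names are assumptions for
// illustration, not the runtime's own.
struct EpochRewardsModel {
    total_rewards: u64,
    distributed_rewards: u64,
    sysvar_account_lamports: u64,
}

impl EpochRewardsModel {
    fn new(total_rewards: u64) -> Self {
        Self {
            total_rewards,
            distributed_rewards: 0,
            // The sysvar account is funded with the full reward amount up front.
            sysvar_account_lamports: total_rewards,
        }
    }

    fn distribute(&mut self, amount: u64) {
        // Lamports move out of the sysvar account as they are credited to
        // stake accounts, so the two fields always sum to total_rewards.
        self.distributed_rewards += amount;
        self.sysvar_account_lamports -= amount;
        assert!(self.distributed_rewards <= self.total_rewards);
    }
}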
@@ -3718,7 +3670,6 @@ impl Bank { .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); } - #[allow(dead_code)] /// insert non-zero stake rewards to self.rewards /// Return the number of rewards inserted fn update_reward_history_in_partition(&self, stake_rewards: &[StakeReward]) -> usize { @@ -3732,7 +3683,6 @@ impl Bank { rewards.len().saturating_sub(initial_len) } - #[allow(dead_code)] /// Process reward credits for a partition of rewards /// Store the rewards to AccountsDB, update reward history record and total capitalization. fn distribute_epoch_rewards_in_partition( @@ -3746,12 +3696,16 @@ impl Bank { let (total_rewards_in_lamports, store_stake_accounts_us) = measure_us!(self.store_stake_accounts_in_partition(this_partition_stake_rewards)); - self.update_reward_history_in_partition(this_partition_stake_rewards); - // increase total capitalization by the distributed rewards self.capitalization .fetch_add(total_rewards_in_lamports, Relaxed); + // deduct the distributed rewards from the epoch rewards sysvar balance + self.update_epoch_rewards_sysvar(total_rewards_in_lamports); + + // update reward history for this partitioned distribution + self.update_reward_history_in_partition(this_partition_stake_rewards); + let metrics = RewardsStoreMetrics { pre_capitalization, post_capitalization: self.capitalization(), @@ -3764,7 +3718,6 @@ impl Bank { report_partitioned_reward_metrics(self, metrics); } - #[allow(dead_code)] /// true if it is ok to run partitioned rewards code. /// This means the feature is activated or certain testing situations. fn is_partitioned_rewards_code_enabled(&self) -> bool { @@ -3774,7 +3727,6 @@ impl Bank { .test_enable_partitioned_rewards } - #[allow(dead_code)] /// Helper fn to log epoch_rewards sysvar fn log_epoch_rewards_sysvar(&self, prefix: &str) { if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { @@ -3789,7 +3741,6 @@ impl Bank { } } - #[allow(dead_code)] /// Create EpochRewards sysvar with calculated rewards fn create_epoch_rewards_sysvar( &self, @@ -3818,7 +3769,6 @@ impl Bank { self.log_epoch_rewards_sysvar("create"); } - #[allow(dead_code)] /// Update EpochRewards sysvar with distributed rewards fn update_epoch_rewards_sysvar(&self, distributed: u64) { assert!(self.is_partitioned_rewards_code_enabled()); @@ -4461,14 +4411,7 @@ impl Bank { } pub fn is_block_boundary(&self, tick_height: u64) -> bool { - if self - .feature_set - .is_active(&feature_set::fix_recent_blockhashes::id()) - { - tick_height == self.max_tick_height - } else { - tick_height % self.ticks_per_slot == 0 - } + tick_height == self.max_tick_height } /// Get the max number of accounts that a transaction may lock in this block @@ -4866,9 +4809,7 @@ impl Bank { } pub fn load_program(&self, pubkey: &Pubkey) -> Arc<LoadedProgram> { - let program = if let Some(program) = self.get_account_with_fixed_root(pubkey) { - program - } else { + let Some(program) = self.get_account_with_fixed_root(pubkey) else { return Arc::new(LoadedProgram::new_tombstone( self.slot, LoadedProgramType::Closed, diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 8e12788a25d50d..f92fc21b3757c4 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -24,6 +24,7 @@ use { }, inline_spl_token, nonce_info::NonceFull, + partitioned_rewards::TestPartitionedEpochRewards, rent_collector::RENT_EXEMPT_RENT_EPOCH, status_cache::MAX_CACHE_ENTRIES, transaction_error_metrics::TransactionErrorMetrics, @@ -4978,17 +4979,18 @@ fn test_hash_internal_state_unchanged() { } #[test] -fn
test_ticks_change_state() { +fn test_hash_internal_state_unchanged_with_ticks() { let (genesis_config, _) = create_genesis_config(500); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let bank1 = new_from_parent(&bank); let hash1 = bank1.hash_internal_state(); - // ticks don't change its state unless a block boundary is crossed + // ticks don't change the bank's state even if a slot boundary is crossed + // because blockhashes are only recorded at block boundaries for _ in 0..genesis_config.ticks_per_slot { assert_eq!(bank1.hash_internal_state(), hash1); bank1.register_tick(&Hash::default()); } - assert_ne!(bank1.hash_internal_state(), hash1); + assert_eq!(bank1.hash_internal_state(), hash1); } #[ignore] @@ -12604,6 +12606,54 @@ fn test_epoch_credit_rewards_and_history_update() { ); } +/// Test distribute partitioned epoch rewards +#[test] +fn test_distribute_partitioned_epoch_rewards_bank_capital_and_sysvar_balance() { + let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + genesis_config.epoch_schedule = EpochSchedule::custom(432000, 432000, false); + let mut bank = Bank::new_for_tests(&genesis_config); + bank.activate_feature(&feature_set::enable_partitioned_epoch_reward::id()); + + // Set up the epoch_rewards sysvar with 1e9 lamports of rewards to distribute. + let total_rewards = 1_000_000_000; + bank.create_epoch_rewards_sysvar(total_rewards, 0, 42); + let pre_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + assert_eq!(pre_epoch_rewards_account.lamports(), total_rewards); + + // Set up a partition of rewards to distribute + let expected_num = 100; + let mut stake_rewards = (0..expected_num) + .map(|_| StakeReward::new_random()) + .collect::<Vec<_>>(); + let mut rewards_to_distribute = 0; + for stake_reward in &mut stake_rewards { + stake_reward.credit(100); + rewards_to_distribute += 100; + } + let all_rewards = vec![stake_rewards]; + + // Distribute rewards + let pre_cap = bank.capitalization(); + bank.distribute_epoch_rewards_in_partition(&all_rewards, 0); + let post_cap = bank.capitalization(); + let post_epoch_rewards_account = bank.get_account(&sysvar::epoch_rewards::id()).unwrap(); + let expected_epoch_rewards_sysvar_lamports_remaining = total_rewards - rewards_to_distribute; + + // Assert that epoch rewards sysvar lamports decrease by the distributed rewards + assert_eq!( + post_epoch_rewards_account.lamports(), + expected_epoch_rewards_sysvar_lamports_remaining + ); + + let epoch_rewards: sysvar::epoch_rewards::EpochRewards = + from_account(&post_epoch_rewards_account).unwrap(); + assert_eq!(epoch_rewards.total_rewards, total_rewards); + assert_eq!(epoch_rewards.distributed_rewards, rewards_to_distribute); + + // Assert that the total bank capitalization didn't change + assert_eq!(pre_cap, post_cap); +} + #[test] /// Test rewards computation and partitioned rewards distribution at the epoch boundary fn test_rewards_computation() { @@ -12644,9 +12694,9 @@ fn test_rewards_computation() { assert_eq!(stake_rewards.stake_rewards.len(), expected_num_delegations); } -/// Test rewards compuation and partitioned rewards distribution at the epoch boundary +/// Test rewards computation and partitioned rewards distribution at the epoch boundary (one reward distribution block) #[test] -fn test_rewards_computation_and_partitioned_distribution() { +fn test_rewards_computation_and_partitioned_distribution_one_block() { solana_logger::setup(); // setup the expected number of stake delegations @@ -12731,6 +12781,128 @@ fn
test_rewards_computation_and_partitioned_distribution() { } } +/// Test rewards computation and partitioned rewards distribution at the epoch boundary (two reward distribution blocks) +#[test] +fn test_rewards_computation_and_partitioned_distribution_two_blocks() { + solana_logger::setup(); + + // Set up the expected number of stake delegations (100) + let expected_num_delegations = 100; + + let validator_keypairs = (0..expected_num_delegations) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect::<Vec<_>>(); + + let GenesisConfigInfo { + mut genesis_config, .. + } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![2_000_000_000; expected_num_delegations], + ); + genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); + + // Configure the stake reward distribution to store 50 accounts per block. + // We will then need two blocks for reward distribution, and we can assert the expected + // bank capitalization changes before/during/after reward distribution. + let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); + accounts_db_config.test_partitioned_epoch_rewards = + TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks: 1, + stake_account_stores_per_block: 50, + }; + + let bank0 = Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + Arc::default(), + ); + + let num_slots_in_epoch = bank0.get_slots_in_epoch(bank0.epoch()); + assert_eq!(num_slots_in_epoch, 32); + + let mut previous_bank = Arc::new(Bank::new_from_parent( + &Arc::new(bank0), + &Pubkey::default(), + 1, + )); + + // simulate block progress + for slot in 2..=num_slots_in_epoch + 3 { + let pre_cap = previous_bank.capitalization(); + let curr_bank = Bank::new_from_parent(&previous_bank, &Pubkey::default(), slot); + let post_cap = curr_bank.capitalization(); + + // Fill banks with votes landing in the next slot + // Create enough banks such that the vote account will root + for validator_vote_keypairs in validator_keypairs.iter() { + let vote_id = validator_vote_keypairs.vote_keypair.pubkey(); + let mut vote_account = curr_bank.get_account(&vote_id).unwrap(); + // generate some rewards + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); + for i in 0..MAX_LOCKOUT_HISTORY + 42 { + if let Some(v) = vote_state.as_mut() { + vote_state::process_slot_vote_unchecked(v, i as u64) + } + let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + vote_state::to(&versioned, &mut vote_account).unwrap(); + match versioned { + VoteStateVersions::Current(v) => { + vote_state = Some(*v); + } + _ => panic!("Has to be of type Current"), + }; + } + curr_bank.store_account_and_update_capitalization(&vote_id, &vote_account); + } + + if slot == num_slots_in_epoch { + // This is the first block of epoch 1. Reward computation should happen in this block. + // assert that the reward computation status is active at the epoch boundary + assert!(matches!( + curr_bank.get_reward_interval(), + RewardInterval::InsideInterval + )); + + // cap should increase because of the new epoch rewards + assert!(post_cap > pre_cap); + } else if slot == num_slots_in_epoch + 1 { + // When curr_slot == num_slots_in_epoch + 1, the 2nd block of epoch 1, reward distribution should happen in this block.
+ // However, since rewards are transferred from the epoch_rewards sysvar to stake accounts, the cap should stay the same. + assert!(matches!( + curr_bank.get_reward_interval(), + RewardInterval::InsideInterval + )); + + assert_eq!(post_cap, pre_cap); + } else if slot == num_slots_in_epoch + 2 || slot == num_slots_in_epoch + 3 { + // 1. when curr_slot == num_slots_in_epoch + 2, the 3rd block of epoch 1, reward distribution should happen in this block. + // However, all stake rewards are paid in this block, therefore reward_status should have transitioned to inactive. And since + // rewards are transferred from the epoch_rewards sysvar to stake accounts, the cap should stay the same. + // 2. when curr_slot == num_slots_in_epoch + 3, the 4th block of epoch 1, reward distribution should have already completed. Therefore, + // reward_status should stay inactive and the cap should stay the same. + assert!(matches!( + curr_bank.get_reward_interval(), + RewardInterval::OutsideInterval + )); + + assert_eq!(post_cap, pre_cap); + } else { + // slot is outside the reward interval, so the cap should not change + assert_eq!(post_cap, pre_cap); + } + previous_bank = Arc::new(curr_bank); + } +} + /// Test `EpochRewards` sysvar creation, distribution, and burning. /// This test covers the following epoch_rewards_sysvar bank member functions, i.e. /// `create_epoch_rewards_sysvar`, `update_epoch_rewards_sysvar`, `burn_and_purge_account`. @@ -13306,6 +13478,13 @@ fn test_calc_vote_accounts_to_store_normal() { } } +impl Bank { + /// Return the total number of blocks in the reward interval (including both calculation and crediting). + fn get_reward_total_num_blocks(&self, rewards: &StakeRewards) -> u64 { + self.get_reward_calculation_num_blocks() + self.get_reward_distribution_num_blocks(rewards) + } +} + /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during normal epoch gives the expected result #[test] fn test_get_reward_distribution_num_blocks_normal() { @@ -13331,28 +13510,75 @@ fn test_get_reward_distribution_num_blocks_normal() { } /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during small epoch -/// The num_credit_blocks should be cap to 5% of the total number of blocks in the epoch. +/// The num_credit_blocks should be capped at 10% of the total number of blocks in the epoch. #[test] fn test_get_reward_distribution_num_blocks_cap() { let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); genesis_config.epoch_schedule = EpochSchedule::custom(32, 32, false); - let bank = Bank::new_for_tests(&genesis_config); - - // Given 8k rewards, normally it will take 2 blocks to credit all the rewards. However, because of - // the short epoch, i.e. 32 slots, we should cap the number of credit blocks to 32/20 = 1.
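For intuition, the capping rule that the rewritten test below exercises can be sketched in a few lines. This is an illustrative model only: `distribution_num_blocks` and `MAX_FACTOR` are hypothetical names mirroring the test's description (batch by `stake_account_stores_per_block`, cap at 10% of the epoch), not the actual `Bank` methods.

```rust
/// Hedged sketch: batch stake rewards into fixed-size per-block stores,
/// then cap the block count at 1/10 of the slots in the epoch.
const MAX_FACTOR: u64 = 10; // distribute over at most 10% of the epoch

fn distribution_num_blocks(num_stake_rewards: u64, stores_per_block: u64, slots_in_epoch: u64) -> u64 {
    // ceiling division: how many blocks the batches would naively need
    let num_blocks = (num_stake_rewards + stores_per_block - 1) / stores_per_block;
    num_blocks.max(1).min(slots_in_epoch / MAX_FACTOR)
}

fn main() {
    // Mirrors the test table below: 32-slot epoch, 10 stores per block, cap = 32/10 = 3.
    assert_eq!(distribution_num_blocks(0, 10, 32), 1);  // always at least one block
    assert_eq!(distribution_num_blocks(19, 10, 32), 2);
    assert_eq!(distribution_num_blocks(50, 10, 32), 3); // capped at 3
}
```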
- let expected_num = 8192; - let stake_rewards = (0..expected_num) - .map(|_| StakeReward::new_random()) - .collect::<Vec<_>>(); + // Configure the stake reward distribution to store 10 accounts per block + let mut accounts_db_config: AccountsDbConfig = ACCOUNTS_DB_CONFIG_FOR_TESTING.clone(); + accounts_db_config.test_partitioned_epoch_rewards = + TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks: 1, + stake_account_stores_per_block: 10, + }; - assert_eq!(bank.get_reward_distribution_num_blocks(&stake_rewards), 1); - assert_eq!(bank.get_reward_calculation_num_blocks(), 1); - assert_eq!( - bank.get_reward_total_num_blocks(&stake_rewards), - bank.get_reward_distribution_num_blocks(&stake_rewards) - + bank.get_reward_calculation_num_blocks(), + let bank = Bank::new_with_paths( + &genesis_config, + Arc::new(RuntimeConfig::default()), + Vec::new(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(accounts_db_config), + None, + Arc::default(), ); + + let stake_account_stores_per_block = bank.partitioned_rewards_stake_account_stores_per_block(); + assert_eq!(stake_account_stores_per_block, 10); + + let check_num_reward_distribution_blocks = + |num_stakes: u64, + expected_num_reward_distribution_blocks: u64, + expected_num_reward_computation_blocks: u64| { + // Given the short epoch, i.e. 32 slots, we should cap the number of reward distribution blocks to 32/10 = 3. + let stake_rewards = (0..num_stakes) + .map(|_| StakeReward::new_random()) + .collect::<Vec<_>>(); + + assert_eq!( + bank.get_reward_distribution_num_blocks(&stake_rewards), + expected_num_reward_distribution_blocks + ); + assert_eq!( + bank.get_reward_calculation_num_blocks(), + expected_num_reward_computation_blocks + ); + assert_eq!( + bank.get_reward_total_num_blocks(&stake_rewards), + bank.get_reward_distribution_num_blocks(&stake_rewards) + + bank.get_reward_calculation_num_blocks(), + ); + }; + + for test_record in [ + // num_stakes, expected_num_reward_distribution_blocks, expected_num_reward_computation_blocks + (0, 1, 1), + (1, 1, 1), + (stake_account_stores_per_block, 1, 1), + (2 * stake_account_stores_per_block - 1, 2, 1), + (2 * stake_account_stores_per_block, 2, 1), + (3 * stake_account_stores_per_block - 1, 3, 1), + (3 * stake_account_stores_per_block, 3, 1), + (4 * stake_account_stores_per_block, 3, 1), // cap at 3 + (5 * stake_account_stores_per_block, 3, 1), // cap at 3 + ] { + check_num_reward_distribution_blocks(test_record.0, test_record.1, test_record.2); + } } /// Test get_reward_distribution_num_blocks, get_reward_calculation_num_blocks, get_reward_total_num_blocks during warm up epoch gives the expected result. diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/epoch_rewards_hasher.rs index b5398d6e429993..22dedb2d72cd40 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/epoch_rewards_hasher.rs @@ -40,7 +40,6 @@ fn hash_to_partition(hash: u64, partitions: usize) -> usize { .saturating_div(u128::from(u64::MAX).saturating_add(1))) as usize } -#[allow(dead_code)] pub(crate) fn hash_rewards_into_partitions( stake_rewards: StakeRewards, parent_block_hash: &Hash, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index e016fa1cae957f..6d12e29db3c3d1 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -251,9 +251,8 @@ fn sanitize_path(entry_path: &Path, dst: &Path) -> Result<Option<PathBuf>> { } // Skip entries without a parent (i.e.
outside of FS root) - let parent = match file_dst.parent() { - Some(p) => p, - None => return SKIP, + let Some(parent) = file_dst.parent() else { + return SKIP; }; fs::create_dir_all(parent)?; diff --git a/runtime/src/partitioned_rewards.rs b/runtime/src/partitioned_rewards.rs index 41458356e70305..f9c286d01cfea7 100644 --- a/runtime/src/partitioned_rewards.rs +++ b/runtime/src/partitioned_rewards.rs @@ -12,7 +12,7 @@ pub(crate) struct PartitionedEpochRewardsConfig { /// Normally, this will be 1. /// if force_one_slot_partitioned_rewards, this will be 0 (ie. we take 0 blocks just for reward calculation) pub(crate) reward_calculation_num_blocks: Slot, - /// number of stake accounts to store in one block during partititioned reward interval + /// number of stake accounts to store in one block during partitioned reward interval /// normally, this is a number tuned for reasonable performance, such as 4096 accounts/block /// if force_one_slot_partitioned_rewards, this will usually be u64::MAX so that all stake accounts are written in the first block pub(crate) stake_account_stores_per_block: Slot, @@ -47,6 +47,10 @@ pub enum TestPartitionedEpochRewards { None, CompareResults, ForcePartitionedEpochRewardsInOneBlock, + PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks: u64, + stake_account_stores_per_block: u64, + }, } #[allow(dead_code)] @@ -60,10 +64,17 @@ impl PartitionedEpochRewardsConfig { TestPartitionedEpochRewards::ForcePartitionedEpochRewardsInOneBlock => { Self::set_test_enable_partitioned_rewards() } + TestPartitionedEpochRewards::PartitionedEpochRewardsConfigRewardBlocks { + reward_calculation_num_blocks, stake_account_stores_per_block } => { + Self::set_test_enable_partitioned_rewards_with_custom_number_of_stake_accounts_per_block( + reward_calculation_num_blocks, + stake_account_stores_per_block) + } + } } - /// All rewards will be distributed in the first block in the epoch, maching + /// All rewards will be distributed in the first block in the epoch, matching /// consensus for the non-partitioned rewards, but running all the partitioned rewards /// code. fn set_test_enable_partitioned_rewards() -> Self { @@ -85,4 +96,19 @@ impl PartitionedEpochRewardsConfig { ..PartitionedEpochRewardsConfig::default() } } + + /// A method that configures how many reward calculation blocks and how many stake + /// accounts to store per reward block.
+ fn set_test_enable_partitioned_rewards_with_custom_number_of_stake_accounts_per_block( + reward_calculation_num_blocks: u64, + stake_account_stores_per_block: u64, + ) -> Self { + Self { + reward_calculation_num_blocks, + stake_account_stores_per_block, + test_enable_partitioned_rewards: true, + // irrelevant if we are not running the old code path + test_compare_partitioned_epoch_rewards: false, + } + } } diff --git a/runtime/src/read_only_accounts_cache.rs b/runtime/src/read_only_accounts_cache.rs index 9956af64bb8ec8..0532a570596d53 100644 --- a/runtime/src/read_only_accounts_cache.rs +++ b/runtime/src/read_only_accounts_cache.rs @@ -72,12 +72,9 @@ impl ReadOnlyAccountsCache { pub(crate) fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> { let key = (pubkey, slot); - let mut entry = match self.cache.get_mut(&key) { - None => { - self.misses.fetch_add(1, Ordering::Relaxed); - return None; - } - Some(entry) => entry, + let Some(mut entry) = self.cache.get_mut(&key) else { + self.misses.fetch_add(1, Ordering::Relaxed); + return None; }; self.hits.fetch_add(1, Ordering::Relaxed); // Move the entry to the end of the queue. @@ -122,9 +119,8 @@ impl ReadOnlyAccountsCache { // Evict entries from the front of the queue. let mut num_evicts = 0; while self.data_size.load(Ordering::Relaxed) > self.max_data_size { - let (pubkey, slot) = match self.queue.lock().unwrap().get_first() { - None => break, - Some(key) => *key, + let Some(&(pubkey, slot)) = self.queue.lock().unwrap().get_first() else { + break; }; num_evicts += 1; self.remove(pubkey, slot); diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 2919db702c4dbc..8d39f03cc5f414 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -216,9 +216,8 @@ impl Stakes<StakeAccount> { F: Fn(&Pubkey) -> Option<AccountSharedData>, { let stake_delegations = stakes.stake_delegations.iter().map(|(pubkey, delegation)| { - let stake_account = match get_account(pubkey) { - None => return Err(Error::StakeAccountNotFound(*pubkey)), - Some(account) => account, + let Some(stake_account) = get_account(pubkey) else { + return Err(Error::StakeAccountNotFound(*pubkey)); }; let stake_account = StakeAccount::try_from(stake_account)?; // Sanity check that the delegation is consistent with what is @@ -231,9 +230,8 @@ impl Stakes<StakeAccount> { }); // Assert that cached vote accounts are consistent with accounts-db.
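The read-only accounts cache hunk above keeps the same eviction loop, just flattened with `let`-`else`; the shape of that loop is worth seeing on its own. A toy version, with illustrative types standing in for the real `ReadOnlyAccountsCache` internals:

```rust
use std::collections::{HashMap, VecDeque};

/// Toy LRU cache illustrating the eviction loop above: keys are popped from
/// the front of the recency queue until data size is back under budget.
struct ToyCache {
    queue: VecDeque<u64>,       // least-recently-used keys at the front
    sizes: HashMap<u64, usize>, // per-entry data sizes
    data_size: usize,
    max_data_size: usize,
}

impl ToyCache {
    fn evict(&mut self) -> usize {
        let mut num_evicts = 0;
        while self.data_size > self.max_data_size {
            let Some(key) = self.queue.pop_front() else {
                break; // nothing left to evict
            };
            if let Some(size) = self.sizes.remove(&key) {
                self.data_size -= size;
            }
            num_evicts += 1;
        }
        num_evicts
    }
}

fn main() {
    let mut cache = ToyCache {
        queue: VecDeque::from([1, 2, 3]),
        sizes: HashMap::from([(1, 40), (2, 40), (3, 40)]),
        data_size: 120,
        max_data_size: 50,
    };
    assert_eq!(cache.evict(), 2); // evicts keys 1 and 2 (80 bytes freed)
    assert_eq!(cache.data_size, 40);
}
```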
for (pubkey, vote_account) in stakes.vote_accounts.iter() { - let account = match get_account(pubkey) { - None => return Err(Error::VoteAccountNotFound(*pubkey)), - Some(account) => account, + let Some(account) = get_account(pubkey) else { + return Err(Error::VoteAccountNotFound(*pubkey)); }; let vote_account = vote_account.account(); if vote_account != &account { @@ -250,9 +248,8 @@ impl Stakes<StakeAccount> { .filter(|voter_pubkey| stakes.vote_accounts.get(voter_pubkey).is_none()) .collect(); for pubkey in voter_pubkeys { - let account = match get_account(&pubkey) { - None => continue, - Some(account) => account, + let Some(account) = get_account(&pubkey) else { + continue; }; if VoteStateVersions::is_correct_size_and_initialized(account.data()) && VoteAccount::try_from(account.clone()).is_ok() diff --git a/runtime/src/tiered_storage/readable.rs b/runtime/src/tiered_storage/readable.rs index cf2b968f76208f..8a9e223e8891f4 100644 --- a/runtime/src/tiered_storage/readable.rs +++ b/runtime/src/tiered_storage/readable.rs @@ -41,6 +41,11 @@ impl<'a, M: TieredAccountMeta> TieredReadableAccount<'a, M> { pub fn write_version(&self) -> Option<StoredMetaWriteVersion> { self.meta.write_version(self.account_block) } + + /// Returns the data associated to this account. + pub fn data(&self) -> &'a [u8] { + self.meta.account_data(self.account_block) + } } impl<'a, M: TieredAccountMeta> ReadableAccount for TieredReadableAccount<'a, M> { @@ -73,6 +78,6 @@ impl<'a, M: TieredAccountMeta> ReadableAccount for TieredReadableAccount<'a, M> /// Returns the data associated to this account. fn data(&self) -> &'a [u8] { - self.meta.account_data(self.account_block) + self.data() } } diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 87aa91c6f7f402..e5e8e1cbbf2a87 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -179,9 +179,8 @@ impl VoteAccounts { if stake == 0u64 { return; } - let staked_nodes = match self.staked_nodes.get_mut() { - None => return, - Some(staked_nodes) => staked_nodes, + let Some(staked_nodes) = self.staked_nodes.get_mut() else { + return; }; if let Some(node_pubkey) = vote_account.node_pubkey() { Arc::make_mut(staked_nodes) @@ -195,9 +194,8 @@ impl VoteAccounts { if stake == 0u64 { return; } - let staked_nodes = match self.staked_nodes.get_mut() { - None => return, - Some(staked_nodes) => staked_nodes, + let Some(staked_nodes) = self.staked_nodes.get_mut() else { + return; }; if let Some(node_pubkey) = vote_account.node_pubkey() { match Arc::make_mut(staked_nodes).entry(node_pubkey) { diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 7397e1642c7a0d..b91f82588af78e 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -339,9 +339,8 @@ fn postprocess_dump(program_dump: &Path) { let mut rel: HashMap<u64, u64> = HashMap::new(); let mut name = String::from(""); let mut state = 0; - let file = match File::open(program_dump) { - Ok(x) => x, - _ => return, + let Ok(file) = File::open(program_dump) else { + return; }; for line_result in BufReader::new(file).lines() { let line = line_result.unwrap(); @@ -371,14 +370,12 @@ fn postprocess_dump(program_dump: &Path) { } } } - let file = match File::create(&postprocessed_dump) { - Ok(x) => x, - _ => return, + let Ok(file) = File::create(&postprocessed_dump) else { + return; }; let mut out = BufWriter::new(file); - let file = match File::open(program_dump) { - Ok(x) => x, - _ => return, + let Ok(file) = File::open(program_dump) else { + return; }; let mut pc = 0u64; let mut step = 0u64; @@
-426,9 +423,8 @@ fn postprocess_dump(program_dump: &Path) { // not known to the runtime and warn about them if any. fn check_undefined_symbols(config: &Config, program: &Path) { let syscalls_txt = config.sbf_sdk.join("syscalls.txt"); - let file = match File::open(syscalls_txt) { - Ok(x) => x, - _ => return, + let Ok(file) = File::open(syscalls_txt) else { + return; }; let mut syscalls = HashSet::new(); for line_result in BufReader::new(file).lines() { diff --git a/sdk/gen-headers/src/main.rs b/sdk/gen-headers/src/main.rs index 9d81438f14e705..d40143bdc74981 100644 --- a/sdk/gen-headers/src/main.rs +++ b/sdk/gen-headers/src/main.rs @@ -44,17 +44,14 @@ fn main() { */ fn transform(inc: &PathBuf) { let inc_path = PathBuf::from(inc); - let filename = match inc_path.file_name() { - Some(f) => f, - None => return, + let Some(filename) = inc_path.file_name() else { + return; }; - let parent = match inc_path.parent() { - Some(f) => f, - None => return, + let Some(parent) = inc_path.parent() else { + return; }; - let parent = match parent.parent() { - Some(f) => f, - None => return, + let Some(parent) = parent.parent() else { + return; }; let mut header_path = PathBuf::from(parent); let mut filename = PathBuf::from(filename); diff --git a/sdk/program/src/serde_varint.rs b/sdk/program/src/serde_varint.rs index 6476a1f0cfd24d..40872be07fc787 100644 --- a/sdk/program/src/serde_varint.rs +++ b/sdk/program/src/serde_varint.rs @@ -73,10 +73,9 @@ macro_rules! impl_var_int { let mut out = 0; let mut shift = 0u32; while shift < <$type>::BITS { - let byte = match seq.next_element::<u8>()? { - None => return Err(A::Error::custom("Invalid Sequence")), - Some(byte) => byte, - }; + let Some(byte) = seq.next_element::<u8>()? else { + return Err(A::Error::custom("Invalid Sequence")); + }; out |= ((byte & 0x7F) as Self) << shift; if byte & 0x80 == 0 { // Last byte should not have been truncated when it was diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 3e39d2f4df9d3c..45e1f19c30bb80 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -686,15 +686,11 @@ pub mod serde_compact_vote_state_update { let lockout_offsets = vote_state_update.lockouts.iter().scan( vote_state_update.root.unwrap_or_default(), |slot, lockout| { - let offset = match lockout.slot().checked_sub(*slot) { - None => return Some(Err(serde::ser::Error::custom("Invalid vote lockout"))), - Some(offset) => offset, + let Some(offset) = lockout.slot().checked_sub(*slot) else { + return Some(Err(serde::ser::Error::custom("Invalid vote lockout"))); }; - let confirmation_count = match u8::try_from(lockout.confirmation_count()) { - Ok(confirmation_count) => confirmation_count, - Err(_) => { - return Some(Err(serde::ser::Error::custom("Invalid confirmation count"))) - } + let Ok(confirmation_count) = u8::try_from(lockout.confirmation_count()) else { + return Some(Err(serde::ser::Error::custom("Invalid confirmation count"))); }; let lockout_offset = LockoutOffset { offset, diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 0657a5b17637b8..4d1bbf3e755b26 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -560,12 +560,11 @@ impl AccountSharedData { } pub fn set_data_from_slice(&mut self, new_data: &[u8]) { - let data = match Arc::get_mut(&mut self.data) { - // The buffer isn't shared, so we're going to memcpy in place. - Some(data) => data, + // If the buffer isn't shared, we're going to memcpy in place.
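The `set_data_from_slice` hunk beginning here is a copy-on-write pattern around `Arc::get_mut`: mutate in place when the buffer is uniquely owned, otherwise replace the whole buffer. A self-contained sketch of the same idea (a toy function, not `AccountSharedData`'s actual layout):

```rust
use std::sync::Arc;

/// Copy-on-write write: memcpy in place when `buf` is the sole owner,
/// otherwise allocate a fresh buffer so other holders are unaffected.
fn set_bytes(buf: &mut Arc<Vec<u8>>, new_data: &[u8]) {
    let Some(data) = Arc::get_mut(buf) else {
        // Shared: the cheapest option is to clone the incoming slice.
        *buf = Arc::new(new_data.to_vec());
        return;
    };
    // Unique owner: overwrite in place, reusing the existing allocation.
    data.clear();
    data.extend_from_slice(new_data);
}

fn main() {
    let mut buf = Arc::new(vec![1u8, 2, 3]);
    set_bytes(&mut buf, &[9, 9]); // unique: mutated in place
    let alias = Arc::clone(&buf);
    set_bytes(&mut buf, &[7]); // shared: `buf` is replaced, `alias` untouched
    assert_eq!(*alias, vec![9, 9]);
    assert_eq!(*buf, vec![7]);
}
```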
+ let Some(data) = Arc::get_mut(&mut self.data) else { // If the buffer is shared, the cheapest thing to do is to clone the // incoming slice and replace the buffer. - None => return self.set_data(new_data.to_vec()), + return self.set_data(new_data.to_vec()); }; let new_len = new_data.len(); diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 02614bbd14f8f4..2f5b9471bb3032 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -796,9 +796,8 @@ async fn handle_chunk( stats.total_invalid_chunks.fetch_add(1, Ordering::Relaxed); return true; } - let end_of_chunk = match chunk.offset.checked_add(chunk_len) { - Some(end) => end, - None => return true, + let Some(end_of_chunk) = chunk.offset.checked_add(chunk_len) else { + return true; }; if end_of_chunk > PACKET_DATA_SIZE as u64 { stats @@ -819,10 +818,9 @@ async fn handle_chunk( if let Some(accum) = packet_accum.as_mut() { let offset = chunk.offset; - let end_of_chunk = match (chunk.offset as usize).checked_add(chunk.bytes.len()) - { - Some(end) => end, - None => return true, + let Some(end_of_chunk) = (chunk.offset as usize).checked_add(chunk.bytes.len()) + else { + return true; }; accum.chunks.push(PacketChunk { bytes: chunk.bytes, diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 29868bb0198e4e..56c3d5bd02cf9d 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -367,9 +367,8 @@ impl TestValidatorGenesis { accounts: &[AccountInfo], ) -> Result<&mut Self, String> { for account in accounts { - let account_path = match solana_program_test::find_file(account.filename) { - Some(path) => path, - None => return Err(format!("Unable to locate {}", account.filename)), + let Some(account_path) = solana_program_test::find_file(account.filename) else { + return Err(format!("Unable to locate {}", account.filename)); }; let mut file = File::open(&account_path).unwrap(); let mut account_info_raw = String::new(); diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 9807959fd0c545..1cddfcef4854fb 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -23,7 +23,6 @@ rand_chacha = { workspace = true } rayon = { workspace = true } rcgen = { workspace = true } rustls = { workspace = true } -solana-client = { workspace = true } solana-entry = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true } diff --git a/turbine/benches/cluster_info.rs b/turbine/benches/cluster_info.rs index cde1dacd44850c..f10c2151bb6687 100644 --- a/turbine/benches/cluster_info.rs +++ b/turbine/benches/cluster_info.rs @@ -12,14 +12,13 @@ use { genesis_utils::{create_genesis_config, GenesisConfigInfo}, shred::{Shred, ShredFlags}, }, - solana_quic_client::new_quic_connection_cache, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ pubkey, signature::{Keypair, Signer}, timing::{timestamp, AtomicInterval}, }, - solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_streamer::socket::SocketAddrSpace, solana_turbine::{ broadcast_stage::{ broadcast_metrics::TransmitShredsStats, broadcast_shreds, BroadcastStage, @@ -28,7 +27,7 @@ use { }, std::{ collections::HashMap, - net::{IpAddr, Ipv4Addr, UdpSocket}, + net::UdpSocket, sync::{Arc, RwLock}, time::Duration, }, @@ -39,14 +38,8 @@ use { fn broadcast_shreds_bench(bencher: &mut Bencher) { solana_logger::setup(); let leader_keypair = Arc::new(Keypair::new()); - let quic_connection_cache = new_quic_connection_cache( - "connection_cache_test", - 
&leader_keypair, - IpAddr::V4(Ipv4Addr::LOCALHOST), - &Arc::>::default(), - 4, // connection_pool_size - ) - .unwrap(); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*capacity:*/ 128); let leader_info = Node::new_localhost_with_pubkey(&leader_keypair.pubkey()); let cluster_info = ClusterInfo::new( leader_info.info, @@ -82,12 +75,12 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { &socket, &shreds, &cluster_nodes_cache, - &quic_connection_cache, &last_datapoint, &mut TransmitShredsStats::default(), &cluster_info, &bank_forks, &SocketAddrSpace::Unspecified, + &quic_endpoint_sender, ) .unwrap(); }); diff --git a/turbine/benches/retransmit_stage.rs b/turbine/benches/retransmit_stage.rs index 5907b18ec639f8..b0dd67db8225ec 100644 --- a/turbine/benches/retransmit_stage.rs +++ b/turbine/benches/retransmit_stage.rs @@ -25,11 +25,11 @@ use { system_transaction, timing::timestamp, }, - solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_streamer::socket::SocketAddrSpace, solana_turbine::retransmit_stage::retransmitter, std::{ iter::repeat_with, - net::{IpAddr, Ipv4Addr, UdpSocket}, + net::{Ipv4Addr, UdpSocket}, sync::{ atomic::{AtomicUsize, Ordering}, Arc, RwLock, @@ -97,16 +97,8 @@ fn bench_retransmitter(bencher: &mut Bencher) { .collect(); let keypair = Keypair::new(); - let quic_connection_cache = Arc::new( - solana_quic_client::new_quic_connection_cache( - "connection_cache_test", - &keypair, - IpAddr::V4(Ipv4Addr::LOCALHOST), - &Arc::>::default(), - 4, // connection_pool_size - ) - .unwrap(), - ); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*capacity:*/ 128); let slot = 0; let parent = 0; let shredder = Shredder::new(slot, parent, 0, 0).unwrap(); @@ -125,7 +117,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { let retransmitter_handles = retransmitter( Arc::new(sockets), - quic_connection_cache, + quic_endpoint_sender, bank_forks, leader_schedule_cache, cluster_info, diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index 116f488859c197..90a112c24ea011 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -9,9 +9,9 @@ use { standard_broadcast_run::StandardBroadcastRun, }, crate::cluster_nodes::{self, ClusterNodes, ClusterNodesCache}, + bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender}, itertools::{Either, Itertools}, - solana_client::tpu_connection::TpuConnection, solana_gossip::{ cluster_info::{ClusterInfo, ClusterInfoError}, contact_info::Protocol, @@ -20,7 +20,6 @@ use { solana_measure::measure::Measure, solana_metrics::{inc_new_counter_error, inc_new_counter_info}, solana_poh::poh_recorder::WorkingBankEntry, - solana_quic_client::QuicConnectionCache, solana_runtime::bank_forks::BankForks, solana_sdk::{ clock::Slot, @@ -35,7 +34,7 @@ use { std::{ collections::{HashMap, HashSet}, iter::repeat_with, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, RwLock, @@ -44,6 +43,7 @@ use { time::{Duration, Instant}, }, thiserror::Error, + tokio::sync::mpsc::Sender as AsyncSender, }; pub mod broadcast_duplicates_run; @@ -107,7 +107,7 @@ impl BroadcastStageType { blockstore: Arc, bank_forks: Arc>, shred_version: u16, - quic_connection_cache: Arc, + quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, ) -> BroadcastStage { match self { BroadcastStageType::Standard => BroadcastStage::new( @@ -118,7 +118,8 @@ impl BroadcastStageType { 
exit_sender, blockstore, bank_forks, - StandardBroadcastRun::new(shred_version, quic_connection_cache), + quic_endpoint_sender, + StandardBroadcastRun::new(shred_version), ), BroadcastStageType::FailEntryVerification => BroadcastStage::new( @@ -129,7 +130,8 @@ impl BroadcastStageType { exit_sender, blockstore, bank_forks, - FailEntryVerificationBroadcastRun::new(shred_version, quic_connection_cache), + quic_endpoint_sender, + FailEntryVerificationBroadcastRun::new(shred_version), ), BroadcastStageType::BroadcastFakeShreds => BroadcastStage::new( @@ -140,6 +142,7 @@ impl BroadcastStageType { exit_sender, blockstore, bank_forks, + quic_endpoint_sender, BroadcastFakeShredsRun::new(0, shred_version), ), @@ -151,6 +154,7 @@ impl BroadcastStageType { exit_sender, blockstore, bank_forks, + quic_endpoint_sender, BroadcastDuplicatesRun::new(shred_version, config.clone()), ), } @@ -172,6 +176,7 @@ trait BroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()>; fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()>; } @@ -265,6 +270,7 @@ impl BroadcastStage { exit: Arc, blockstore: Arc, bank_forks: Arc>, + quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>, broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone, ) -> Self { let (socket_sender, socket_receiver) = unbounded(); @@ -296,8 +302,15 @@ impl BroadcastStage { let mut bs_transmit = broadcast_stage_run.clone(); let cluster_info = cluster_info.clone(); let bank_forks = bank_forks.clone(); + let quic_endpoint_sender = quic_endpoint_sender.clone(); let run_transmit = move || loop { - let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks); + let res = bs_transmit.transmit( + &socket_receiver, + &cluster_info, + &sock, + &bank_forks, + &quic_endpoint_sender, + ); let res = Self::handle_error(res, "solana-broadcaster-transmit"); if let Some(res) = res { return res; @@ -411,12 +424,12 @@ pub fn broadcast_shreds( s: &UdpSocket, shreds: &[Shred], cluster_nodes_cache: &ClusterNodesCache, - quic_connection_cache: &QuicConnectionCache, last_datapoint_submit: &AtomicInterval, transmit_stats: &mut TransmitShredsStats, cluster_info: &ClusterInfo, bank_forks: &RwLock, socket_addr_space: &SocketAddrSpace, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { let mut result = Ok(()); let mut shred_select = Measure::start("shred_select"); @@ -459,14 +472,14 @@ pub fn broadcast_shreds( } send_mmsg_time.stop(); transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us(); - for (shred, addr) in &quic_packets { - let conn = quic_connection_cache.get_connection(addr); - if let Err(err) = conn.send_data(shred) { + transmit_stats.total_packets += packets.len() + quic_packets.len(); + for (shred, addr) in quic_packets { + let shred = Bytes::from(shred.clone()); + if let Err(err) = quic_endpoint_sender.blocking_send((addr, shred)) { transmit_stats.dropped_packets_quic += 1; result = Err(Error::from(err)); } } - transmit_stats.total_packets += packets.len() + quic_packets.len(); result } @@ -476,6 +489,12 @@ impl From> for Error { } } +impl From> for Error { + fn from(_: tokio::sync::mpsc::error::SendError) -> Error { + Error::Send + } +} + #[cfg(test)] pub mod test { use { @@ -494,9 +513,7 @@ pub mod test { hash::Hash, signature::{Keypair, Signer}, }, - solana_streamer::streamer::StakedNodes, std::{ - net::{IpAddr, Ipv4Addr}, path::Path, sync::{atomic::AtomicBool, Arc}, 
thread::sleep, @@ -628,16 +645,8 @@ pub mod test { ) -> MockBroadcastStage { // Make the database ledger let blockstore = Arc::new(Blockstore::open(ledger_path).unwrap()); - let quic_connection_cache = Arc::new( - solana_quic_client::new_quic_connection_cache( - "connection_cache_test", - &leader_keypair, - IpAddr::V4(Ipv4Addr::LOCALHOST), - &Arc::<RwLock<StakedNodes>>::default(), - 4, // connection_pool_size - ) - .unwrap(), - ); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*capacity:*/ 128); // Make the leader node and scheduler let leader_info = Node::new_localhost_with_pubkey(&leader_keypair.pubkey()); @@ -671,7 +680,8 @@ pub mod test { exit_sender, blockstore.clone(), bank_forks, - StandardBroadcastRun::new(0, quic_connection_cache), + quic_endpoint_sender, + StandardBroadcastRun::new(0), ); MockBroadcastStage { diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 61f3a9b0f487f6..4ced9739c83f86 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -265,6 +265,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock<BankForks>, + _quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { let (shreds, _) = receiver.recv()?; if shreds.is_empty() { diff --git a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index 05c48de50583af..1464d46493d730 100644 --- a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -132,6 +132,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { cluster_info: &ClusterInfo, sock: &UdpSocket, _bank_forks: &RwLock<BankForks>, + _quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { for (data_shreds, batch_info) in receiver { let fake = batch_info.is_some(); diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index fe4c28b6ade198..66cf942da6a635 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -42,9 +42,8 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults> // Drain channel while last_tick_height != bank.max_tick_height() { - let (try_bank, (entry, tick_height)) = match receiver.try_recv() { - Ok(working_bank_entry) => working_bank_entry, - Err(_) => break, + let Ok((try_bank, (entry, tick_height))) = receiver.try_recv() else { + break; }; // If the bank changed, that implies the previous slot was interrupted and we do not have to // broadcast its entries. @@ -65,11 +64,11 @@ pub(super) fn recv_slot_entries(receiver: &Receiver<WorkingBankEntry>) -> Result<ReceiveResults> while last_tick_height != bank.max_tick_height() && serialized_batch_byte_count < target_serialized_batch_byte_count { - let (try_bank, (entry, tick_height)) = - match receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) { - Ok(working_bank_entry) => working_bank_entry, - Err(_) => break, - }; + let Ok((try_bank, (entry, tick_height))) = + receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) + else { + break; + }; // If the bank changed, that implies the previous slot was interrupted and we do not have to // broadcast its entries.
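The `recv_slot_entries` changes above keep the two-phase receive: drain what is immediately available with `try_recv`, then keep coalescing with `recv_deadline` until the batch byte budget or the coalesce window runs out. A stripped-down model of that loop; the constants here are invented for illustration and do not match the real `ENTRY_COALESCE_DURATION` or byte target:

```rust
use crossbeam_channel::Receiver;
use std::time::{Duration, Instant};

const COALESCE_WINDOW: Duration = Duration::from_millis(50); // illustrative
const BYTE_BUDGET: usize = 64 * 1024; // illustrative

/// Pull entries until the budget is hit, the window closes, or the channel
/// disconnects; `let`-`else` keeps the early exit flat, as in the diff.
fn drain_coalesced(receiver: &Receiver<Vec<u8>>) -> Vec<Vec<u8>> {
    let coalesce_start = Instant::now();
    let mut batch = Vec::new();
    let mut bytes = 0usize;
    while bytes < BYTE_BUDGET {
        let Ok(entry) = receiver.recv_deadline(coalesce_start + COALESCE_WINDOW) else {
            break; // timed out or sender dropped
        };
        bytes += entry.len();
        batch.push(entry);
    }
    batch
}

fn main() {
    let (sender, receiver) = crossbeam_channel::unbounded();
    sender.send(vec![0u8; 10]).unwrap();
    drop(sender); // disconnect so the drain terminates immediately
    assert_eq!(drain_coalesced(&receiver).len(), 1);
}
```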
if try_bank.slot() != bank.slot() { diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 85500b1dd753b9..1dda981e693218 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -4,6 +4,7 @@ use { solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{hash::Hash, signature::Keypair}, std::{thread::sleep, time::Duration}, + tokio::sync::mpsc::Sender as AsyncSender, }; pub const NUM_BAD_SLOTS: u64 = 10; @@ -17,12 +18,11 @@ pub(super) struct FailEntryVerificationBroadcastRun { next_shred_index: u32, next_code_index: u32, cluster_nodes_cache: Arc>, - quic_connection_cache: Arc, reed_solomon_cache: Arc, } impl FailEntryVerificationBroadcastRun { - pub(super) fn new(shred_version: u16, quic_connection_cache: Arc) -> Self { + pub(super) fn new(shred_version: u16) -> Self { let cluster_nodes_cache = Arc::new(ClusterNodesCache::::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, CLUSTER_NODES_CACHE_TTL, @@ -34,7 +34,6 @@ impl FailEntryVerificationBroadcastRun { next_shred_index: 0, next_code_index: 0, cluster_nodes_cache, - quic_connection_cache, reed_solomon_cache: Arc::::default(), } } @@ -164,18 +163,19 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { let (shreds, _) = receiver.recv()?; broadcast_shreds( sock, &shreds, &self.cluster_nodes_cache, - &self.quic_connection_cache, &AtomicInterval::default(), &mut TransmitShredsStats::default(), cluster_info, bank_forks, cluster_info.socket_addr_space(), + quic_endpoint_sender, ) } fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()> { diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 34dd8c36b28082..592d6e699d1d9c 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -19,6 +19,7 @@ use { timing::{duration_as_us, AtomicInterval}, }, std::{sync::RwLock, time::Duration}, + tokio::sync::mpsc::Sender as AsyncSender, }; #[derive(Clone)] @@ -33,7 +34,6 @@ pub struct StandardBroadcastRun { last_datapoint_submit: Arc, num_batches: usize, cluster_nodes_cache: Arc>, - quic_connection_cache: Arc, reed_solomon_cache: Arc, } @@ -43,7 +43,7 @@ enum BroadcastError { } impl StandardBroadcastRun { - pub(super) fn new(shred_version: u16, quic_connection_cache: Arc) -> Self { + pub(super) fn new(shred_version: u16) -> Self { let cluster_nodes_cache = Arc::new(ClusterNodesCache::::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, CLUSTER_NODES_CACHE_TTL, @@ -59,7 +59,6 @@ impl StandardBroadcastRun { last_datapoint_submit: Arc::default(), num_batches: 0, cluster_nodes_cache, - quic_connection_cache, reed_solomon_cache: Arc::::default(), } } @@ -195,15 +194,16 @@ impl StandardBroadcastRun { blockstore: &Blockstore, receive_results: ReceiveResults, bank_forks: &RwLock, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { let (bsend, brecv) = unbounded(); let (ssend, srecv) = unbounded(); self.process_receive_results(keypair, blockstore, &ssend, &bsend, receive_results)?; //data - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks); + let _ = self.transmit(&srecv, cluster_info, sock, 
bank_forks, quic_endpoint_sender); let _ = self.record(&brecv, blockstore); //coding - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks); + let _ = self.transmit(&srecv, cluster_info, sock, bank_forks, quic_endpoint_sender); let _ = self.record(&brecv, blockstore); Ok(()) } @@ -402,6 +402,7 @@ impl StandardBroadcastRun { shreds: Arc>, broadcast_shred_batch_info: Option, bank_forks: &RwLock, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { trace!("Broadcasting {:?} shreds", shreds.len()); let mut transmit_stats = TransmitShredsStats::default(); @@ -412,12 +413,12 @@ impl StandardBroadcastRun { sock, &shreds, &self.cluster_nodes_cache, - &self.quic_connection_cache, &self.last_datapoint_submit, &mut transmit_stats, cluster_info, bank_forks, cluster_info.socket_addr_space(), + quic_endpoint_sender, )?; transmit_time.stop(); @@ -487,9 +488,17 @@ impl BroadcastRun for StandardBroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>, ) -> Result<()> { let (shreds, batch_info) = receiver.recv()?; - self.broadcast(sock, cluster_info, shreds, batch_info, bank_forks) + self.broadcast( + sock, + cluster_info, + shreds, + batch_info, + bank_forks, + quic_endpoint_sender, + ) } fn record(&mut self, receiver: &RecordReceiver, blockstore: &Blockstore) -> Result<()> { let (shreds, slot_start_ts) = receiver.recv()?; @@ -520,13 +529,8 @@ mod test { genesis_config::GenesisConfig, signature::{Keypair, Signer}, }, - solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, - std::{ - net::{IpAddr, Ipv4Addr}, - ops::Deref, - sync::Arc, - time::Duration, - }, + solana_streamer::socket::SocketAddrSpace, + std::{ops::Deref, sync::Arc, time::Duration}, }; #[allow(clippy::type_complexity)] @@ -572,24 +576,10 @@ mod test { ) } - fn new_quic_connection_cache(keypair: &Keypair) -> Arc { - Arc::new( - solana_quic_client::new_quic_connection_cache( - "connection_cache_test", - keypair, - IpAddr::V4(Ipv4Addr::LOCALHOST), - &Arc::>::default(), - 4, // connection_pool_size - ) - .unwrap(), - ) - } - #[test] fn test_interrupted_slot_last_shred() { let keypair = Arc::new(Keypair::new()); - let quic_connection_cache = new_quic_connection_cache(&keypair); - let mut run = StandardBroadcastRun::new(0, quic_connection_cache); + let mut run = StandardBroadcastRun::new(0); // Set up the slot to be interrupted let next_shred_index = 10; @@ -631,7 +621,8 @@ mod test { let num_shreds_per_slot = 2; let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket, bank_forks) = setup(num_shreds_per_slot); - let quic_connection_cache = new_quic_connection_cache(&leader_keypair); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*capacity:*/ 128); // Insert 1 less than the number of ticks needed to finish the slot let ticks0 = create_ticks(genesis_config.ticks_per_slot - 1, 0, genesis_config.hash()); @@ -644,7 +635,7 @@ mod test { }; // Step 1: Make an incomplete transmission for slot 0 - let mut standard_broadcast_run = StandardBroadcastRun::new(0, quic_connection_cache); + let mut standard_broadcast_run = StandardBroadcastRun::new(0); standard_broadcast_run .test_process_receive_results( &leader_keypair, @@ -653,6 +644,7 @@ mod test { &blockstore, receive_results, &bank_forks, + &quic_endpoint_sender, ) .unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); @@ -719,6 +711,7 @@ mod test { &blockstore, receive_results, 
&bank_forks, + &quic_endpoint_sender, ) .unwrap(); let unfinished_slot = standard_broadcast_run.unfinished_slot.as_ref().unwrap(); @@ -762,11 +755,10 @@ mod test { let num_shreds_per_slot = 2; let (blockstore, genesis_config, _cluster_info, bank, leader_keypair, _socket, _bank_forks) = setup(num_shreds_per_slot); - let quic_connection_cache = new_quic_connection_cache(&leader_keypair); let (bsend, brecv) = unbounded(); let (ssend, _srecv) = unbounded(); let mut last_tick_height = 0; - let mut standard_broadcast_run = StandardBroadcastRun::new(0, quic_connection_cache); + let mut standard_broadcast_run = StandardBroadcastRun::new(0); let mut process_ticks = |num_ticks| { let ticks = create_ticks(num_ticks, 0, genesis_config.hash()); last_tick_height += (ticks.len() - 1) as u64; @@ -811,7 +803,8 @@ mod test { let num_shreds_per_slot = 2; let (blockstore, genesis_config, cluster_info, bank0, leader_keypair, socket, bank_forks) = setup(num_shreds_per_slot); - let quic_connection_cache = new_quic_connection_cache(&leader_keypair); + let (quic_endpoint_sender, _quic_endpoint_receiver) = + tokio::sync::mpsc::channel(/*capacity:*/ 128); // Insert complete slot of ticks needed to finish the slot let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash()); @@ -823,7 +816,7 @@ mod test { last_tick_height: ticks.len() as u64, }; - let mut standard_broadcast_run = StandardBroadcastRun::new(0, quic_connection_cache); + let mut standard_broadcast_run = StandardBroadcastRun::new(0); standard_broadcast_run .test_process_receive_results( &leader_keypair, @@ -832,6 +825,7 @@ mod test { &blockstore, receive_results, &bank_forks, + &quic_endpoint_sender, ) .unwrap(); assert!(standard_broadcast_run.unfinished_slot.is_none()) @@ -841,8 +835,7 @@ mod test { fn entries_to_shreds_max() { solana_logger::setup(); let keypair = Keypair::new(); - let quic_connection_cache = new_quic_connection_cache(&keypair); - let mut bs = StandardBroadcastRun::new(0, quic_connection_cache); + let mut bs = StandardBroadcastRun::new(0); bs.current_slot_and_parent = Some((1, 0)); let entries = create_ticks(10_000, 1, solana_sdk::hash::Hash::default()); diff --git a/turbine/src/quic_endpoint.rs b/turbine/src/quic_endpoint.rs index 513d2c9acf2918..a4b753be8db7d3 100644 --- a/turbine/src/quic_endpoint.rs +++ b/turbine/src/quic_endpoint.rs @@ -23,7 +23,6 @@ use { }, thiserror::Error, tokio::{ - runtime::Runtime, sync::{ mpsc::{Receiver as AsyncReceiver, Sender as AsyncSender}, RwLock, @@ -47,6 +46,7 @@ const CONNECTION_CLOSE_REASON_DROPPED: &[u8] = b"DROPPED"; const CONNECTION_CLOSE_REASON_INVALID_IDENTITY: &[u8] = b"INVALID_IDENTITY"; const CONNECTION_CLOSE_REASON_REPLACED: &[u8] = b"REPLACED"; +pub type AsyncTryJoinHandle = TryJoin, JoinHandle<()>>; type ConnectionCache = HashMap<(SocketAddr, Option), Arc>>>; #[derive(Error, Debug)] @@ -71,7 +71,7 @@ pub enum Error { #[allow(clippy::type_complexity)] pub fn new_quic_endpoint( - runtime: &Runtime, + runtime: &tokio::runtime::Handle, keypair: &Keypair, socket: UdpSocket, address: IpAddr, @@ -80,7 +80,7 @@ pub fn new_quic_endpoint( ( Endpoint, AsyncSender<(SocketAddr, Bytes)>, - TryJoin, JoinHandle<()>>, + AsyncTryJoinHandle, ), Error, > { @@ -156,6 +156,7 @@ async fn run_server( ) { while let Some(connecting) = endpoint.accept().await { tokio::task::spawn(handle_connecting_error( + endpoint.clone(), connecting, sender.clone(), cache.clone(), @@ -182,16 +183,18 @@ async fn run_client( } async fn handle_connecting_error( + endpoint: Endpoint, connecting: Connecting, 
sender: Sender<(Pubkey, SocketAddr, Bytes)>, cache: Arc>, ) { - if let Err(err) = handle_connecting(connecting, sender, cache).await { + if let Err(err) = handle_connecting(endpoint, connecting, sender, cache).await { error!("handle_connecting: {err:?}"); } } async fn handle_connecting( + endpoint: Endpoint, connecting: Connecting, sender: Sender<(Pubkey, SocketAddr, Bytes)>, cache: Arc>, @@ -199,11 +202,20 @@ async fn handle_connecting( let connection = connecting.await?; let remote_address = connection.remote_address(); let remote_pubkey = get_remote_pubkey(&connection)?; - handle_connection_error(remote_address, remote_pubkey, connection, sender, cache).await; + handle_connection_error( + endpoint, + remote_address, + remote_pubkey, + connection, + sender, + cache, + ) + .await; Ok(()) } async fn handle_connection_error( + endpoint: Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: Connection, @@ -211,23 +223,37 @@ async fn handle_connection_error( cache: Arc>, ) { cache_connection(remote_address, remote_pubkey, connection.clone(), &cache).await; - if let Err(err) = handle_connection(remote_address, remote_pubkey, &connection, sender).await { + if let Err(err) = handle_connection( + &endpoint, + remote_address, + remote_pubkey, + &connection, + &sender, + ) + .await + { drop_connection(remote_address, remote_pubkey, &connection, &cache).await; error!("handle_connection: {remote_pubkey}, {remote_address}, {err:?}"); } } async fn handle_connection( + endpoint: &Endpoint, remote_address: SocketAddr, remote_pubkey: Pubkey, connection: &Connection, - sender: Sender<(Pubkey, SocketAddr, Bytes)>, + sender: &Sender<(Pubkey, SocketAddr, Bytes)>, ) -> Result<(), Error> { // Assert that send won't block. debug_assert_eq!(sender.capacity(), None); loop { match connection.read_datagram().await { - Ok(bytes) => sender.send((remote_pubkey, remote_address, bytes))?, + Ok(bytes) => { + if let Err(err) = sender.send((remote_pubkey, remote_address, bytes)) { + close_quic_endpoint(endpoint); + return Err(Error::from(err)); + } + } Err(err) => { if let Some(err) = connection.close_reason() { return Err(Error::from(err)); @@ -293,6 +319,7 @@ async fn get_connection( entry.insert(connection).clone() }; tokio::task::spawn(handle_connection_error( + endpoint.clone(), connection.remote_address(), get_remote_pubkey(&connection)?, connection.clone(), @@ -404,7 +431,7 @@ mod tests { multiunzip(keypairs.iter().zip(sockets).zip(senders).map( |((keypair, socket), sender)| { new_quic_endpoint( - &runtime, + runtime.handle(), keypair, socket, IpAddr::V4(Ipv4Addr::LOCALHOST), diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index d2e751298bc48a..c4c7a751ab24ce 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -3,12 +3,12 @@ use { crate::cluster_nodes::{self, ClusterNodes, ClusterNodesCache, Error, MAX_NUM_TURBINE_HOPS}, + bytes::Bytes, crossbeam_channel::{Receiver, RecvTimeoutError}, itertools::{izip, Itertools}, lru::LruCache, rand::Rng, rayon::{prelude::*, ThreadPool, ThreadPoolBuilder}, - solana_client::tpu_connection::TpuConnection, solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol}, solana_ledger::{ leader_schedule_cache::LeaderScheduleCache, @@ -16,7 +16,6 @@ use { }, solana_measure::measure::Measure, solana_perf::deduper::Deduper, - solana_quic_client::QuicConnectionCache, solana_rayon_threadlimit::get_thread_count, solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, 
diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs
index d2e751298bc48a..c4c7a751ab24ce 100644
--- a/turbine/src/retransmit_stage.rs
+++ b/turbine/src/retransmit_stage.rs
@@ -3,12 +3,12 @@
 use {
    crate::cluster_nodes::{self, ClusterNodes, ClusterNodesCache, Error, MAX_NUM_TURBINE_HOPS},
+    bytes::Bytes,
    crossbeam_channel::{Receiver, RecvTimeoutError},
    itertools::{izip, Itertools},
    lru::LruCache,
    rand::Rng,
    rayon::{prelude::*, ThreadPool, ThreadPoolBuilder},
-    solana_client::tpu_connection::TpuConnection,
    solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol},
    solana_ledger::{
        leader_schedule_cache::LeaderScheduleCache,
@@ -16,7 +16,6 @@ use {
    },
    solana_measure::measure::Measure,
    solana_perf::deduper::Deduper,
-    solana_quic_client::QuicConnectionCache,
    solana_rayon_threadlimit::get_thread_count,
    solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions},
    solana_rpc_client_api::response::SlotUpdate,
@@ -29,7 +28,7 @@ use {
    std::{
        collections::HashMap,
        iter::repeat,
-        net::UdpSocket,
+        net::{SocketAddr, UdpSocket},
        ops::AddAssign,
        sync::{
            atomic::{AtomicU64, AtomicUsize, Ordering},
@@ -38,6 +37,7 @@ use {
        thread::{self, Builder, JoinHandle},
        time::{Duration, Instant},
    },
+    tokio::sync::mpsc::Sender as AsyncSender,
 };

 const MAX_DUPLICATE_COUNT: usize = 2;
@@ -173,7 +173,7 @@ fn retransmit(
    cluster_info: &ClusterInfo,
    shreds_receiver: &Receiver<Vec<Vec<u8>>>,
    sockets: &[UdpSocket],
-    quic_connection_cache: &QuicConnectionCache,
+    quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>,
    stats: &mut RetransmitStats,
    cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
    shred_deduper: &mut ShredDeduper<2>,
@@ -224,13 +224,10 @@ fn retransmit(
        // and if the leader is unknown they should fail signature check.
        // So here we should expect to know the slot leader and otherwise
        // skip the shred.
-        let slot_leader = match leader_schedule_cache.slot_leader_at(slot, Some(&working_bank))
-        {
-            Some(pubkey) => pubkey,
-            None => {
-                stats.unknown_shred_slot_leader += shreds.len();
-                return None;
-            }
+        let Some(slot_leader) = leader_schedule_cache.slot_leader_at(slot, Some(&working_bank))
+        else {
+            stats.unknown_shred_slot_leader += shreds.len();
+            return None;
        };
        let cluster_nodes =
            cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info);
@@ -260,7 +257,7 @@ fn retransmit(
                    &cluster_nodes,
                    socket_addr_space,
                    &sockets[index % sockets.len()],
-                    quic_connection_cache,
+                    quic_endpoint_sender,
                    stats,
                )
                .map_err(|err| {
@@ -285,7 +282,7 @@ fn retransmit(
                    &cluster_nodes,
                    socket_addr_space,
                    &sockets[index % sockets.len()],
-                    quic_connection_cache,
+                    quic_endpoint_sender,
                    stats,
                )
                .map_err(|err| {
@@ -314,7 +311,7 @@ fn retransmit_shred(
    cluster_nodes: &ClusterNodes<RetransmitStage>,
    socket_addr_space: &SocketAddrSpace,
    socket: &UdpSocket,
-    quic_connection_cache: &QuicConnectionCache,
+    quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>,
    stats: &RetransmitStats,
 ) -> Result<(/*root_distance:*/ usize, /*num_nodes:*/ usize), Error> {
    let mut compute_turbine_peers = Measure::start("turbine_start");
@@ -331,16 +328,15 @@ fn retransmit_shred(
        .fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);

    let mut retransmit_time = Measure::start("retransmit_to");
+    let num_addrs = addrs.len();
    let num_nodes = match cluster_nodes::get_broadcast_protocol(key) {
-        Protocol::QUIC => addrs
-            .iter()
-            .filter_map(|addr| {
-                quic_connection_cache
-                    .get_connection(addr)
-                    .send_data(shred)
-                    .ok()
-            })
-            .count(),
+        Protocol::QUIC => {
+            let shred = Bytes::copy_from_slice(shred);
+            addrs
+                .into_iter()
+                .filter_map(|addr| quic_endpoint_sender.try_send((addr, shred.clone())).ok())
+                .count()
+        }
        Protocol::UDP => match multi_target_send(socket, shred, &addrs) {
            Ok(()) => addrs.len(),
            Err(SendPktsError::IoError(ioerr, num_failed)) => {
@@ -357,7 +353,7 @@ fn retransmit_shred(
    retransmit_time.stop();
    stats
        .num_addrs_failed
-        .fetch_add(addrs.len() - num_nodes, Ordering::Relaxed);
+        .fetch_add(num_addrs - num_nodes, Ordering::Relaxed);
    stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
    stats
        .retransmit_total
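// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the diff.] The Protocol::QUIC arm above now
// copies the shred into Bytes once and fans out cheap reference-counted
// clones with a non-blocking try_send; num_addrs is captured before addrs is
// consumed. A self-contained version of that arm (the function name here is
// made up):
use {bytes::Bytes, std::net::SocketAddr, tokio::sync::mpsc::Sender as AsyncSender};

fn quic_fan_out(
    quic_endpoint_sender: &AsyncSender<(SocketAddr, Bytes)>,
    addrs: Vec<SocketAddr>,
    shred: &[u8],
) -> (/*num_nodes:*/ usize, /*num_failed:*/ usize) {
    let num_addrs = addrs.len(); // taken first: addrs is moved by into_iter()
    let shred = Bytes::copy_from_slice(shred); // one allocation, many clones
    let num_nodes = addrs
        .into_iter()
        .filter_map(|addr| quic_endpoint_sender.try_send((addr, shred.clone())).ok())
        .count();
    (num_nodes, num_addrs - num_nodes)
}
// ---------------------------------------------------------------------------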
@@ -375,7 +371,7 @@ fn retransmit_shred(
 /// * `r` - Receive channel for shreds to be retransmitted to all the layer 1 nodes.
 pub fn retransmitter(
    sockets: Arc<Vec<UdpSocket>>,
-    quic_connection_cache: Arc<QuicConnectionCache>,
+    quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
    bank_forks: Arc<RwLock<BankForks>>,
    leader_schedule_cache: Arc<LeaderScheduleCache>,
    cluster_info: Arc<ClusterInfo>,
@@ -407,7 +403,7 @@ pub fn retransmitter(
                    &cluster_info,
                    &shreds_receiver,
                    &sockets,
-                    &quic_connection_cache,
+                    &quic_endpoint_sender,
                    &mut stats,
                    &cluster_nodes_cache,
                    &mut shred_deduper,
@@ -432,14 +428,14 @@ impl RetransmitStage {
        leader_schedule_cache: Arc<LeaderScheduleCache>,
        cluster_info: Arc<ClusterInfo>,
        retransmit_sockets: Arc<Vec<UdpSocket>>,
-        quic_connection_cache: Arc<QuicConnectionCache>,
+        quic_endpoint_sender: AsyncSender<(SocketAddr, Bytes)>,
        retransmit_receiver: Receiver<Vec<Vec<u8>>>,
        max_slots: Arc<MaxSlots>,
        rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
    ) -> Self {
        let retransmit_thread_handle = retransmitter(
            retransmit_sockets,
-            quic_connection_cache,
+            quic_endpoint_sender,
            bank_forks,
            leader_schedule_cache,
            cluster_info,
diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs
index d93798e998ef54..f6df5693c0260d 100644
--- a/validator/src/dashboard.rs
+++ b/validator/src/dashboard.rs
@@ -71,14 +71,13 @@ impl Dashboard {
            let progress_bar = new_spinner_progress_bar();
            progress_bar.set_message("Connecting...");

-            let (rpc_addr, start_time) = match runtime.block_on(wait_for_validator_startup(
+            let Some((rpc_addr, start_time)) = runtime.block_on(wait_for_validator_startup(
                &ledger_path,
                &exit,
                progress_bar,
                refresh_interval,
-            )) {
-                None => continue,
-                Some(results) => results,
+            )) else {
+                continue;
            };

            let rpc_client = RpcClient::new_socket(rpc_addr);
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 2255ea9c0e1f0a..fb4794757b0688 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -125,9 +125,8 @@ fn wait_for_restart_window(
        .block_on(async move { admin_client.await?.rpc_addr().await })
        .map_err(|err| format!("Unable to get validator RPC address: {err}"))?;

-    let rpc_client = match rpc_addr {
-        None => return Err("RPC not available".into()),
-        Some(rpc_addr) => RpcClient::new_socket(rpc_addr),
+    let Some(rpc_client) = rpc_addr.map(RpcClient::new_socket) else {
+        return Err("RPC not available".into());
    };

    let my_identity = rpc_client.get_identity()?;
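// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the diff.] The dashboard.rs and main.rs
// hunks are mechanical rewrites of match-on-Option into let-else (stable
// since Rust 1.65), which keeps the happy path unindented. Equivalence in
// miniature, with a made-up stand-in for RpcClient::new_socket:
fn connect(rpc_addr: Option<std::net::SocketAddr>) -> Result<String, String> {
    // Before: let client = match rpc_addr {
    //     None => return Err("RPC not available".into()),
    //     Some(addr) => new_socket(addr),
    // };
    let Some(client) = rpc_addr.map(|addr| format!("client@{addr}")) else {
        return Err("RPC not available".into());
    };
    Ok(client)
}
// ---------------------------------------------------------------------------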