diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 49fe4e463..c9aa2a267 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -128,6 +128,9 @@ jobs:
       - name: Run cargo doc
         run: cargo doc --release --no-deps --workspace

+      - name: Run kip-10 example
+        run: cargo run --example kip-10
+
   # test-release:
   #   name: Test Suite Release
@@ -210,7 +213,7 @@ jobs:
         run: cargo fmt --all -- --check

       - name: Run cargo clippy
-        run: cargo clippy --workspace --tests --benches -- -D warnings
+        run: cargo clippy --workspace --tests --benches --examples -- -D warnings

   check-wasm32:
diff --git a/Cargo.lock b/Cargo.lock
index 1296a8922..a951993e9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11,24 +11,18 @@ dependencies = [
  "macroific",
  "proc-macro2",
  "quote",
- "syn 2.0.75",
+ "syn 2.0.79",
 ]

 [[package]]
 name = "addr2line"
-version = "0.22.0"
+version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
 dependencies = [
  "gimli",
 ]

-[[package]]
-name = "adler"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
-
 [[package]]
 name = "adler2"
 version = "2.0.0"
@@ -42,7 +36,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
 dependencies = [
  "crypto-common",
- "generic-array 0.14.7",
+ "generic-array",
 ]

 [[package]]
@@ -63,7 +57,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
 dependencies = [
  "cfg-if 1.0.0",
- "getrandom 0.2.15",
+ "getrandom",
  "once_cell",
  "version_check",
  "zerocopy",
@@ -78,17 +72,6 @@ dependencies = [
  "memchr",
 ]

-[[package]]
-name = "alga"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2"
-dependencies = [
- "approx",
- "num-complex 0.2.4",
- "num-traits",
-]
-
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -170,18 +153,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.86"
+version = "1.0.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
-
-[[package]]
-name = "approx"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3"
-dependencies = [
- "num-traits",
-]
+checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"

 [[package]]
 name = "arc-swap"
@@ -203,9 +177,9 @@ dependencies = [

 [[package]]
 name = "arrayref"
-version = "0.3.8"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a"
+checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"

 [[package]]
 name = "arrayvec"
@@ -248,14 +222,14 @@ dependencies = [

 [[package]]
 name = "async-executor"
-version = "1.13.0"
+version = "1.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7"
+checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec"
"30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", - "futures-lite 2.3.0", + "fastrand", + "futures-lite", "slab", ] @@ -267,61 +241,32 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io 2.3.4", - "async-lock 3.4.0", + "async-io", + "async-lock", "blocking", - "futures-lite 2.3.0", + "futures-lite", "once_cell", ] -[[package]] -name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if 1.0.0", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.4.0", + "async-lock", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "parking", - "polling 3.7.3", - "rustix 0.38.34", + "polling", + "rustix", "slab", "tracing", "windows-sys 0.59.0", ] -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", -] - [[package]] name = "async-lock" version = "3.4.0" @@ -335,20 +280,20 @@ dependencies = [ [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite", "gloo-timers", "kv-log-macro", "log", @@ -362,9 +307,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -373,13 +318,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -390,13 +335,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -429,24 +374,23 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "itoa", "matchit", "memchr", @@ -455,50 +399,47 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 0.1.2", - "tower", + "sync_wrapper 1.0.1", + "tower 0.5.1", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.1" @@ -560,7 +501,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -601,7 +542,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -613,7 +554,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "piper", ] @@ -637,7 +578,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "syn_derive", ] @@ -665,9 +606,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2-sys" @@ -720,9 +661,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version 
= "1.1.13" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -888,9 +829,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -898,9 +839,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -910,14 +851,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -966,9 +907,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -1009,9 +950,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1034,7 +975,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.16", + "clap 4.5.19", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1130,8 +1071,8 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", + "generic-array", + "rand_core", "typenum", ] @@ -1159,7 +1100,7 @@ dependencies = [ "aead", "chacha20", "cipher", - "generic-array 0.14.7", + "generic-array", "poly1305", "salsa20", "subtle", @@ -1199,7 +1140,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1223,7 +1164,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1234,14 +1175,14 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "dashmap" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1266,7 +1207,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1292,33 +1233,33 @@ dependencies = [ [[package]] name = "derive_builder" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ "derive_builder_core", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1331,7 +1272,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1394,6 +1335,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "downcast" version = "0.11.0" @@ -1453,7 +1400,7 @@ checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c" dependencies = [ "num-traits", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1530,7 +1477,7 @@ dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -1544,18 +1491,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fiat-crypto" @@ -1565,9 +1503,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1583,21 +1521,21 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "fixedstr" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"54e049f021908beff8f8c430a99f5c136d3be69f1667346e581f446b173bc012" +checksum = "60aba7afd9b1b9e1950c2b7e8bcac3cc44a273c62a02717dedca2d0a1aee694d" dependencies = [ "serde", ] [[package]] name = "flate2" -version = "1.0.32" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -1617,9 +1555,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1632,9 +1570,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1642,15 +1580,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1659,24 +1597,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-lite" -version = "1.13.0" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1684,7 +1607,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand", "futures-core", "futures-io", "parking", @@ -1693,32 +1616,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1732,15 +1655,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f797e67af32588215eaaab8327027ee8e71b9dd0b2b26996aedf20c030fce309" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1752,17 +1666,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.15" @@ -1772,15 +1675,15 @@ dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -1790,9 +1693,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -1812,7 +1715,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1831,7 +1734,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.4.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1863,6 +1766,15 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + [[package]] name = "hashbrown" version = "0.14.5" @@ -1872,6 +1784,12 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "heapless" version = "0.8.0" @@ -2008,17 +1926,11 @@ dependencies = [ 
"pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2049,7 +1961,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -2069,6 +1981,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -2078,39 +1991,40 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", - "webpki-roots 0.26.5", + "webpki-roots", ] [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 0.14.30", + "hyper 1.4.1", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", @@ -2119,18 +2033,17 @@ dependencies = [ "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", - "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2178,7 +2091,7 @@ dependencies = [ "http 0.2.12", "hyper 0.14.30", "log", - "rand 0.8.5", + "rand", "tokio", "url", "xmltree", @@ -2214,12 +2127,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -2229,7 +2142,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ 
@@ -2267,22 +2180,11 @@ dependencies = [
  "uuid 0.8.2",
 ]

-[[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.9",
- "libc",
- "windows-sys 0.48.0",
-]
-
 [[package]]
 name = "ipnet"
-version = "2.9.0"
+version = "2.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
+checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708"

 [[package]]
 name = "is-terminal"
@@ -2363,7 +2265,7 @@ dependencies = [

 [[package]]
 name = "kaspa-addresses"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "borsh",
  "criterion",
@@ -2380,7 +2282,7 @@ dependencies = [

 [[package]]
 name = "kaspa-addressmanager"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "borsh",
  "igd-next",
@@ -2392,38 +2294,37 @@ dependencies = [
  "local-ip-address",
  "log",
  "parking_lot",
- "rand 0.8.5",
+ "rand",
  "rocksdb",
+ "rv",
  "serde",
- "statest",
- "statrs",
  "thiserror",
  "tokio",
 ]

 [[package]]
 name = "kaspa-alloc"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "mimalloc",
 ]

 [[package]]
 name = "kaspa-bip32"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "borsh",
  "bs58",
  "faster-hex",
- "getrandom 0.2.15",
+ "getrandom",
  "hmac",
  "js-sys",
  "kaspa-consensus-core",
  "kaspa-utils",
  "once_cell",
  "pbkdf2",
- "rand 0.8.5",
- "rand_core 0.6.4",
+ "rand",
+ "rand_core",
  "ripemd",
  "secp256k1",
  "serde",
@@ -2437,7 +2338,7 @@ dependencies = [

 [[package]]
 name = "kaspa-cli"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "async-trait",
  "borsh",
@@ -2484,7 +2385,7 @@ dependencies = [

 [[package]]
 name = "kaspa-connectionmanager"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "duration-string",
  "futures-util",
@@ -2495,13 +2396,13 @@ dependencies = [
  "kaspa-utils",
  "log",
  "parking_lot",
- "rand 0.8.5",
+ "rand",
  "tokio",
 ]

 [[package]]
 name = "kaspa-consensus"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "arc-swap",
  "async-channel 2.3.1",
@@ -2511,8 +2412,9 @@ dependencies = [
  "faster-hex",
  "flate2",
  "futures-util",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
  "itertools 0.13.0",
+ "kaspa-addresses",
  "kaspa-consensus-core",
  "kaspa-consensus-notify",
  "kaspa-consensusmanager",
@@ -2530,8 +2432,8 @@ dependencies = [
  "log",
  "once_cell",
  "parking_lot",
- "rand 0.8.5",
- "rand_distr 0.4.3",
+ "rand",
+ "rand_distr",
  "rayon",
  "rocksdb",
  "secp256k1",
@@ -2544,7 +2446,7 @@ dependencies = [

 [[package]]
 name = "kaspa-consensus-client"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "ahash",
  "cfg-if 1.0.0",
@@ -2559,7 +2461,7 @@ dependencies = [
  "kaspa-txscript",
  "kaspa-utils",
  "kaspa-wasm-core",
- "rand 0.8.5",
+ "rand",
  "secp256k1",
  "serde",
  "serde-wasm-bindgen",
@@ -2572,8 +2474,9 @@ dependencies = [

 [[package]]
 name = "kaspa-consensus-core"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
+ "arc-swap",
  "async-trait",
  "bincode",
  "borsh",
@@ -2581,7 +2484,7 @@ dependencies = [
  "criterion",
  "faster-hex",
  "futures-util",
- "getrandom 0.2.15",
+ "getrandom",
  "itertools 0.13.0",
  "js-sys",
  "kaspa-addresses",
@@ -2592,7 +2495,7 @@ dependencies = [
  "kaspa-muhash",
  "kaspa-txscript-errors",
  "kaspa-utils",
- "rand 0.8.5",
+ "rand",
  "secp256k1",
  "serde",
  "serde-wasm-bindgen",
@@ -2610,7 +2513,7 @@ dependencies = [

 [[package]]
 name = "kaspa-consensus-notify"
-version = "0.15.2"
+version = "0.15.3"
[ "async-channel 2.3.1", "cfg-if 1.0.0", @@ -2629,7 +2532,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "faster-hex", @@ -2640,7 +2543,7 @@ dependencies = [ "kaspa-hashes", "kaspa-txscript", "kaspa-utils", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2653,7 +2556,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.15.2" +version = "0.15.3" dependencies = [ "duration-string", "futures", @@ -2665,13 +2568,13 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "tokio", ] [[package]] name = "kaspa-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2689,7 +2592,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2711,19 +2614,19 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "enum-primitive-derive", "faster-hex", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-hashes", "kaspa-utils", "num-traits", "num_cpus", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "smallvec", @@ -2733,7 +2636,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2753,9 +2656,9 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", + "rand", "regex", - "rustls 0.23.12", + "rustls", "thiserror", "tokio", "tokio-stream", @@ -2765,7 +2668,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2781,7 +2684,7 @@ dependencies = [ "log", "paste", "prost", - "rand 0.8.5", + "rand", "regex", "thiserror", "tokio", @@ -2794,7 +2697,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-stream", @@ -2818,8 +2721,8 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", - "rustls 0.23.12", + "rand", + "rustls", "thiserror", "tokio", "tokio-stream", @@ -2830,7 +2733,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -2841,7 +2744,7 @@ dependencies = [ "kaspa-utils", "keccak", "once_cell", - "rand 0.8.5", + "rand", "serde", "sha2", "sha3", @@ -2851,7 +2754,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2870,7 +2773,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2890,7 +2793,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "thiserror", "tokio", "triggered", @@ -2898,7 +2801,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "criterion", @@ -2907,7 +2810,7 @@ dependencies = [ "kaspa-utils", "malachite-base", "malachite-nz", - "rand_chacha 0.3.1", + "rand_chacha", "serde", "serde-wasm-bindgen", "thiserror", @@ -2919,14 +2822,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" 
-version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -2942,7 +2845,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "futures-util", @@ -2958,7 +2861,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", "smallvec", @@ -2969,7 +2872,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -2977,20 +2880,20 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "kaspa-hashes", "kaspa-math", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rayon", "serde", ] [[package]] name = "kaspa-notify" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -2999,7 +2902,7 @@ dependencies = [ "derive_more", "futures", "futures-util", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", @@ -3013,7 +2916,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "serde", "thiserror", "tokio", @@ -3026,12 +2929,12 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "chrono", "futures", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addressmanager", "kaspa-connectionmanager", @@ -3048,7 +2951,7 @@ dependencies = [ "kaspa-utils-tower", "log", "parking_lot", - "rand 0.8.5", + "rand", "thiserror", "tokio", "tokio-stream", @@ -3057,7 +2960,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.15.2" +version = "0.15.3" dependencies = [ "borsh", "ctrlc", @@ -3075,7 +2978,7 @@ dependencies = [ "log", "parking_lot", "prost", - "rand 0.8.5", + "rand", "seqlock", "serde", "thiserror", @@ -3088,7 +2991,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.15.2" +version = "0.15.3" dependencies = [ "kaspa-core", "log", @@ -3100,7 +3003,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.15.2" +version = "0.15.3" dependencies = [ "criterion", "js-sys", @@ -3116,7 +3019,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", @@ -3143,7 +3046,7 @@ dependencies = [ "kaspa-utils", "log", "paste", - "rand 0.8.5", + "rand", "serde", "serde-wasm-bindgen", "serde_json", @@ -3158,7 +3061,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3170,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "kaspa-addresses", @@ -3199,20 +3102,20 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "async-trait", "bincode", "chrono", - "clap 4.5.16", + "clap 4.5.19", "criterion", "crossbeam-channel", "dhat", "faster-hex", "flate2", "futures-util", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", @@ -3243,8 +3146,8 @@ dependencies = [ "kaspad", "log", "parking_lot", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "rocksdb", "secp256k1", @@ -3259,7 +3162,7 @@ dependencies = [ [[package]] name = 
"kaspa-txscript" -version = "0.15.2" +version = "0.15.3" dependencies = [ "blake2b_simd", "borsh", @@ -3267,7 +3170,7 @@ dependencies = [ "criterion", "hex", "hexplay", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", @@ -3277,7 +3180,7 @@ dependencies = [ "kaspa-wasm-core", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -3291,7 +3194,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.15.2" +version = "0.15.3" dependencies = [ "secp256k1", "thiserror", @@ -3299,7 +3202,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.15.2" +version = "0.15.3" dependencies = [ "arc-swap", "async-channel 2.3.1", @@ -3319,7 +3222,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "rlimit", "serde", "serde_json", @@ -3335,21 +3238,23 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.15.2" +version = "0.15.3" dependencies = [ + "bytes", "cfg-if 1.0.0", "futures", - "hyper 0.14.30", + "http-body 1.0.1", + "http-body-util", "log", "pin-project-lite", "tokio", - "tower", + "tower 0.5.1", "tower-http", ] [[package]] name = "kaspa-utxoindex" -version = "0.15.2" +version = "0.15.3" dependencies = [ "futures", "kaspa-consensus", @@ -3362,7 +3267,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "thiserror", @@ -3370,7 +3275,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3382,7 +3287,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "js-sys", @@ -3396,7 +3301,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "aes", "ahash", @@ -3404,7 +3309,7 @@ dependencies = [ "async-channel 2.3.1", "async-std", "async-trait", - "base64 0.22.1", + "base64", "borsh", "cfb-mode", "cfg-if 1.0.0", @@ -3447,7 +3352,7 @@ dependencies = [ "md-5", "pad", "pbkdf2", - "rand 0.8.5", + "rand", "regex", "ripemd", "secp256k1", @@ -3477,7 +3382,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-trait", "borsh", @@ -3492,7 +3397,7 @@ dependencies = [ "kaspa-txscript-errors", "kaspa-utils", "kaspa-wasm-core", - "rand 0.8.5", + "rand", "ripemd", "secp256k1", "serde", @@ -3510,7 +3415,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.15.2" +version = "0.15.3" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3523,7 +3428,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-pskt" -version = "0.15.2" +version = "0.15.3" dependencies = [ "bincode", "derive_builder", @@ -3550,7 +3455,7 @@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.15.2" +version = "0.15.3" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3578,7 +3483,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.15.2" +version = "0.15.3" dependencies = [ "faster-hex", "hexplay", @@ -3589,7 +3494,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-std", "async-trait", @@ -3604,9 +3509,9 @@ dependencies = [ "kaspa-rpc-core", "kaspa-rpc-macros", "paste", - "rand 0.8.5", + "rand", "regex", - "rustls 0.23.12", + "rustls", "serde", "serde-wasm-bindgen", "serde_json", @@ 
@@ -3625,7 +3530,7 @@ dependencies = [

 [[package]]
 name = "kaspa-wrpc-example-subscriber"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "ctrlc",
  "futures",
@@ -3640,10 +3545,10 @@ dependencies = [

 [[package]]
 name = "kaspa-wrpc-proxy"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "async-trait",
- "clap 4.5.16",
+ "clap 4.5.19",
  "kaspa-consensus-core",
  "kaspa-grpc-client",
  "kaspa-rpc-core",
@@ -3659,7 +3564,7 @@ dependencies = [

 [[package]]
 name = "kaspa-wrpc-server"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "async-trait",
  "borsh",
@@ -3675,7 +3580,7 @@ dependencies = [
  "log",
  "num_cpus",
  "paste",
- "rustls 0.23.12",
+ "rustls",
  "serde",
  "thiserror",
  "tokio",
@@ -3687,7 +3592,7 @@ dependencies = [

 [[package]]
 name = "kaspa-wrpc-simple-client-example"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "futures",
  "kaspa-rpc-core",
@@ -3697,7 +3602,7 @@ dependencies = [

 [[package]]
 name = "kaspa-wrpc-wasm"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "ahash",
  "async-std",
@@ -3727,14 +3632,15 @@ dependencies = [

 [[package]]
 name = "kaspad"
-version = "0.15.2"
+version = "0.15.3"
 dependencies = [
  "async-channel 2.3.1",
  "cfg-if 1.0.0",
- "clap 4.5.16",
+ "clap 4.5.19",
  "dhat",
  "dirs",
  "futures-util",
+ "itertools 0.13.0",
  "kaspa-addresses",
  "kaspa-addressmanager",
  "kaspa-alloc",
@@ -3760,8 +3666,9 @@ dependencies = [
  "kaspa-wrpc-server",
  "log",
  "num_cpus",
- "rand 0.8.5",
+ "rand",
  "rayon",
+ "rocksdb",
  "serde",
  "serde_with",
  "tempfile",
@@ -3803,9 +3710,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"

 [[package]]
 name = "libc"
-version = "0.2.158"
+version = "0.2.159"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"

 [[package]]
 name = "libloading"
@@ -3862,9 +3769,9 @@ dependencies = [

 [[package]]
 name = "libz-sys"
-version = "1.1.19"
+version = "1.1.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fdc53a7799a7496ebc9fd29f31f7df80e83c9bda5299768af5f9e59eeea74647"
+checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472"
 dependencies = [
  "cc",
  "pkg-config",
@@ -3891,12 +3798,6 @@ dependencies = [
  "syn 1.0.109",
 ]

-[[package]]
-name = "linux-raw-sys"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
-
 [[package]]
 name = "linux-raw-sys"
 version = "0.4.14"
@@ -3905,14 +3806,14 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"

 [[package]]
 name = "local-ip-address"
-version = "0.6.1"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696"
+checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782"
 dependencies = [
  "libc",
  "neli",
  "thiserror",
- "windows-sys 0.48.0",
+ "windows-sys 0.59.0",
 ]

 [[package]]
@@ -3959,7 +3860,7 @@ dependencies = [
  "log-mdc",
  "once_cell",
  "parking_lot",
- "rand 0.8.5",
+ "rand",
  "serde",
  "serde-value",
  "serde_json",
@@ -3970,11 +3871,20 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "lru"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17"
+dependencies = [
+ "hashbrown 0.13.2",
+]
+
 [[package]]
 name = "lz4-sys"
-version = "1.10.0"
+version = "1.11.1+lz4-1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868"
+checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6"
 dependencies = [
  "cc",
  "libc",
@@ -4019,7 +3929,7 @@ dependencies = [
  "cfg-if 1.0.0",
  "proc-macro2",
  "quote",
- "syn 2.0.75",
+ "syn 2.0.79",
 ]

 [[package]]
@@ -4030,7 +3940,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.75",
+ "syn 2.0.79",
 ]

 [[package]]
@@ -4043,14 +3953,14 @@ dependencies = [
  "macroific_core",
  "proc-macro2",
  "quote",
- "syn 2.0.75",
+ "syn 2.0.79",
 ]

 [[package]]
 name = "malachite-base"
-version = "0.4.15"
+version = "0.4.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5f8d7930df6fcb9c86761ca0999ba484d7b6469c81cee4a7d38da5386440f96"
+checksum = "46059721011b0458b7bd6d9179be5d0b60294281c23320c207adceaecc54d13b"
 dependencies = [
  "hashbrown 0.14.5",
  "itertools 0.11.0",
@@ -4060,9 +3970,9 @@ dependencies = [

 [[package]]
 name = "malachite-nz"
-version = "0.4.15"
+version = "0.4.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa263ca62420c1f65cf6758f55c979a49ad83169f332e602b1890f1e1277a429"
+checksum = "1503b27e825cabd1c3d0ff1e95a39fb2ec9eab6fd3da6cfa41aec7091d273e78"
 dependencies = [
  "itertools 0.11.0",
  "libm",
@@ -4086,11 +3996,15 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"

 [[package]]
 name = "matrixmultiply"
-version = "0.2.4"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1"
+checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a"
 dependencies = [
+ "autocfg",
+ "num_cpus",
+ "once_cell",
  "rawpointer",
+ "thread-tree",
 ]

 [[package]]
@@ -4149,15 +4063,6 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"

-[[package]]
-name = "miniz_oxide"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08"
-dependencies = [
- "adler",
-]
-
 [[package]]
 name = "miniz_oxide"
 version = "0.8.0"
@@ -4181,7 +4086,7 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
 dependencies = [
  "libc",
  "log",
- "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasi",
  "windows-sys 0.48.0",
 ]

@@ -4193,7 +4098,7 @@ checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
 dependencies = [
  "hermit-abi 0.3.9",
  "libc",
- "wasi 0.11.0+wasi-snapshot-preview1",
+ "wasi",
  "windows-sys 0.52.0",
 ]

@@ -4203,44 +4108,13 @@ version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"

-[[package]]
-name = "nalgebra"
-version = "0.19.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0abb021006c01b126a936a8dd1351e0720d83995f4fc942d0d426c654f990745"
-dependencies = [
- "alga",
- "approx",
- "generic-array 0.13.3",
- "matrixmultiply",
- "num-complex 0.2.4",
- "num-rational 0.2.4",
- "num-traits",
- "rand 0.7.3",
- "rand_distr 0.2.2",
- "typenum",
-]
-
 [[package]]
 name = "nanoid"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "ndarray" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac06db03ec2f46ee0ecdca1a1c34a99c0d188a0d83439b84bf0cb4b386e4ab09" -dependencies = [ - "matrixmultiply", - "num-complex 0.2.4", - "num-integer", - "num-traits", - "rawpointer", + "rand", ] [[package]] @@ -4332,10 +4206,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", - "num-complex 0.4.6", + "num-complex", "num-integer", "num-iter", - "num-rational 0.4.2", + "num-rational", "num-traits", ] @@ -4349,16 +4223,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - "num-traits", -] - [[package]] name = "num-complex" version = "0.4.6" @@ -4394,17 +4258,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-rational" version = "0.4.2" @@ -4466,18 +4319,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -4497,6 +4350,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "order-stat" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" + [[package]] name = "ordered-float" version = "2.10.1" @@ -4527,9 +4386,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -4571,7 +4430,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4581,7 +4440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -4613,6 +4472,30 @@ version = "2.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "peroxide" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703b5fbdc1f9018a66e2db8758633cec31d39ad3127bfd38c9b6ad510637519c" +dependencies = [ + "matrixmultiply", + "order-stat", + "peroxide-ad", + "puruspe", + "rand", + "rand_distr", +] + +[[package]] +name = "peroxide-ad" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6fba8ff3f40b67996f7c745f699babaa3e57ef5c8178ec999daf7eedc51dc8c" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -4620,27 +4503,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.4.0", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -4662,31 +4545,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand", "futures-io", ] [[package]] name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "polling" -version = "2.8.0" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" -dependencies = [ - "autocfg", - "bitflags 1.3.2", - "cfg-if 1.0.0", - "concurrent-queue", - "libc", - "log", - "pin-project-lite", - "windows-sys 0.48.0", -] +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" @@ -4698,7 +4565,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.34", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -4716,9 +4583,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -4737,21 +4604,21 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 
2.0.75", + "syn 2.0.79", ] [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -4788,9 +4655,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", "prost-derive", @@ -4798,13 +4665,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", "heck", - "itertools 0.12.1", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -4813,32 +4680,38 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.75", + "syn 2.0.79", "tempfile", ] [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "prost-types" -version = "0.12.6" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" dependencies = [ "prost", ] +[[package]] +name = "puruspe" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3804877ffeba468c806c2ad9057bbbae92e4b2c410c2f108baaa0042f241fa4c" + [[package]] name = "quinn" version = "0.11.5" @@ -4850,8 +4723,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.12", - "socket2 0.5.7", + "rustls", + "socket2", "thiserror", "tokio", "tracing", @@ -4864,10 +4737,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", - "rand 0.8.5", + "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.12", + "rustls", "slab", "thiserror", "tinyvec", @@ -4882,33 +4755,20 @@ checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" 
-dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -4916,18 +4776,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -4937,16 +4787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -4955,16 +4796,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_distr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" -dependencies = [ - "rand 0.7.3", + "getrandom", ] [[package]] @@ -4974,16 +4806,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "rand", ] [[package]] @@ -5014,9 +4837,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] @@ -5027,16 +4850,16 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -5046,9 +4869,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -5057,17 +4880,17 @@ dependencies = [ [[package]] 
name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "encoding_rs", "futures-core", @@ -5087,8 +4910,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", - "rustls-pemfile 2.1.3", + "rustls", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -5096,13 +4919,13 @@ dependencies = [ "sync_wrapper 1.0.1", "system-configuration", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.5", + "webpki-roots", "windows-registry", ] @@ -5114,7 +4937,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom 0.2.15", + "getrandom", "libc", "spin", "untrusted", @@ -5132,9 +4955,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ "libc", ] @@ -5151,10 +4974,10 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", - "clap 4.5.16", + "clap 4.5.19", "criterion", "faster-hex", "itertools 0.13.0", @@ -5193,106 +5016,61 @@ checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.37.27" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" -dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustix" -version = "0.38.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.14", + "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.23.12" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" -dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki", "subtle", "zeroize", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" - -[[package]] -name = "rustls-webpki" -version = "0.101.7" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -5305,6 +5083,22 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +[[package]] +name = "rv" +version = "0.16.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c07e0a3b756794c7ea2f05d93760ffb946ff4f94b255d92444d94c19fd71f4ab" +dependencies = [ + "doc-comment", + "lru", + "num", + "num-traits", + "peroxide", + "rand", + "rand_distr", + "special", +] + [[package]] name = "ryu" version = "1.0.18" @@ -5341,32 +5135,22 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "secp256k1" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "rand 0.8.5", + "rand", "secp256k1-sys", "serde", ] [[package]] name = "secp256k1-sys" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] @@ -5397,9 +5181,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.208" +version = "1.0.210" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5427,20 +5211,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -5456,14 +5240,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -5482,15 +5266,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.4.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -5500,14 +5284,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5516,7 +5300,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5603,15 +5387,15 @@ dependencies = [ [[package]] name = "simpa" -version = "0.15.2" +version = "0.15.3" dependencies = [ "async-channel 2.3.1", "cfg-if 1.0.0", - "clap 4.5.16", + "clap 4.5.19", "dhat", "futures", "futures-util", - "indexmap 2.4.0", + "indexmap 2.6.0", "itertools 0.13.0", "kaspa-alloc", "kaspa-consensus", @@ -5624,8 +5408,8 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "secp256k1", "tokio", @@ -5665,16 +5449,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -5691,6 +5465,15 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd62203d74a728ae353b4d716fc2a80e8da881dfdf8bbc0c012d877a58c4030" +[[package]] +name = "special" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b89cf0d71ae639fdd8097350bfac415a41aabf1d5ddd356295fdc95f09760382" +dependencies = [ + "libm", +] + [[package]] name = "spin" version = "0.9.8" @@ -5703,27 +5486,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" -[[package]] -name = "statest" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04ed65138bd1680f47e4d980ac7d3cf5e827fa99c2fa6683e640094a494602b4" -dependencies = [ - "ndarray", - "num-traits", - "statrs", -] - -[[package]] -name = "statrs" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e34b58a8f9b7462b6922e0b4e3c83d1b3c2075f7f996a56d6c66afa81590064" -dependencies = [ - "nalgebra", - "rand 0.7.3", -] - [[package]] name = "strsim" version = "0.8.0" @@ -5761,9 +5523,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.75" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -5779,7 +5541,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5799,9 +5561,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.31.2" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4115055da5f572fff541dd0c4e61b0262977f453cc9fe04be83aba25a89bdab" +checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" dependencies = [ "core-foundation-sys", "libc", @@ -5834,14 +5596,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.1.0", + "fastrand", "once_cell", - "rustix 0.38.34", + "rustix", "windows-sys 0.59.0", ] @@ -5876,22 +5638,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -5910,6 +5672,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "thread-tree" +version = 
"0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbd370cb847953a25954d9f63e14824a36113f8c72eecf6eccef5dc4b45d630" +dependencies = [ + "crossbeam-channel", +] + [[package]] name = "time" version = "0.3.36" @@ -5970,9 +5741,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -5980,21 +5751,11 @@ dependencies = [ "mio 1.0.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.4.0" @@ -6003,17 +5764,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", + "syn 2.0.79", ] [[package]] @@ -6022,16 +5773,16 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6046,19 +5797,19 @@ checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.12", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls", "tungstenite", - "webpki-roots 0.26.5", + "webpki-roots", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6076,7 +5827,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit", ] [[package]] @@ -6090,71 +5841,63 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.4.0", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" -dependencies = [ - "indexmap 2.4.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.18", + "winnow", ] [[package]] name = "tonic" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.7", + "base64", "bytes", "flate2", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.30", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls-pemfile", + "socket2", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", - "webpki-roots 0.25.4", + "webpki-roots", ] [[package]] name = "tonic-build" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", "prost-build", + "prost-types", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6168,7 +5911,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util", @@ -6177,19 +5920,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "bitflags 2.6.0", "bytes", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", "pin-project-lite", "tower-layer", "tower-service", @@ -6213,7 +5968,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -6227,7 +5981,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6263,8 +6017,8 @@ dependencies = [ "http 1.1.0", "httparse", "log", - "rand 0.8.5", - "rustls 0.23.12", + "rand", + "rustls", "rustls-pki-types", "sha1", "thiserror", @@ -6288,15 +6042,15 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" 
+checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-linebreak" @@ -6306,24 +6060,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "universal-hash" @@ -6385,7 +6139,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.15", + "getrandom", ] [[package]] @@ -6394,8 +6148,8 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom", + "rand", "serde", "wasm-bindgen", ] @@ -6439,12 +6193,6 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" -[[package]] -name = "waker-fn" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" - [[package]] name = "walkdir" version = "2.5.0" @@ -6464,12 +6212,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6500,7 +6242,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "wasm-bindgen-shared", ] @@ -6534,7 +6276,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6568,7 +6310,7 @@ checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6583,15 +6325,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - -[[package]] -name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -6605,7 +6341,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix", ] [[package]] @@ -6678,7 +6414,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6689,7 +6425,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] @@ -6881,18 +6617,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -6927,10 +6654,10 @@ dependencies = [ "dirs", "faster-hex", "futures", - "getrandom 0.2.15", + "getrandom", "instant", "js-sys", - "rand 0.8.5", + "rand", "rlimit", "serde", "serde-wasm-bindgen", @@ -7058,7 +6785,7 @@ dependencies = [ "futures", "js-sys", "nw-sys", - "rand 0.8.5", + "rand", "serde", "serde-wasm-bindgen", "thiserror", @@ -7108,9 +6835,9 @@ dependencies = [ "downcast-rs", "futures", "futures-util", - "getrandom 0.2.15", + "getrandom", "manual_future", - "rand 0.8.5", + "rand", "serde", "serde_json", "thiserror", @@ -7156,7 +6883,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d161c4b844eee479f81306f2526266f9608a663e0a679d9fc0572ee15c144e06" dependencies = [ "async-std", - "base64 0.22.1", + "base64", "cfg-if 1.0.0", "chrome-sys", "faster-hex", @@ -7316,9 +7043,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "539a77ee7c0de333dcc6da69b177380a0b81e0dacfa4f7344c465a36871ee601" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" @@ -7353,7 +7080,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.75", + "syn 2.0.79", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 37acfb172..aa304d37f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,8 +62,8 @@ members = [ ] [workspace.package] -rust-version = "1.81.0" -version = "0.15.2" +rust-version = "1.82.0" +version = "0.15.3" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -80,61 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } -kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } 
-kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } -kaspa-cli = { version = "0.15.2", path = "cli" } -kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.15.2", path = "consensus" } -kaspa-consensus-core = { version = "0.15.2", path = "consensus/core" } -kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } -kaspa-core = { version = "0.15.2", path = "core" } -kaspa-daemon = { version = "0.15.2", path = "daemon" } -kaspa-database = { version = "0.15.2", path = "database" } -kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } -kaspa-index-core = { version = "0.15.2", path = "indexes/core" } -kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } -kaspa-math = { version = "0.15.2", path = "math" } -kaspa-merkle = { version = "0.15.2", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } -kaspa-mining = { version = "0.15.2", path = "mining" } -kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } -kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } -kaspa-notify = { version = "0.15.2", path = "notify" } -kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.15.2", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.15.2", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } -kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.15.2", path = "utils" } -kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } -kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.15.2", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.15.2", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } -kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } -kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } -kaspa-wasm = { version = "0.15.2", path = "wasm" } -kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } -kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.15.2", path = "kaspad" } -kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.3", path = "testing/integration" } +kaspa-addresses = { version = 
"0.15.3", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.15.3", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.3", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.3", path = "cli" } +kaspa-connectionmanager = { version = "0.15.3", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.15.3", path = "consensus" } +kaspa-consensus-core = { version = "0.15.3", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.3", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.3", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.3", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.3", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.3", path = "core" } +kaspa-daemon = { version = "0.15.3", path = "daemon" } +kaspa-database = { version = "0.15.3", path = "database" } +kaspa-grpc-client = { version = "0.15.3", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.3", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.3", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.3", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.3", path = "indexes/core" } +kaspa-index-processor = { version = "0.15.3", path = "indexes/processor" } +kaspa-math = { version = "0.15.3", path = "math" } +kaspa-merkle = { version = "0.15.3", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.3", path = "metrics/core" } +kaspa-mining = { version = "0.15.3", path = "mining" } +kaspa-mining-errors = { version = "0.15.3", path = "mining/errors" } +kaspa-muhash = { version = "0.15.3", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.3", path = "notify" } +kaspa-p2p-flows = { version = "0.15.3", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.3", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.3", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.3", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.3", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.3", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.15.3", path = "rpc/service" } +kaspa-txscript = { version = "0.15.3", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.3", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.3", path = "utils" } +kaspa-utils-tower = { version = "0.15.3", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.3", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.3", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.15.3", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.3", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.3", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.3", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.3", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.3", path = "wasm" } +kaspa-wasm-core = { version = "0.15.3", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.3", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.3", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.3", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.3", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.3", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.3", path = "kaspad" } +kaspa-alloc = { version = "0.15.3", path = 
"utils/alloc" } # external aes = "0.8.3" @@ -150,6 +150,7 @@ bincode = { version = "1.3.3", default-features = false } blake2b_simd = "1.0.2" borsh = { version = "1.5.1", features = ["derive", "rc"] } bs58 = { version = "0.5.0", features = ["check"], default-features = false } +bytes = "1.7.1" cc = "1.0.83" cfb-mode = "0.8.2" cfg-if = "1.0.0" @@ -188,6 +189,8 @@ hex-literal = "0.4.1" hexplay = "0.3.0" hmac = { version = "0.12.1", default-features = false } home = "0.5.5" +http-body = "1.0.1" +http-body-util = "0.1.2" igd-next = { version = "0.14.2", features = ["aio_tokio"] } indexmap = "2.1.0" intertrait = "0.2.2" @@ -211,8 +214,7 @@ parking_lot = "0.12.1" paste = "1.0.14" pbkdf2 = "0.12.2" portable-atomic = { version = "1.5.1", features = ["float"] } -prost = "0.12.1" -# prost = "0.13.1" +prost = "0.13.2" rand = "0.8.5" rand_chacha = "0.3.1" rand_core = { version = "0.6.4", features = ["std"] } @@ -222,6 +224,7 @@ regex = "1.10.2" ripemd = { version = "0.1.3", default-features = false } rlimit = "0.10.1" rocksdb = "0.22.0" +rv = "0.16.4" secp256k1 = { version = "0.29.0", features = [ "global-context", "rand-std", @@ -241,8 +244,6 @@ sha3 = "0.10.8" slugify-rs = "0.0.3" smallvec = { version = "1.11.1", features = ["serde"] } sorted-insert = "0.2.3" -statest = "0.2.2" -statrs = "0.13.0" # TODO "0.16.0" subtle = { version = "2.5.0", default-features = false } sysinfo = "0.31.2" tempfile = "3.8.1" @@ -251,8 +252,8 @@ thiserror = "1.0.50" tokio = { version = "1.33.0", features = ["sync", "rt-multi-thread"] } tokio-stream = "0.1.14" toml = "0.8.8" -tonic = { version = "0.10.2", features = ["tls-webpki-roots", "gzip", "transport"] } -tonic-build = { version = "0.10.2", features = ["prost"] } +tonic = { version = "0.12.3", features = ["tls-webpki-roots", "gzip", "transport"] } +tonic-build = { version = "0.12.3", features = ["prost"] } triggered = "0.1.2" uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] } wasm-bindgen = { version = "0.2.93", features = ["serde-serialize"] } @@ -262,12 +263,11 @@ web-sys = "0.3.70" xxhash-rust = { version = "0.8.7", features = ["xxh3"] } zeroize = { version = "1.6.0", default-features = false, features = ["alloc"] } pin-project-lite = "0.2.13" -tower-http = { version = "0.4.4", features = [ +tower-http = { version = "0.5.2", features = [ "map-response-body", "map-request-body", ] } -tower = "0.4.7" -hyper = "0.14.27" +tower = "0.5.1" chrono = "0.4.31" indexed_db_futures = "0.5.0" # workflow dependencies that are not a part of core libraries diff --git a/README.md b/README.md index ada38c55d..d9066efa8 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,15 @@ The default branch of this repository is `master` and new contributions are cons ## Installation
Building on Linux - + 1. Install general prerequisites ```bash - sudo apt install curl git build-essential libssl-dev pkg-config + sudo apt install curl git build-essential libssl-dev pkg-config ``` 2. Install Protobuf (required for gRPC) - + ```bash sudo apt install protobuf-compiler libprotobuf-dev #Required for gRPC ``` @@ -36,8 +36,8 @@ The default branch of this repository is `master` and new contributions are cons llvm python3-clang ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -45,7 +45,7 @@ The default branch of this repository is `master` and new contributions are cons 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa @@ -55,7 +55,7 @@ The default branch of this repository is `master` and new contributions are cons -
+</details>
Building on Windows @@ -63,18 +63,18 @@ The default branch of this repository is `master` and new contributions are cons 2. Install [Protocol Buffers](https://github.com/protocolbuffers/protobuf/releases/download/v21.10/protoc-21.10-win64.zip) and add the `bin` directory to your `Path` - + 3. Install [LLVM-15.0.6-win64.exe](https://github.com/llvm/llvm-project/releases/download/llvmorg-15.0.6/LLVM-15.0.6-win64.exe) Add the `bin` directory of the LLVM installation (`C:\Program Files\LLVM\bin`) to PATH - + set `LIBCLANG_PATH` environment variable to point to the `bin` directory as well **IMPORTANT:** Due to C++ dependency configuration issues, LLVM `AR` installation on Windows may not function correctly when switching between WASM and native C++ code compilation (native `RocksDB+secp256k1` vs WASM32 builds of `secp256k1`). Unfortunately, manually setting `AR` environment variable also confuses C++ build toolchain (it should not be set for native but should be set for WASM32 targets). Currently, the best way to address this, is as follows: after installing LLVM on Windows, go to the target `bin` installation directory and copy or rename `LLVM_AR.exe` to `AR.exe`. - + 4. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 5. Install wasm-pack ```bash cargo install wasm-pack @@ -82,16 +82,16 @@ The default branch of this repository is `master` and new contributions are cons 6. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 7. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+</details>
-
+
Building on Mac OS @@ -99,8 +99,8 @@ The default branch of this repository is `master` and new contributions are cons ```bash brew install protobuf ``` - 2. Install llvm. - + 2. Install llvm. + The default XCode installation of `llvm` does not support WASM build targets. To build WASM on MacOS you need to install `llvm` from homebrew (at the time of writing, the llvm version for MacOS is 16.0.1). ```bash @@ -133,8 +133,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of source ~/.zshrc ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -142,14 +142,14 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+</details>
@@ -184,7 +184,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of Kaspa CLI + Wallet -`kaspa-cli` crate provides cli-driven RPC interface to the node and a + +`kaspa-cli` crate provides a cli-driven RPC interface to the node and a terminal interface to the Rusty Kaspa Wallet runtime. These wallets are compatible with WASM SDK Wallet API and Kaspa NG projects. @@ -235,7 +236,7 @@ cargo run --release --bin kaspad -- --testnet ``` **Testnet 11** - + For participation in the 10BPS test network (TN11), see the following detailed [guide](docs/testnet11.md).
@@ -249,7 +250,7 @@ cargo run --release --bin kaspad -- --configfile /path/to/configfile.toml # or cargo run --release --bin kaspad -- -C /path/to/configfile.toml ``` - - The config file should be a list of \<arg\> = \<value\> separated by newlines. + - The config file should be a list of \<arg\> = \<value\> separated by newlines. - Whitespace around the `=` is fine, `arg=value` and `arg = value` are both parsed correctly. - Values with special characters like `.` or `=` will require quoting the value, i.e. \<arg\> = "\<value.value\>". - Arguments with multiple values should be surrounded with brackets like `addpeer = ["10.0.0.1", "1.2.3.4"]`. @@ -297,17 +298,17 @@ wRPC **Sidenote:** Rusty Kaspa integrates an optional wRPC - subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC + subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC implementation that can use [Borsh](https://borsh.io/) and JSON protocol encoding. - JSON protocol messaging - is similar to JSON-RPC 1.0, but differs from the specification due to server-side + JSON protocol messaging + is similar to JSON-RPC 1.0, but differs from the specification due to server-side notifications. [Borsh](https://borsh.io/) encoding is meant for inter-process communication. When using [Borsh](https://borsh.io/) - both client and server should be built from the same codebase. + both client and server should be built from the same codebase. - JSON protocol is based on + JSON protocol is based on Kaspa data structures and is data-structure-version agnostic. You can connect to the JSON endpoint using any WebSocket library. Built-in RPC clients for JavaScript and TypeScript capable of running in web browsers and Node.js are available as a part of
- -
- - ## Benchmarking & Testing -
+
Simulation framework (Simpa) -Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.10.0/env_logger/#filtering-results) by either: - -The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). +The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). To see the available commands -```bash +```bash cargo run --release --bin simpa -- --help -``` +``` -The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. +The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. ```bash cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 @@ -347,7 +342,7 @@ cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 -
+</details>
Heap Profiling @@ -362,7 +357,7 @@ It will produce `{bin-name}-heap.json` file in the root of the workdir, that can
-
+
Tests @@ -384,12 +379,21 @@ cd rusty-kaspa cargo nextest run --release ``` +
+ +
+Lints + +```bash +cd rusty-kaspa +./check +```
-
+
Benchmarks @@ -400,7 +404,7 @@ cargo bench
-
+
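Criterion bench targets can also be run individually. For instance, the two consensus bench targets this change registers in `consensus/Cargo.toml` (see the Cargo.toml hunk further below), `check_scripts` and `parallel_muhash`, can be selected with cargo's standard `--bench` filter; the `-p kaspa-consensus` package flag below assumes the consensus crate's workspace package name:

```bash
# Run only the script-checking benches from the consensus crate
cargo bench -p kaspa-consensus --bench check_scripts

# Run only the parallel MuHash benches
cargo bench -p kaspa-consensus --bench parallel_muhash
```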
Logging @@ -415,4 +419,3 @@ Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.1 In this command we set the `loglevel` to `INFO`.
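For example, using the `env_logger` filter syntax linked above, the global level can be set to `INFO` while a single module is raised to `TRACE` (the `kaspa_rpc_core` target below is illustrative):

```bash
cargo run --release --bin kaspad -- --loglevel info,kaspa_rpc_core=trace
```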
- diff --git a/cli/src/modules/history.rs b/cli/src/modules/history.rs index 8fdf31f4d..f46527f1d 100644 --- a/cli/src/modules/history.rs +++ b/cli/src/modules/history.rs @@ -86,15 +86,7 @@ impl History { } }; let length = ids.size_hint().0; - let skip = if let Some(last) = last { - if last > length { - 0 - } else { - length - last - } - } else { - 0 - }; + let skip = if let Some(last) = last { length.saturating_sub(last) } else { 0 }; let mut index = 0; let page = 25; diff --git a/cli/src/modules/message.rs b/cli/src/modules/message.rs index dce7f3679..d38624dc2 100644 --- a/cli/src/modules/message.rs +++ b/cli/src/modules/message.rs @@ -1,5 +1,6 @@ use kaspa_addresses::Version; use kaspa_bip32::secp256k1::XOnlyPublicKey; +use kaspa_wallet_core::message::SignMessageOptions; use kaspa_wallet_core::{ account::{BIP32_ACCOUNT_KIND, KEYPAIR_ACCOUNT_KIND}, message::{sign_message, verify_message, PersonalMessage}, @@ -87,8 +88,9 @@ impl Message { let pm = PersonalMessage(message); let privkey = self.get_address_private_key(&ctx, kaspa_address).await?; + let sign_options = SignMessageOptions { no_aux_rand: false }; - let sig_result = sign_message(&pm, &privkey); + let sig_result = sign_message(&pm, &privkey, &sign_options); match sig_result { Ok(signature) => { diff --git a/cli/src/utils.rs b/cli/src/utils.rs index 3e1d0ddb1..e52581b4c 100644 --- a/cli/src/utils.rs +++ b/cli/src/utils.rs @@ -8,7 +8,7 @@ pub fn try_parse_required_nonzero_kaspa_as_sompi_u64(kasp let sompi_amount = kaspa_amount .to_string() .parse::() - .map_err(|_| Error::custom(format!("Supplied Kasapa amount is not valid: '{kaspa_amount}'")))? + .map_err(|_| Error::custom(format!("Supplied Kaspa amount is not valid: '{kaspa_amount}'")))? * SOMPI_PER_KASPA as f64; if sompi_amount < 0.0 { Err(Error::custom("Supplied Kaspa amount is not valid: '{kaspa_amount}'")) diff --git a/components/addressmanager/Cargo.toml b/components/addressmanager/Cargo.toml index e4398dc4e..ef735b19c 100644 --- a/components/addressmanager/Cargo.toml +++ b/components/addressmanager/Cargo.toml @@ -27,5 +27,4 @@ thiserror.workspace = true tokio.workspace = true [dev-dependencies] -statrs.workspace = true -statest.workspace = true +rv.workspace = true diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 85f9acb3e..093323e15 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -520,8 +520,7 @@ mod address_store_with_cache { use kaspa_database::create_temp_db; use kaspa_database::prelude::ConnBuilder; use kaspa_utils::networking::IpAddress; - use statest::ks::KSTest; - use statrs::distribution::Uniform; + use rv::{dist::Uniform, misc::ks_test as one_way_ks_test, traits::Cdf}; use std::net::{IpAddr, Ipv6Addr}; #[test] @@ -591,10 +590,11 @@ mod address_store_with_cache { assert!(num_of_buckets >= 12); // Run multiple Kolmogorov–Smirnov tests to offset random noise of the random weighted iterator - let num_of_trials = 512; + let num_of_trials = 2048; // Number of trials to run the test, chosen to reduce random noise. 
let mut cul_p = 0.; // The target uniform distribution - let target_u_dist = Uniform::new(0.0, (num_of_buckets) as f64).unwrap(); + let target_uniform_dist = Uniform::new(1.0, num_of_buckets as f64).unwrap(); + let uniform_cdf = |x: f64| target_uniform_dist.cdf(&x); for _ in 0..num_of_trials { // The weight sampled expected uniform distribution let prioritized_address_distribution = am .prioritized_randomized_addresses(Default::default()) .take(num_of_buckets) .map(|addr| addr.prefix_bucket().as_u64() as f64) .collect_vec(); - - let ks_test = KSTest::new(prioritized_address_distribution.as_slice()); - cul_p += ks_test.ks1(&target_u_dist).0; + cul_p += one_way_ks_test(prioritized_address_distribution.as_slice(), uniform_cdf).1; } // Normalize and adjust p to test for uniformity, over average of all trials. - let adjusted_p = (0.5 - cul_p / num_of_trials as f64).abs(); + // We do this to reduce the effect of random noise failing this test. + let adjusted_p = ((cul_p / num_of_trials as f64) - 0.5).abs(); // Define the significance threshold. let significance = 0.10; @@ -619,7 +618,7 @@ adjusted_p, significance ); - assert!(adjusted_p <= significance) + assert!(adjusted_p <= significance); } } } diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 3f4a1b456..443e591c8 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -54,9 +54,14 @@ serde_json.workspace = true flate2.workspace = true rand_distr.workspace = true kaspa-txscript-errors.workspace = true +kaspa-addresses.workspace = true [[bench]] -name = "hash_benchmarks" +name = "parallel_muhash" +harness = false + +[[bench]] +name = "check_scripts" harness = false [features] diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs new file mode 100644 index 000000000..6462e04e4 --- /dev/null +++ b/consensus/benches/check_scripts.rs @@ -0,0 +1,163 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; +use kaspa_addresses::{Address, Prefix, Version}; +use kaspa_consensus::processes::transaction_validator::tx_validation_in_utxo_context::{ + check_scripts_par_iter, check_scripts_par_iter_pool, check_scripts_sequential, +}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; +use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; +use kaspa_consensus_core::subnets::SubnetworkId; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionInput, TransactionOutpoint, UtxoEntry}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::pay_to_address_script; +use kaspa_utils::iter::parallelism_in_power_steps; +use rand::{thread_rng, Rng}; +use secp256k1::Keypair; + +fn mock_tx_with_payload(inputs_count: usize, non_uniq_signatures: usize, payload_size: usize) -> (Transaction, Vec<UtxoEntry>) { + let mut payload = vec![0u8; payload_size]; + thread_rng().fill(&mut payload[..]); + + let reused_values = SigHashReusedValuesUnsync::new(); + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let mut tx = Transaction::new( + 0, + vec![], + vec![], + 0, + SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, + payload, + ); + let mut utxos = vec![]; + let mut kps = vec![]; + + for _ in 0..inputs_count - non_uniq_signatures { + let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0,
sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 333, + is_coinbase: false, + }); + kps.push(kp); + } + + for _ in 0..non_uniq_signatures { + let kp = kps.last().unwrap(); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 444, + is_coinbase: false, + }); + } + + for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) { + let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + + let length = tx.inputs.len(); + for i in (inputs_count - non_uniq_signatures)..length { + let kp = kps.last().unwrap(); + let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + + (tx, utxos) +} + +fn benchmark_check_scripts(c: &mut Criterion) { + for inputs_count in [100, 50, 25, 10, 5, 2] { + for non_uniq_signatures in [0, inputs_count / 2] { + let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, 0); + let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}")); + group.sampling_mode(SamplingMode::Flat); + + group.bench_function("single_thread", |b| { + let tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_sequential(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); + }) + }); + + group.bench_function("rayon par iter", |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); + }) + }); + + for i in parallelism_in_power_steps() { + if inputs_count >= i { + group.bench_function(format!("rayon, custom thread pool, thread count {i}"), |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter_pool(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool), false) + .unwrap(); + }) + }); + } + } + } + } +} + +/// Benchmarks script checking performance with different payload sizes and input counts. 
+/// +/// This benchmark evaluates the performance impact of transaction payload size +/// on script validation, testing multiple scenarios: +/// +/// * Payload sizes: 0KB, 16KB, 32KB, 64KB, 128KB +/// * Input counts: 1, 2, 10, 50 transactions +/// +/// The benchmark helps understand: +/// 1. How payload size affects validation performance +/// 2. The relationship between input count and payload processing overhead +fn benchmark_check_scripts_with_payload(c: &mut Criterion) { + let payload_sizes = [0, 16_384, 32_768, 65_536, 131_072]; // 0, 16KB, 32KB, 64KB, 128KB + let input_counts = [1, 2, 10, 50]; + let non_uniq_signatures = 0; + + for inputs_count in input_counts { + for &payload_size in &payload_sizes { + let (tx, utxos) = mock_tx_with_payload(inputs_count, non_uniq_signatures, payload_size); + let mut group = c.benchmark_group(format!("script_check/inputs_{}/payload_{}_kb", inputs_count, payload_size / 1024)); + group.sampling_mode(SamplingMode::Flat); + + group.bench_function("parallel_validation", |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable()), false).unwrap(); + }) + }); + } + } +} + +criterion_group! { + name = benches; + config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0)); + targets = benchmark_check_scripts, benchmark_check_scripts_with_payload +} + +criterion_main!(benches); diff --git a/consensus/benches/hash_benchmarks.rs b/consensus/benches/hash_benchmarks.rs deleted file mode 100644 index 8ba6836b8..000000000 --- a/consensus/benches/hash_benchmarks.rs +++ /dev/null @@ -1,15 +0,0 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use std::str::FromStr; - -use kaspa_hashes::Hash; - -/// Placeholder for actual benchmarks -pub fn hash_benchmark(c: &mut Criterion) { - c.bench_function("Hash::from_str", |b| { - let hash_str = "8e40af02265360d59f4ecf9ae9ebf8f00a3118408f5a9cdcbcc9c0f93642f3af"; - b.iter(|| Hash::from_str(black_box(hash_str))) - }); -} - -criterion_group!(benches, hash_benchmark); -criterion_main!(benches); diff --git a/consensus/benches/parallel_muhash.rs b/consensus/benches/parallel_muhash.rs new file mode 100644 index 000000000..99ab5b6c3 --- /dev/null +++ b/consensus/benches/parallel_muhash.rs @@ -0,0 +1,66 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use itertools::Itertools; +use kaspa_consensus_core::{ + muhash::MuHashExtensions, + subnets::SUBNETWORK_ID_NATIVE, + tx::{ScriptPublicKey, SignableTransaction, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry}, +}; +use kaspa_hashes::TransactionID; +use kaspa_muhash::MuHash; +use kaspa_utils::iter::parallelism_in_power_steps; +use rayon::prelude::*; + +fn generate_transaction(ins: usize, outs: usize, randomness: u64) -> SignableTransaction { + let mut tx = Transaction::new(0, vec![], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + let mut entries = vec![]; + for i in 0..ins { + let mut hasher = TransactionID::new(); + hasher.write(i.to_le_bytes()); + hasher.write(randomness.to_le_bytes()); + let input = TransactionInput::new(TransactionOutpoint::new(hasher.finalize(), 0), vec![10; 66], 0, 1); + let entry = UtxoEntry::new(22222222, ScriptPublicKey::from_vec(0, vec![99; 34]), 23456, false); + tx.inputs.push(input); + entries.push(entry); + } + for _ in 0..outs { + let output = 
TransactionOutput::new(23456, ScriptPublicKey::from_vec(0, vec![101; 34])); + tx.outputs.push(output); + } + tx.finalize(); + SignableTransaction::with_entries(tx, entries) +} + +pub fn parallel_muhash_benchmark(c: &mut Criterion) { + let mut group = c.benchmark_group("muhash txs"); + let txs = (0..256).map(|i| generate_transaction(2, 2, i)).collect_vec(); + group.bench_function("seq", |b| { + b.iter(|| { + let mut mh = MuHash::new(); + for tx in txs.iter() { + mh.add_transaction(&tx.as_verifiable(), 222); + } + black_box(mh) + }) + }); + + for threads in parallelism_in_power_steps() { + group.bench_function(format!("par {threads}"), |b| { + let pool = rayon::ThreadPoolBuilder::new().num_threads(threads).build().unwrap(); + b.iter(|| { + pool.install(|| { + let mh = + txs.par_iter().map(|tx| MuHash::from_transaction(&tx.as_verifiable(), 222)).reduce(MuHash::new, |mut a, b| { + a.combine(&b); + a + }); + black_box(mh) + }) + }) + }); + } + + group.finish(); +} + +criterion_group!(benches, parallel_muhash_benchmark); +criterion_main!(benches); diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs index 6f04a73c4..7d2e25b39 100644 --- a/consensus/client/src/header.rs +++ b/consensus/client/src/header.rs @@ -266,7 +266,7 @@ impl Header { impl TryCastFromJs for Header { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/consensus/client/src/input.rs b/consensus/client/src/input.rs index a5018199d..0c5e052f2 100644 --- a/consensus/client/src/input.rs +++ b/consensus/client/src/input.rs @@ -200,7 +200,7 @@ impl AsRef for TransactionInput { impl TryCastFromJs for TransactionInput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/consensus/client/src/output.rs b/consensus/client/src/output.rs index 17b4a58c8..01772dde3 100644 --- a/consensus/client/src/output.rs +++ b/consensus/client/src/output.rs @@ -139,7 +139,7 @@ impl From<&TransactionOutput> for cctx::TransactionOutput { impl TryCastFromJs for TransactionOutput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index 4044dc570..18ff3c849 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -7,7 +7,7 @@ use core::iter::once; use itertools::Itertools; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, tx::PopulatedTransaction, @@ -44,7 +44,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; { let input_len = tx.inner().inputs.len(); @@ -59,7 +59,7 @@ pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> }; let script = script_pub_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &mut
reused_values); + let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) diff --git a/consensus/client/src/signing.rs b/consensus/client/src/signing.rs index ef993d011..cd8604677 100644 --- a/consensus/client/src/signing.rs +++ b/consensus/client/src/signing.rs @@ -75,7 +75,7 @@ impl SigHashCache { } } - pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { + pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } @@ -178,23 +178,23 @@ pub fn calc_schnorr_signature_hash( let utxo = cctx::UtxoEntry::from(utxo.as_ref()); let hash_type = SIG_HASH_ALL; - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); // let input = verifiable_tx.populated_input(input_index); // let tx = verifiable_tx.tx(); let mut hasher = TransactionSigningHash::new(); hasher .write_u16(tx.version) - .update(previous_outputs_hash(&tx, hash_type, &mut reused_values)) - .update(sequences_hash(&tx, hash_type, &mut reused_values)) - .update(sig_op_counts_hash(&tx, hash_type, &mut reused_values)); + .update(previous_outputs_hash(&tx, hash_type, &reused_values)) + .update(sequences_hash(&tx, hash_type, &reused_values)) + .update(sig_op_counts_hash(&tx, hash_type, &reused_values)); hash_outpoint(&mut hasher, input.previous_outpoint); hash_script_public_key(&mut hasher, &utxo.script_public_key); hasher .write_u64(utxo.amount) .write_u64(input.sequence) .write_u8(input.sig_op_count) - .update(outputs_hash(&tx, hash_type, &mut reused_values, input_index)) + .update(outputs_hash(&tx, hash_type, &reused_values, input_index)) .write_u64(tx.lock_time) .update(&tx.subnetwork_id) .write_u64(tx.gas) diff --git a/consensus/client/src/transaction.rs b/consensus/client/src/transaction.rs index 17cc38126..4026ac1eb 100644 --- a/consensus/client/src/transaction.rs +++ b/consensus/client/src/transaction.rs @@ -280,7 +280,7 @@ impl Transaction { impl TryCastFromJs for Transaction { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/client/src/utxo.rs b/consensus/client/src/utxo.rs index bbfc1199d..99a663fd0 100644 --- a/consensus/client/src/utxo.rs +++ b/consensus/client/src/utxo.rs @@ -282,7 +282,7 @@ impl TryIntoUtxoEntryReferences for JsValue { impl TryCastFromJs for UtxoEntry { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { @@ -405,7 +405,7 @@ impl TryFrom for UtxoEntries { impl TryCastFromJs for UtxoEntryReference { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 44dbedd38..228b4ac11 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -15,6 +15,7 @@ wasm32-sdk = [] default = [] 
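Aside: the arc-swap dependency added to consensus/core just below backs the new lock-free SigHashReusedValuesSync cache introduced later in this diff. The following is a minimal sketch of the load-then-rcu pattern it enables; it is not part of the patch, and the LazyCache type and u64 payload are illustrative only:

use arc_swap::ArcSwapOption;
use std::sync::Arc;

struct LazyCache {
    slot: ArcSwapOption<u64>,
}

impl LazyCache {
    fn get_or_compute(&self, compute: impl Fn() -> u64) -> u64 {
        // Fast path: lock-free read, safe to share across threads.
        if let Some(v) = self.slot.load().as_ref() {
            return **v;
        }
        // Slow path: compute and publish. Racing threads may both compute,
        // but they produce the same value, so either store is harmless.
        let v = compute();
        self.slot.rcu(|_| Arc::new(v));
        v
    }
}

fn main() {
    let cache = LazyCache { slot: ArcSwapOption::from(None) };
    assert_eq!(cache.get_or_compute(|| 42), 42); // closure runs once
    assert_eq!(cache.get_or_compute(|| 7), 42); // cached value wins
}

This mirrors the trade-off taken in sighash.rs below: no mutable borrow is needed, at the cost of possibly recomputing a hash once under contention.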
[dependencies] +arc-swap.workspace = true async-trait.workspace = true borsh.workspace = true cfg-if.workspace = true diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 365b8404c..7c244b914 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -17,7 +17,7 @@ use crate::{ tx::TxResult, }, header::Header, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, @@ -203,7 +203,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + fn validate_pruning_proof(&self, proof: &PruningPointProof, proof_metadata: &PruningProofMetadata) -> PruningImportResult<()> { unimplemented!() } diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index c4635083b..899773bbf 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -36,7 +36,7 @@ pub mod consensus { /// Size of the **sampled** median time window (independent of BPS) pub const MEDIAN_TIME_SAMPLED_WINDOW_SIZE: u64 = - ((2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1) + PAST_MEDIAN_TIME_SAMPLE_INTERVAL - 1) / PAST_MEDIAN_TIME_SAMPLE_INTERVAL; + (2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~~~~~~~ Max difficulty target ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -71,8 +71,7 @@ pub mod consensus { pub const DIFFICULTY_WINDOW_SAMPLE_INTERVAL: u64 = 4; /// Size of the **sampled** difficulty window (independent of BPS) - pub const DIFFICULTY_SAMPLED_WINDOW_SIZE: u64 = - (NEW_DIFFICULTY_WINDOW_DURATION + DIFFICULTY_WINDOW_SAMPLE_INTERVAL - 1) / DIFFICULTY_WINDOW_SAMPLE_INTERVAL; + pub const DIFFICULTY_SAMPLED_WINDOW_SIZE: u64 = NEW_DIFFICULTY_WINDOW_DURATION.div_ceil(DIFFICULTY_WINDOW_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~ Finality & Pruning ~~~~~~~~~~~~~~~~~~~ @@ -121,7 +120,7 @@ pub mod perf { const BASELINE_HEADER_DATA_CACHE_SIZE: usize = 10_000; const BASELINE_BLOCK_DATA_CACHE_SIZE: usize = 200; - const BASELINE_BLOCK_WINDOW_CACHE_SIZE: usize = 2000; + const BASELINE_BLOCK_WINDOW_CACHE_SIZE: usize = 2_000; const BASELINE_UTXOSET_CACHE_SIZE: usize = 10_000; #[derive(Clone, Debug)] diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index f3479b4c2..e5da18c25 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -15,6 +15,33 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct ForkActivation(u64); + +impl ForkActivation { + pub const fn new(daa_score: u64) -> Self { + Self(daa_score) + } + + pub const fn never() -> Self { + Self(u64::MAX) + } + + pub const fn always() -> Self { + Self(0) + } + + pub fn is_active(self, current_daa_score: u64) -> bool { + current_daa_score >= self.0 + } + + /// Checks if the fork was "recently" activated, i.e., in the time frame of the provided range. + /// This function returns false for forks that were always active, since they were never activated. 
+ pub fn is_within_range_from_activation(self, current_daa_score: u64, range: u64) -> bool { + self != Self::always() && self.is_active(current_daa_score) && current_daa_score < self.0 + range + } +} + /// Consensus parameters. Contains settings and configurations which are consensus-sensitive. /// Changing one of these on a network node would exclude and prevent it from reaching consensus /// with the other unmodified nodes. @@ -41,7 +68,7 @@ pub struct Params { pub target_time_per_block: u64, /// DAA score from which the window sampling starts for difficulty and past median time calculation - pub sampling_activation_daa_score: u64, + pub sampling_activation: ForkActivation, /// Defines the highest allowed proof of work difficulty value for a block as a [`Uint256`] pub max_difficulty_target: Uint256, @@ -81,7 +108,19 @@ pub struct Params { pub storage_mass_parameter: u64, /// DAA score from which storage mass calculation and transaction mass field are activated as a consensus rule - pub storage_mass_activation_daa_score: u64, + pub storage_mass_activation: ForkActivation, + + /// DAA score from which tx engine: + /// 1. Supports 8-byte integer arithmetic operations (previously limited to 4 bytes) + /// 2. Supports transaction introspection opcodes: + /// - OpTxInputCount (0xb3): Get number of inputs + /// - OpTxOutputCount (0xb4): Get number of outputs + /// - OpTxInputIndex (0xb9): Get current input index + /// - OpTxInputAmount (0xbe): Get input amount + /// - OpTxInputSpk (0xbf): Get input script public key + /// - OpTxOutputAmount (0xc2): Get output amount + /// - OpTxOutputSpk (0xc3): Get output script public key + pub kip10_activation: ForkActivation, /// DAA score after which the pre-deflationary period switches to the deflationary period pub deflationary_phase_daa_score: u64, @@ -91,6 +130,9 @@ pub struct Params { pub skip_proof_of_work: bool, pub max_block_level: BlockLevel, pub pruning_proof_m: u64, + + /// Activation rules for when to enable using the payload field in transactions + pub payload_activation: ForkActivation, } fn unix_now() -> u64 { @@ -117,10 +159,10 @@ impl Params { #[inline] #[must_use] pub fn past_median_time_window_size(&self, selected_parent_daa_score: u64) -> usize { - if selected_parent_daa_score < self.sampling_activation_daa_score { - self.legacy_past_median_time_window_size() - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.sampled_past_median_time_window_size() + } else { + self.legacy_past_median_time_window_size() } } @@ -129,10 +171,10 @@ impl Params { #[inline] #[must_use] pub fn timestamp_deviation_tolerance(&self, selected_parent_daa_score: u64) -> u64 { - if selected_parent_daa_score < self.sampling_activation_daa_score { - self.legacy_timestamp_deviation_tolerance - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.new_timestamp_deviation_tolerance + } else { + self.legacy_timestamp_deviation_tolerance } } @@ -141,10 +183,10 @@ impl Params { #[inline] #[must_use] pub fn past_median_time_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if selected_parent_daa_score < self.sampling_activation_daa_score { - 1 - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.past_median_time_sample_rate + } else { + 1 } } @@ -153,10 +195,10 @@ impl Params { #[inline] #[must_use] pub fn difficulty_window_size(&self, selected_parent_daa_score: u64) -> usize { - if selected_parent_daa_score < self.sampling_activation_daa_score { - 
self.legacy_difficulty_window_size - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.sampled_difficulty_window_size + } else { + self.legacy_difficulty_window_size } } @@ -165,10 +207,10 @@ impl Params { #[inline] #[must_use] pub fn difficulty_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if selected_parent_daa_score < self.sampling_activation_daa_score { - 1 - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.difficulty_sample_rate + } else { + 1 } } @@ -188,18 +230,18 @@ impl Params { } pub fn daa_window_duration_in_blocks(&self, selected_parent_daa_score: u64) -> u64 { - if selected_parent_daa_score < self.sampling_activation_daa_score { - self.legacy_difficulty_window_size as u64 - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 + } else { + self.legacy_difficulty_window_size as u64 } } fn expected_daa_window_duration_in_milliseconds(&self, selected_parent_daa_score: u64) -> u64 { - if selected_parent_daa_score < self.sampling_activation_daa_score { - self.target_time_per_block * self.legacy_difficulty_window_size as u64 - } else { + if self.sampling_activation.is_active(selected_parent_daa_score) { self.target_time_per_block * self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 + } else { + self.target_time_per_block * self.legacy_difficulty_window_size as u64 } } @@ -322,7 +364,7 @@ pub const MAINNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -352,7 +394,8 @@ pub const MAINNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. 
This number is calculated as follows: @@ -366,6 +409,8 @@ pub const MAINNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 225, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; pub const TESTNET_PARAMS: Params = Params { @@ -385,7 +430,7 @@ pub const TESTNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -415,8 +460,8 @@ pub const TESTNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, - + storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: // We define a year as 365.25 days @@ -429,6 +474,8 @@ pub const TESTNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; pub const TESTNET11_PARAMS: Params = Params { @@ -447,7 +494,7 @@ pub const TESTNET11_PARAMS: Params = Params { legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation_daa_score: 0, // Sampling is activated from network inception + sampling_activation: ForkActivation::always(), // Sampling is activated from network inception max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, @@ -485,10 +532,13 @@ pub const TESTNET11_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: 0, + storage_mass_activation: ForkActivation::always(), + kip10_activation: ForkActivation::never(), skip_proof_of_work: false, max_block_level: 250, + + payload_activation: ForkActivation::never(), }; pub const SIMNET_PARAMS: Params = Params { @@ -498,7 +548,7 @@ pub const SIMNET_PARAMS: Params = Params { legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation_daa_score: 0, // Sampling is activated from network inception + sampling_activation: ForkActivation::always(), // Sampling is activated from network inception max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, @@ -538,10 +588,13 @@ pub const SIMNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: 0, + storage_mass_activation: ForkActivation::always(), + kip10_activation: ForkActivation::never(), skip_proof_of_work: true, // For simnet only, PoW can be simulated by default 
max_block_level: 250, + + payload_activation: ForkActivation::never(), }; pub const DEVNET_PARAMS: Params = Params { @@ -554,7 +607,7 @@ pub const DEVNET_PARAMS: Params = Params { past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, target_time_per_block: 1000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), @@ -584,7 +637,8 @@ pub const DEVNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -598,4 +652,6 @@ pub const DEVNET_PARAMS: Params = Params { skip_proof_of_work: false, max_block_level: 250, pruning_proof_m: 1000, + + payload_activation: ForkActivation::never(), }; diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index f5c235476..132c6619f 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -64,8 +64,8 @@ pub enum RuleError { #[error("expected header blue work {0} but got {1}")] UnexpectedHeaderBlueWork(BlueWorkType, BlueWorkType), - #[error("block difficulty of {0} is not the expected value of {1}")] - UnexpectedDifficulty(u32, u32), + #[error("block {0} difficulty of {1} is not the expected value of {2}")] + UnexpectedDifficulty(Hash, u32, u32), #[error("block timestamp of {0} is not after expected {1}")] TimeTooOld(u64, u64), diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs index d61437f87..a9686e023 100644 --- a/consensus/core/src/errors/pruning.rs +++ b/consensus/core/src/errors/pruning.rs @@ -59,6 +59,9 @@ pub enum PruningImportError { #[error("process exit was initiated while validating pruning point proof")] PruningValidationInterrupted, + + #[error("block {0} at level {1} has invalid proof of work for level")] + ProofOfWorkFailed(Hash, BlockLevel), } pub type PruningImportResult = std::result::Result; diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index c1b6133e8..2c8006f75 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -1,9 +1,9 @@ +use arc_swap::ArcSwapOption; use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, TransactionSigningHashECDSA, ZERO_HASH}; +use std::cell::Cell; +use std::sync::Arc; -use crate::{ - subnets::SUBNETWORK_ID_NATIVE, - tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction}, -}; +use crate::tx::{ScriptPublicKey, Transaction, TransactionOutpoint, TransactionOutput, VerifiableTransaction}; use super::{sighash_type::SigHashType, HasherExtensions}; @@ -11,88 +11,190 @@ use super::{sighash_type::SigHashType, HasherExtensions}; /// the same for all transaction inputs. /// Reuse of such values prevents the quadratic hashing problem. 
#[derive(Default)] -pub struct SigHashReusedValues { - previous_outputs_hash: Option<Hash>, - sequences_hash: Option<Hash>, - sig_op_counts_hash: Option<Hash>, - outputs_hash: Option<Hash>, +pub struct SigHashReusedValuesUnsync { + previous_outputs_hash: Cell<Option<Hash>>, + sequences_hash: Cell<Option<Hash>>, + sig_op_counts_hash: Cell<Option<Hash>>, + outputs_hash: Cell<Option<Hash>>, + payload_hash: Cell<Option<Hash>>, } -impl SigHashReusedValues { +impl SigHashReusedValuesUnsync { pub fn new() -> Self { - Self { previous_outputs_hash: None, sequences_hash: None, sig_op_counts_hash: None, outputs_hash: None } + Self::default() } } -pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +#[derive(Default)] +pub struct SigHashReusedValuesSync { + previous_outputs_hash: ArcSwapOption<Hash>, + sequences_hash: ArcSwapOption<Hash>, + sig_op_counts_hash: ArcSwapOption<Hash>, + outputs_hash: ArcSwapOption<Hash>, + payload_hash: ArcSwapOption<Hash>, +} + +impl SigHashReusedValuesSync { + pub fn new() -> Self { + Self::default() + } +} + +pub trait SigHashReusedValues { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash; +} + +impl SigHashReusedValues for SigHashReusedValuesUnsync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.previous_outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.previous_outputs_hash.set(Some(hash)); + hash + }) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sequences_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sequences_hash.set(Some(hash)); + hash + }) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sig_op_counts_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sig_op_counts_hash.set(Some(hash)); + hash + }) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.outputs_hash.set(Some(hash)); + hash + }) + } + + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.payload_hash.get().unwrap_or_else(|| { + let hash = set(); + self.payload_hash.set(Some(hash)); + hash + }) + } +} + +impl SigHashReusedValues for SigHashReusedValuesSync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.previous_outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.previous_outputs_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sequences_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sequences_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sig_op_counts_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sig_op_counts_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.outputs_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn payload_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.payload_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.payload_hash.rcu(|_| Arc::new(hash)); + hash + } +} + 
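Aside: a brief sketch of how the new SigHashReusedValues trait is consumed; it is not part of the patch, and the demo function and compute closure are illustrative. Each getter takes a closure that is evaluated only on the first call: SigHashReusedValuesUnsync caches through a Cell for single-threaded use, while SigHashReusedValuesSync caches through ArcSwapOption so parallel rayon validators can share one instance.

fn demo(reused: &impl SigHashReusedValues, compute: impl Fn() -> Hash) -> bool {
    // First call: the closure runs and its result is cached.
    let first = reused.previous_outputs_hash(&compute);
    // Second call: the cached hash is returned; the closure is not re-run.
    let second = reused.previous_outputs_hash(&compute);
    first == second // always true; the expensive hashing happened once
}

The same instance is passed to every per-input call of calc_schnorr_signature_hash below, which is what removes the quadratic hashing cost across a transaction's inputs.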
+pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - - if let Some(previous_outputs_hash) = reused_values.previous_outputs_hash { - previous_outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.update(input.previous_outpoint.transaction_id.as_bytes()); hasher.write_u32(input.previous_outpoint.index); } - let previous_outputs_hash = hasher.finalize(); - reused_values.previous_outputs_hash = Some(previous_outputs_hash); - previous_outputs_hash - } + hasher.finalize() + }; + reused_values.previous_outputs_hash(hash) } -pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_single() || hash_type.is_sighash_anyone_can_pay() || hash_type.is_sighash_none() { return ZERO_HASH; } - - if let Some(sequences_hash) = reused_values.sequences_hash { - sequences_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u64(input.sequence); } - let sequence_hash = hasher.finalize(); - reused_values.sequences_hash = Some(sequence_hash); - sequence_hash - } + hasher.finalize() + }; + reused_values.sequences_hash(hash) } -pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - if let Some(sig_op_counts_hash) = reused_values.sig_op_counts_hash { - sig_op_counts_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u8(input.sig_op_count); } - let sig_op_counts_hash = hasher.finalize(); - reused_values.sig_op_counts_hash = Some(sig_op_counts_hash); - sig_op_counts_hash - } + hasher.finalize() + }; + reused_values.sig_op_counts_hash(hash) } -pub fn payload_hash(tx: &Transaction) -> Hash { - if tx.subnetwork_id == SUBNETWORK_ID_NATIVE { +pub fn payload_hash(tx: &Transaction, reused_values: &impl SigHashReusedValues) -> Hash { + if tx.subnetwork_id.is_native() && tx.payload.is_empty() { return ZERO_HASH; } - // TODO: Right now this branch will never be executed, since payload is disabled - // for all non coinbase transactions. Once payload is enabled, the payload hash - // should be cached to make it cost O(1) instead of O(tx.inputs.len()). 
- let mut hasher = TransactionSigningHash::new(); - hasher.write_var_bytes(&tx.payload); - hasher.finalize() + let hash = || { + let mut hasher = TransactionSigningHash::new(); + hasher.write_var_bytes(&tx.payload); + hasher.finalize() + }; + reused_values.payload_hash(hash) } -pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues, input_index: usize) -> Hash { +pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash { if hash_type.is_sighash_none() { return ZERO_HASH; } @@ -107,19 +209,15 @@ pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mu hash_output(&mut hasher, &tx.outputs[input_index]); return hasher.finalize(); } - - // Otherwise, return hash of all outputs. Re-use hash if available. - if let Some(outputs_hash) = reused_values.outputs_hash { - outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for output in tx.outputs.iter() { hash_output(&mut hasher, output); } - let outputs_hash = hasher.finalize(); - reused_values.outputs_hash = Some(outputs_hash); - outputs_hash - } + hasher.finalize() + }; + // Otherwise, return hash of all outputs. Re-use hash if available. + reused_values.outputs_hash(hash) } pub fn hash_outpoint(hasher: &mut impl Hasher, outpoint: TransactionOutpoint) { @@ -141,7 +239,7 @@ pub fn calc_schnorr_signature_hash( verifiable_tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let input = verifiable_tx.populated_input(input_index); let tx = verifiable_tx.tx(); @@ -161,7 +259,7 @@ pub fn calc_schnorr_signature_hash( .write_u64(tx.lock_time) .update(&tx.subnetwork_id) .write_u64(tx.gas) - .update(payload_hash(tx)) + .update(payload_hash(tx, reused_values)) .write_u8(hash_type.to_u8()); hasher.finalize() } @@ -170,7 +268,7 @@ pub fn calc_ecdsa_signature_hash( tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, reused_values); let mut hasher = TransactionSigningHashECDSA::new(); @@ -186,7 +284,7 @@ mod tests { use crate::{ hashing::sighash_type::{SIG_HASH_ALL, SIG_HASH_ANY_ONE_CAN_PAY, SIG_HASH_NONE, SIG_HASH_SINGLE}, - subnets::SubnetworkId, + subnets::{SubnetworkId, SUBNETWORK_ID_NATIVE}, tx::{PopulatedTransaction, Transaction, TransactionId, TransactionInput, UtxoEntry}, }; @@ -509,6 +607,14 @@ mod tests { action: ModifyAction::NoAction, expected_hash: "846689131fb08b77f83af1d3901076732ef09d3f8fdff945be89aa4300562e5f", // should change the hash }, + TestVector { + name: "native-all-0-modify-payload", + populated_tx: &native_populated_tx, + hash_type: SIG_HASH_ALL, + input_index: 0, + action: ModifyAction::Payload, + expected_hash: "72ea6c2871e0f44499f1c2b556f265d9424bfea67cca9cb343b4b040ead65525", // should change the hash + }, // subnetwork transaction TestVector { name: "subnetwork-all-0", @@ -573,9 +679,9 @@ mod tests { } } let populated_tx = PopulatedTransaction::new(&tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); assert_eq!( - calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &mut reused_values).to_string(), + calc_schnorr_signature_hash(&populated_tx, 
test.input_index, test.hash_type, &reused_values).to_string(), test.expected_hash, "test {} failed", test.name diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs index 019f2a8f5..9216a1c16 100644 --- a/consensus/core/src/hashing/tx.rs +++ b/consensus/core/src/hashing/tx.rs @@ -157,6 +157,13 @@ mod tests { expected_hash: "31da267d5c34f0740c77b8c9ebde0845a01179ec68074578227b804bac306361", }); + // Test #8, same as 7 but with a non-zero payload. The test checks that both id and hash are affected by the payload change + tests.push(Test { + tx: Transaction::new(2, inputs.clone(), outputs.clone(), 54, subnets::SUBNETWORK_ID_REGISTRY, 3, vec![1, 2, 3]), + expected_id: "1f18b18ab004ff1b44dd915554b486d64d7ebc02c054e867cc44e3d746e80b3b", + expected_hash: "a2029ebd66d29d41aa7b0c40230c1bfa7fe8e026fb44b7815dda4e991b9a5fad", + }); + for (i, test) in tests.iter().enumerate() { assert_eq!(test.tx.id(), Hash::from_str(test.expected_id).unwrap(), "transaction id failed for test {}", i + 1); assert_eq!( diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 188b2403b..e4591f218 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -41,6 +41,10 @@ pub mod utxo; /// overall blocks, so 2^192 is definitely a justified upper-bound. pub type BlueWorkType = kaspa_math::Uint192; +/// This extends directly from the expectation above about having no more than +/// 2^128 work in a single block. +pub const MAX_WORK_LEVEL: BlockLevel = 128; + /// The type used to represent the GHOSTDAG K parameter pub type KType = u16; diff --git a/consensus/core/src/muhash.rs b/consensus/core/src/muhash.rs index 3782f855c..0286596eb 100644 --- a/consensus/core/src/muhash.rs +++ b/consensus/core/src/muhash.rs @@ -8,6 +8,8 @@ use kaspa_muhash::MuHash; pub trait MuHashExtensions { fn add_transaction(&mut self, tx: &impl VerifiableTransaction, block_daa_score: u64); fn add_utxo(&mut self, outpoint: &TransactionOutpoint, entry: &UtxoEntry); + fn from_transaction(tx: &impl VerifiableTransaction, block_daa_score: u64) -> Self; + fn from_utxo(outpoint: &TransactionOutpoint, entry: &UtxoEntry) -> Self; } impl MuHashExtensions for MuHash { @@ -30,6 +32,18 @@ impl MuHashExtensions for MuHash { write_utxo(&mut writer, entry, outpoint); writer.finalize(); } + + fn from_transaction(tx: &impl VerifiableTransaction, block_daa_score: u64) -> Self { + let mut mh = Self::new(); + mh.add_transaction(tx, block_daa_score); + mh + } + + fn from_utxo(outpoint: &TransactionOutpoint, entry: &UtxoEntry) -> Self { + let mut mh = Self::new(); + mh.add_utxo(outpoint, entry); + mh + } } fn write_utxo(writer: &mut impl HasherBase, entry: &UtxoEntry, outpoint: &TransactionOutpoint) { diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index 18e52eacb..2f81444b3 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -344,7 +344,7 @@ impl Serialize for NetworkId { struct NetworkIdVisitor; -impl<'de> de::Visitor<'de> for NetworkIdVisitor { +impl de::Visitor<'_> for NetworkIdVisitor { type Value = NetworkId; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -413,7 +413,7 @@ impl TryFrom for NetworkId { impl TryCastFromJs for NetworkId { type Error = NetworkIdError; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/consensus/core/src/pruning.rs b/consensus/core/src/pruning.rs index d99890389..aa029b40e 100644 ---
a/consensus/core/src/pruning.rs +++ b/consensus/core/src/pruning.rs @@ -1,6 +1,7 @@ use crate::{ header::Header, trusted::{TrustedGhostdagData, TrustedHeader}, + BlueWorkType, }; use kaspa_hashes::Hash; use std::sync::Arc; @@ -19,3 +20,15 @@ pub struct PruningPointTrustedData { /// Union of GHOSTDAG data required to verify blocks in the future of the pruning point pub ghostdag_blocks: Vec, } + +#[derive(Clone, Copy)] +pub struct PruningProofMetadata { + /// The claimed work of the initial relay block (from the prover) + pub relay_block_blue_work: BlueWorkType, +} + +impl PruningProofMetadata { + pub fn new(relay_block_blue_work: BlueWorkType) -> Self { + Self { relay_block_blue_work } + } +} diff --git a/consensus/core/src/sign.rs b/consensus/core/src/sign.rs index a40b949e3..1a87d03f1 100644 --- a/consensus/core/src/sign.rs +++ b/consensus/core/src/sign.rs @@ -1,6 +1,6 @@ use crate::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::{SigHashType, SIG_HASH_ALL}, }, tx::{SignableTransaction, VerifiableTransaction}, @@ -84,9 +84,9 @@ pub fn sign(mut signable_tx: SignableTransaction, schnorr_key: secp256k1::Keypai signable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..signable_tx.tx.inputs.len() { - let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -106,11 +106,11 @@ pub fn sign_with_multiple(mut mutable_tx: SignableTransaction, privkeys: Vec<[u8 mutable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -132,12 +132,12 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = 
secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -155,9 +155,9 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u /// Sign a transaction input with a sighash_type using schnorr pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Vec<u8> { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); - let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &mut reused_values); + let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); @@ -167,7 +167,7 @@ pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_k } pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { if input.signature_script.is_empty() { return Err(Error::Message(format!("Signature is empty for input: {i}"))); @@ -175,7 +175,7 @@ pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { let pk = &entry.script_public_key.script()[1..33]; let pk = secp256k1::XOnlyPublicKey::from_slice(pk)?; let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65])?; - let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; sig.verify(&msg, &pk)?; } diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index a4dd7dd45..769d29452 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -293,6 +293,8 @@ pub trait VerifiableTransaction { fn id(&self) -> TransactionId { self.tx().id() } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry>; } /// A custom iterator written only so that `populated_inputs` has a known return type and can be defined on the trait level @@ -319,7 +321,7 @@ impl<'a, T: VerifiableTransaction> Iterator for PopulatedInputIterator<'a, T> { } } -impl<'a, T: VerifiableTransaction> ExactSizeIterator for PopulatedInputIterator<'a, T> {} +impl<T: VerifiableTransaction> ExactSizeIterator for PopulatedInputIterator<'_, T> {} /// Represents a read-only referenced transaction along with fully populated UTXO entry data pub struct PopulatedTransaction<'a> { @@ -334,7 +336,7 @@ impl<'a> PopulatedTransaction<'a> { } } -impl<'a> VerifiableTransaction for PopulatedTransaction<'a> { +impl VerifiableTransaction for PopulatedTransaction<'_> { fn tx(&self) -> &Transaction { self.tx } @@ -342,6 +344,10 @@ impl<'a> VerifiableTransaction for PopulatedTransaction<'a> { fn populated_input(&self, index: usize) -> (&TransactionInput, &UtxoEntry) { (&self.tx.inputs[index], &self.entries[index]) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.entries.get(index) + } } /// Represents a validated transaction with populated UTXO entry data and a calculated fee @@ -362,7 +368,7 @@ impl<'a>
ValidatedTransaction<'a> { } } -impl<'a> VerifiableTransaction for ValidatedTransaction<'a> { +impl VerifiableTransaction for ValidatedTransaction<'_> { fn tx(&self) -> &Transaction { self.tx } @@ -370,6 +376,10 @@ impl<'a> VerifiableTransaction for ValidatedTransaction<'a> { fn populated_input(&self, index: usize) -> (&TransactionInput, &UtxoEntry) { (&self.tx.inputs[index], &self.entries[index]) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.entries.get(index) + } } impl AsRef for Transaction { @@ -507,6 +517,10 @@ impl> VerifiableTransaction for MutableTransactionVerifiab self.inner.entries[index].as_ref().expect("expected to be called only following full UTXO population"), ) } + + fn utxo(&self, index: usize) -> Option<&UtxoEntry> { + self.inner.entries.get(index).and_then(Option::as_ref) + } } /// Specialized impl for `T=Arc` diff --git a/consensus/core/src/tx/script_public_key.rs b/consensus/core/src/tx/script_public_key.rs index dfed2ab5c..b0a475606 100644 --- a/consensus/core/src/tx/script_public_key.rs +++ b/consensus/core/src/tx/script_public_key.rs @@ -94,7 +94,7 @@ impl Serialize for ScriptPublicKey { } } -impl<'de: 'a, 'a> Deserialize<'de> for ScriptPublicKey { +impl<'de> Deserialize<'de> for ScriptPublicKey { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -374,7 +374,7 @@ impl BorshDeserialize for ScriptPublicKey { type CastError = workflow_wasm::error::Error; impl TryCastFromJs for ScriptPublicKey { type Error = workflow_wasm::error::Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> where R: AsRef + 'a, { diff --git a/consensus/pow/src/lib.rs b/consensus/pow/src/lib.rs index c3fbdd867..1eaa1a2ce 100644 --- a/consensus/pow/src/lib.rs +++ b/consensus/pow/src/lib.rs @@ -54,12 +54,22 @@ impl State { } pub fn calc_block_level(header: &Header, max_block_level: BlockLevel) -> BlockLevel { + let (block_level, _) = calc_block_level_check_pow(header, max_block_level); + block_level +} + +pub fn calc_block_level_check_pow(header: &Header, max_block_level: BlockLevel) -> (BlockLevel, bool) { if header.parents_by_level.is_empty() { - return max_block_level; // Genesis has the max block level + return (max_block_level, true); // Genesis has the max block level } let state = State::new(header); - let (_, pow) = state.check_pow(header.nonce); + let (passed, pow) = state.check_pow(header.nonce); + let block_level = calc_level_from_pow(pow, max_block_level); + (block_level, passed) +} + +pub fn calc_level_from_pow(pow: Uint256, max_block_level: BlockLevel) -> BlockLevel { let signed_block_level = max_block_level as i64 - pow.bits() as i64; max(signed_block_level, 0) as BlockLevel } diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f8af5fb5a 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + 
self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 1731729a3..99719d4ac 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -64,7 +64,7 @@ use kaspa_consensus_core::{ merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, @@ -81,6 +81,7 @@ use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_txscript::caches::TxScriptCacheCounters; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::{ cmp::Reverse, @@ -240,23 +241,13 @@ impl Consensus { body_receiver, virtual_sender, block_processors_pool, + params, db.clone(), - storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), - storage.headers_store.clone(), - storage.block_transactions_store.clone(), - storage.body_tips_store.clone(), - services.reachability_service.clone(), - services.coinbase_manager.clone(), - services.mass_calculator.clone(), - services.transaction_validator.clone(), - services.window_manager.clone(), - params.max_block_mass, - params.genesis.clone(), + &storage, + &services, pruning_lock.clone(), notification_root.clone(), counters.clone(), - params.storage_mass_activation_daa_score, )); let virtual_processor = Arc::new(VirtualStateProcessor::new( @@ -499,7 +490,7 @@ impl ConsensusApi for Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -532,7 +523,7 @@ impl ConsensusApi for Consensus { for child in initial_children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -559,7 +550,7 @@ impl ConsensusApi for Consensus { for child in children { if visited.insert(child) { - let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + let blue_work = self.ghostdag_store.get_blue_work(child).unwrap(); heap.push(Reverse(SortableBlock::new(child, blue_work))); } } @@ -752,12 +743,16 @@ impl ConsensusApi for Consensus { } fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { - let storage_mass_activated = pov_daa_score > self.config.storage_mass_activation_daa_score; + let storage_mass_activated = self.config.storage_mass_activation.is_active(pov_daa_score); calc_hash_merkle_root(txs.iter(), storage_mass_activated) } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> { - 
self.services.pruning_proof_manager.validate_pruning_point_proof(proof) + fn validate_pruning_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> Result<(), PruningImportError> { + self.services.pruning_proof_manager.validate_pruning_point_proof(proof, proof_metadata) } fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { @@ -771,9 +766,16 @@ impl ConsensusApi for Consensus { fn append_imported_pruning_point_utxos(&self, utxoset_chunk: &[(TransactionOutpoint, UtxoEntry)], current_multiset: &mut MuHash) { let mut pruning_utxoset_write = self.pruning_utxoset_stores.write(); pruning_utxoset_write.utxo_set.write_many(utxoset_chunk).unwrap(); - for (outpoint, entry) in utxoset_chunk { - current_multiset.add_utxo(outpoint, entry); - } + + // Parallelize processing using the context of an existing thread pool. + let inner_multiset = self.virtual_processor.install(|| { + utxoset_chunk.par_iter().map(|(outpoint, entry)| MuHash::from_utxo(outpoint, entry)).reduce(MuHash::new, |mut a, b| { + a.combine(&b); + a + }) + }); + + current_multiset.combine(&inner_multiset); } fn import_pruning_point_utxo_set(&self, new_pruning_point: Hash, imported_utxo_multiset: MuHash) -> PruningImportResult<()> { @@ -902,7 +904,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -978,7 +980,7 @@ impl ConsensusApi for Consensus { Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::DifficultyWindow) .unwrap() .deref() .iter() @@ -1017,7 +1019,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 38e283a14..06abb4e0b 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,8 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -82,20 +81,20 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let 
window_manager = DualWindowManager::new( &params.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), storage.block_window_cache_for_difficulty.clone(), storage.block_window_cache_for_past_median_time.clone(), params.max_difficulty_target, params.target_time_per_block, - params.sampling_activation_daa_score, + params.sampling_activation, params.legacy_difficulty_window_size, params.sampled_difficulty_window_size, params.min_difficulty_window_len, @@ -110,27 +109,16 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - ) - }) - .collect_vec(), + let ghostdag_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -157,7 +145,9 @@ impl ConsensusServices { params.coinbase_maturity, tx_script_cache_counters, mass_calculator.clone(), - params.storage_mass_activation_daa_score, + params.storage_mass_activation, + params.kip10_activation, + params.payload_activation, ); let pruning_point_manager = PruningPointManager::new( @@ -165,7 +155,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -184,7 +174,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -199,7 +189,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -213,8 +203,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 89a0f5e26..ad3b95d1b 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,8 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc<RwLock<DbSelectedChainStore>>, // Append-only stores - pub ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>, - pub ghostdag_primary_store: Arc<DbGhostdagStore>, + pub ghostdag_store: Arc<DbGhostdagStore>, pub headers_store: Arc<DbHeadersStore>, pub block_transactions_store: Arc<DbBlockTransactionsStore>, pub past_pruning_points_store: Arc<DbPastPruningPointsStore>, @@ -193,19 +192,12 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( -
(0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,8 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_stores, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index a705d9ecc..87790d093 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -13,11 +13,8 @@ use kaspa_hashes::Hash; use kaspa_notify::subscription::context::SubscriptionContext; use parking_lot::RwLock; -use kaspa_database::create_temp_db; -use kaspa_database::prelude::ConnBuilder; -use std::future::Future; -use std::{sync::Arc, thread::JoinHandle}; - +use super::services::{DbDagTraversalManager, DbGhostdagManager, DbWindowManager}; +use super::Consensus; use crate::pipeline::virtual_processor::test_block_builder::TestBlockBuilder; use crate::processes::window::WindowManager; use crate::{ @@ -35,9 +32,10 @@ use crate::{ pipeline::{body_processor::BlockBodyProcessor, virtual_processor::VirtualStateProcessor, ProcessingCounters}, test_helpers::header_from_precomputed_hash, }; - -use super::services::{DbDagTraversalManager, DbGhostdagManager, DbWindowManager}; -use super::Consensus; +use kaspa_database::create_temp_db; +use kaspa_database::prelude::ConnBuilder; +use std::future::Future; +use std::{sync::Arc, thread::JoinHandle}; pub struct TestConsensus { params: Params, @@ -118,7 +116,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -138,6 +136,12 @@ impl TestConsensus { self.validate_and_insert_block(self.build_block_with_parents(hash, parents).to_immutable()).virtual_state_task } + /// Adds a valid block with the given transactions and parents to the consensus. + /// + /// # Panics + /// + /// Panics if block builder validation rules are violated. + /// See `kaspa_consensus_core::errors::block::RuleError` for the complete list of possible validation rules. pub fn add_utxo_valid_block_with_parents( &self, hash: Hash, @@ -149,6 +153,12 @@ impl TestConsensus { .virtual_state_task } + /// Builds a valid block with the given transactions, parents, and miner data. + /// + /// # Panics + /// + /// Panics if block builder validation rules are violated. + /// See `kaspa_consensus_core::errors::block::RuleError` for the complete list of possible validation rules. 
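The `# Panics` docs above (and on the builder declared next) make the test-helper contract explicit: rule violations abort the test rather than surfacing as `Err`. A minimal usage sketch; `ConfigBuilder`, `DEVNET_PARAMS`, the async harness and the teardown call mirror conventions used elsewhere in this workspace and should be treated as assumptions, not part of this hunk:

use kaspa_consensus_core::config::{params::DEVNET_PARAMS, ConfigBuilder};

#[tokio::test]
async fn sketch_utxo_valid_block_helpers() {
    let config = ConfigBuilder::new(DEVNET_PARAMS).skip_proof_of_work().build();
    let tc = TestConsensus::new(&config);
    let handles = tc.init();
    // Panics on a block-builder rule violation; otherwise returns the block processing result.
    tc.add_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], vec![]).await.unwrap();
    tc.shutdown(handles);
}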
pub fn build_utxo_valid_block_with_parents( &self, hash: Hash, @@ -201,7 +211,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +243,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/model/services/reachability.rs b/consensus/src/model/services/reachability.rs index 39f5ceba2..a3aa83c7a 100644 --- a/consensus/src/model/services/reachability.rs +++ b/consensus/src/model/services/reachability.rs @@ -9,14 +9,35 @@ use crate::processes::reachability::{inquirer, Result}; use kaspa_hashes::Hash; pub trait ReachabilityService { + /// Checks if `this` block is a chain ancestor of `queried` block (i.e., `this ∈ chain(queried) ∪ {queried}`). + /// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + + /// Result version of [`is_dag_ancestor_of`] (avoids unwrapping internally) fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result; + + /// Returns true if `this` is a DAG ancestor of `queried` (i.e., `queried ∈ future(this) ∪ {this}`). + /// Note: this method will return true if `this == queried`. + /// The complexity of this method is `O(log(|future_covering_set(this)|))` fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; + + /// Checks if `this` is DAG ancestor of any of the blocks in `queried`. See [`is_dag_ancestor_of`] as well. fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator) -> bool; + + /// Checks if any of the blocks in `list` is DAG ancestor of `queried`. See [`is_dag_ancestor_of`] as well. fn is_any_dag_ancestor(&self, list: &mut impl Iterator, queried: Hash) -> bool; + + /// Result version of [`is_any_dag_ancestor`] (avoids unwrapping internally) fn is_any_dag_ancestor_result(&self, list: &mut impl Iterator, queried: Hash) -> Result; + + /// Finds the tree child of `ancestor` which is also a chain ancestor of `descendant`. + /// (A "tree child of X" is a block which X is its chain parent) fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; + + /// Returns the chain parent of `this` fn get_chain_parent(&self, this: Hash) -> Hash; + + /// Checks whether `this` has reachability data fn has_reachability_data(&self, this: Hash) -> bool; } @@ -154,7 +175,6 @@ impl MTReachabilityService { /// a compromise where the lock is released every constant number of items. 
/// /// TODO: decide if these alternatives require overall system benchmarking - struct BackwardChainIterator<T: ReachabilityStoreReader + ?Sized> { store: Arc<RwLock<T>>, current: Option<Hash>, diff --git a/consensus/src/model/stores/block_window_cache.rs b/consensus/src/model/stores/block_window_cache.rs index 5fee0e1f8..2088cd2d1 100644 --- a/consensus/src/model/stores/block_window_cache.rs +++ b/consensus/src/model/stores/block_window_cache.rs @@ -1,6 +1,6 @@ use crate::processes::ghostdag::ordering::SortableBlock; use kaspa_consensus_core::BlockHasher; -use kaspa_database::prelude::Cache; +use kaspa_database::prelude::{Cache, CachePolicy}; use kaspa_hashes::Hash; use kaspa_utils::mem_size::MemSizeEstimator; use std::{ @@ -10,7 +10,7 @@ use std::{ sync::Arc, }; -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum WindowOrigin { Full, Sampled, } @@ -54,16 +54,46 @@ impl DerefMut for BlockWindowHeap { } } +/// A newtype wrapper over [`Cache`] meant to prevent erroneous reads of windows from different origins +#[derive(Clone)] +pub struct BlockWindowCacheStore { + inner: Cache<Hash, Arc<BlockWindowHeap>, BlockHasher>, +} + +impl BlockWindowCacheStore { + pub fn new(policy: CachePolicy) -> Self { + Self { inner: Cache::new(policy) } + } + + pub fn contains_key(&self, key: &Hash) -> bool { + self.inner.contains_key(key) + } + + pub fn remove(&self, key: &Hash) -> Option<Arc<BlockWindowHeap>> { + self.inner.remove(key) + } +} + /// Reader API for `BlockWindowCacheStore`. pub trait BlockWindowCacheReader { - fn get(&self, hash: &Hash) -> Option<Arc<BlockWindowHeap>>; + /// Get the cache entry for this hash, conditioned on *it matching the provided origin*. + /// We require the origin to be provided in order to prevent reader errors. + fn get(&self, hash: &Hash, origin: WindowOrigin) -> Option<Arc<BlockWindowHeap>>; } -pub type BlockWindowCacheStore = Cache<Hash, Arc<BlockWindowHeap>, BlockHasher>; - impl BlockWindowCacheReader for BlockWindowCacheStore { #[inline(always)] - fn get(&self, hash: &Hash) -> Option<Arc<BlockWindowHeap>> { - self.get(hash) + fn get(&self, hash: &Hash, origin: WindowOrigin) -> Option<Arc<BlockWindowHeap>> { + self.inner.get(hash).and_then(|win| if win.origin() == origin { Some(win) } else { None }) + } +} + +pub trait BlockWindowCacheWriter { + fn insert(&self, hash: Hash, window: Arc<BlockWindowHeap>); +} + +impl BlockWindowCacheWriter for BlockWindowCacheStore { + fn insert(&self, hash: Hash, window: Arc<BlockWindowHeap>) { + self.inner.insert(hash, window); + } } diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index bcf860b3a..f74fa125b 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -48,6 +48,7 @@ impl MemSizeEstimator for GhostdagData { impl MemSizeEstimator for CompactGhostdagData {} impl From<&GhostdagData> for CompactGhostdagData { + #[inline(always)] fn from(value: &GhostdagData) -> Self { Self { blue_score: value.blue_score, blue_work: value.blue_work, selected_parent: value.selected_parent } } @@ -115,7 +116,7 @@ impl GhostdagData { pub fn ascending_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = SortableBlock> + '_ { + ) -> impl Iterator<Item = SortableBlock> + 'a { self.mergeset_blues .iter() .skip(1) // Skip the selected parent @@ -138,7 +139,7 @@ impl GhostdagData { pub fn descending_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = SortableBlock> + '_ { + ) -> impl Iterator<Item = SortableBlock> + 'a { self.mergeset_blues .iter() .skip(1) // Skip the selected parent @@ -174,7 +175,7 @@ impl GhostdagData { pub fn consensus_ordered_mergeset<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Hash> + '_ { + ) -> impl Iterator<Item = Hash> + 'a { once(self.selected_parent).chain(self.ascending_mergeset_without_selected_parent(store).map(|s| s.hash)) } @@ -182,7 +183,7 @@ impl GhostdagData { pub fn consensus_ordered_mergeset_without_selected_parent<'a>( &'a self, store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Hash> + '_ { + ) -> impl Iterator<Item = Hash> + 'a { self.ascending_mergeset_without_selected_parent(store).map(|s| s.hash) } @@ -270,6 +271,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc<DB>, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/model/stores/relations.rs b/consensus/src/model/stores/relations.rs index 4734f099a..2971a3a87 100644 --- a/consensus/src/model/stores/relations.rs +++ b/consensus/src/model/stores/relations.rs @@ -145,7 +145,7 @@ pub struct StagingRelationsStore<'a> { children_deletions: BlockHashMap, } -impl<'a> ChildrenStore for StagingRelationsStore<'a> { +impl ChildrenStore for StagingRelationsStore<'_> { fn insert_child(&mut self, _writer: impl DbWriter, parent: Hash, child: Hash) -> Result<(), StoreError> { self.check_not_in_entry_deletions(parent)?; self.check_not_in_children_deletions(parent, child)?; // We expect deletion to be permanent diff --git a/consensus/src/model/stores/utxo_diffs.rs b/consensus/src/model/stores/utxo_diffs.rs index 079f08ecb..20ddd9b10 100644 --- a/consensus/src/model/stores/utxo_diffs.rs +++ b/consensus/src/model/stores/utxo_diffs.rs @@ -14,7 +14,6 @@ use rocksdb::WriteBatch; /// blocks. However, once the diff is computed, it is permanent. This store has a relation to /// block status, such that if a block has status `StatusUTXOValid` then it is expected to have /// utxo diff data as well as utxo multiset data and acceptance data.
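The origin-checked reader is the heart of the `BlockWindowCacheStore` wrapper introduced above: a cached window is returned only when its `WindowOrigin` matches the caller's expectation, so a Full window can never masquerade as a Sampled one. A minimal sketch of that behavior; the pre-built window is assumed, and `insert`/`get` are the trait methods from the hunk above (both traits in scope):

use std::sync::Arc;

fn origin_check_sketch(store: &BlockWindowCacheStore, hash: kaspa_hashes::Hash, full_window: BlockWindowHeap) {
    // The window carries its origin with it.
    debug_assert_eq!(WindowOrigin::Full, full_window.origin());
    store.insert(hash, Arc::new(full_window));
    // A reader expecting a Sampled window gets a clean miss instead of a wrong-origin window:
    assert!(store.get(&hash, WindowOrigin::Sampled).is_none());
    assert!(store.get(&hash, WindowOrigin::Full).is_some());
}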
- pub trait UtxoDiffsStoreReader { fn get(&self, hash: Hash) -> Result, StoreError>; } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index b03643df8..08eb49f63 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -1,13 +1,19 @@ use super::BlockBodyProcessor; use crate::{ errors::{BlockProcessResult, RuleError}, - model::stores::{ghostdag::GhostdagStoreReader, statuses::StatusesStoreReader}, - processes::window::WindowManager, + model::stores::statuses::StatusesStoreReader, + processes::{ + transaction_validator::{ + tx_validation_in_header_context::{LockTimeArg, LockTimeType}, + TransactionValidator, + }, + window::WindowManager, + }, }; use kaspa_consensus_core::block::Block; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; -use kaspa_utils::option::OptionExtensions; +use once_cell::unsync::Lazy; use std::sync::Arc; impl BlockBodyProcessor { @@ -18,13 +24,20 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + // Use lazy evaluation to avoid unnecessary work, as most of the time we expect the txs not to have lock time. + let lazy_pmt_res = Lazy::new(|| self.window_manager.calc_past_median_time_for_known_hash(block.hash())); + for tx in block.transactions.iter() { - if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { + let lock_time_arg = match TransactionValidator::get_lock_time_type(tx) { + LockTimeType::Finalized => LockTimeArg::Finalized, + LockTimeType::DaaScore => LockTimeArg::DaaScore(block.header.daa_score), + // We only evaluate the pmt calculation when actually needed + LockTimeType::Time => LockTimeArg::MedianTime((*lazy_pmt_res).clone()?), + }; + if let Err(e) = self.transaction_validator.validate_tx_in_header_context(tx, block.header.daa_score, lock_time_arg) { return Err(RuleError::TxInContextFailed(tx.id(), e)); - } + }; } - Ok(()) } @@ -37,7 +50,7 @@ impl BlockBodyProcessor { .copied() .filter(|parent| { let status_option = statuses_read_guard.get(*parent).unwrap_option(); - status_option.is_none_or_ex(|s| !s.has_block_body()) + status_option.is_none_or(|s| !s.has_block_body()) }) .collect(); if !missing.is_empty() { diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index c413552b9..4c6139846 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -6,7 +6,7 @@ use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root, tx::Tran impl BlockBodyProcessor { pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { - let storage_mass_activated = block.header.daa_score > self.storage_mass_activation_daa_score; + let storage_mass_activated = self.storage_mass_activation.is_active(block.header.daa_score); Self::check_has_transactions(block)?; Self::check_hash_merkle_root(block, storage_mass_activated)?; diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 4191a01ce..7bad12ce3 100644 --- 
a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -1,5 +1,8 @@ use crate::{ - consensus::services::DbWindowManager, + consensus::{ + services::{ConsensusServices, DbWindowManager}, + storage::ConsensusStorage, + }, errors::{BlockProcessResult, RuleError}, model::{ services::reachability::MTReachabilityService, @@ -23,7 +26,10 @@ use crossbeam_channel::{Receiver, Sender}; use kaspa_consensus_core::{ block::Block, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::genesis::GenesisBlock, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, Params}, + }, mass::MassCalculator, tx::Transaction, }; @@ -81,57 +87,50 @@ pub struct BlockBodyProcessor { counters: Arc, /// Storage mass hardfork DAA score - pub(crate) storage_mass_activation_daa_score: u64, + pub(crate) storage_mass_activation: ForkActivation, } impl BlockBodyProcessor { - #[allow(clippy::too_many_arguments)] pub fn new( receiver: Receiver, sender: Sender, thread_pool: Arc, + params: &Params, db: Arc, - statuses_store: Arc>, - ghostdag_store: Arc, - headers_store: Arc, - block_transactions_store: Arc, - body_tips_store: Arc>, - - reachability_service: MTReachabilityService, - coinbase_manager: CoinbaseManager, - mass_calculator: MassCalculator, - transaction_validator: TransactionValidator, - window_manager: DbWindowManager, - max_block_mass: u64, - genesis: GenesisBlock, + storage: &Arc, + services: &Arc, + pruning_lock: SessionLock, notification_root: Arc, counters: Arc, - storage_mass_activation_daa_score: u64, ) -> Self { Self { receiver, sender, thread_pool, db, - statuses_store, - reachability_service, - ghostdag_store, - headers_store, - block_transactions_store, - body_tips_store, - coinbase_manager, - mass_calculator, - transaction_validator, - window_manager, - max_block_mass, - genesis, + + max_block_mass: params.max_block_mass, + genesis: params.genesis.clone(), + + statuses_store: storage.statuses_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), + headers_store: storage.headers_store.clone(), + block_transactions_store: storage.block_transactions_store.clone(), + body_tips_store: storage.body_tips_store.clone(), + + reachability_service: services.reachability_service.clone(), + coinbase_manager: services.coinbase_manager.clone(), + mass_calculator: services.mass_calculator.clone(), + transaction_validator: services.transaction_validator.clone(), + window_manager: services.window_manager.clone(), + pruning_lock, task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - storage_mass_activation_daa_score, + storage_mass_activation: params.storage_mass_activation, } } diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index 47094ed7f..cce641105 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -9,7 +9,7 @@ use kaspa_consensus_core::header::Header; use kaspa_consensus_core::BlockLevel; use kaspa_core::time::unix_now; use kaspa_database::prelude::StoreResultExtensions; -use std::cmp::max; +use kaspa_pow::calc_level_from_pow; impl HeaderProcessor { /// Validates the header in isolation including pow check against header declared bits. 
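The in-context body validation above defers the past-median-time computation behind a `once_cell` `Lazy`, so the window is only materialized if some transaction actually carries a time-based lock. A distilled sketch of the idiom; the lock-time classification and the cost function are illustrative stand-ins, not this crate's API:

use once_cell::unsync::Lazy;

fn check_lock_times(lock_times: &[Option<u64>], daa_score: u64) -> bool {
    // Computed at most once, and only if some tx actually needs it.
    let pmt = Lazy::new(|| expensive_past_median_time());
    lock_times.iter().all(|lock| match lock {
        None => true, // finalized: nothing to evaluate
        Some(t) if *t < 500_000_000 => *t <= daa_score, // DAA-score lock (illustrative threshold)
        Some(t) => *t <= *pmt, // time lock: the first hit triggers the lazy computation
    })
}

fn expensive_past_median_time() -> u64 {
    // Stand-in for window_manager.calc_past_median_time_for_known_hash(..)
    1_700_000_000_000
}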
@@ -102,8 +102,7 @@ impl HeaderProcessor { let state = kaspa_pow::State::new(header); let (passed, pow) = state.check_pow(header.nonce); if passed || self.skip_proof_of_work { - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - Ok(max(signed_block_level, 0) as BlockLevel) + Ok(calc_level_from_pow(pow, self.max_block_level)) } else { Err(RuleError::InvalidPoW) } diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index a4dfb8b1e..7764e1c15 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -35,7 +35,7 @@ impl HeaderProcessor { ctx.mergeset_non_daa = Some(daa_window.mergeset_non_daa); if header.bits != expected_bits { - return Err(RuleError::UnexpectedDifficulty(header.bits, expected_bits)); + return Err(RuleError::UnexpectedDifficulty(header.hash, header.bits, expected_bits)); } ctx.block_window_for_difficulty = Some(daa_window.window); diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 6c93b91d9..f467b6d97 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -10,7 +10,7 @@ use crate::{ model::{ services::reachability::MTReachabilityService, stores::{ - block_window_cache::{BlockWindowCacheStore, BlockWindowHeap}, + block_window_cache::{BlockWindowCacheStore, BlockWindowCacheWriter, BlockWindowHeap}, daa::DbDaaStore, depth::DbDepthStore, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. 
/// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + self.ghostdag_data.as_ref().unwrap() } } @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_stores: Arc>>, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_managers: Arc>, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -344,18 +344,14 @@ impl HeaderProcessor { .collect_vec() } - /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) + /// Runs the GHOSTDAG algorithm and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); - - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + let ghostdag_data = self + .ghostdag_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,10 +365,8 @@ impl HeaderProcessor { // // Append-only stores: these require no lock and hence done first in order to reduce locking time // + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap(); - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -393,8 +387,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. 
However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_data.selected_parent; + let mut reachability_mergeset = ghostdag_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. @@ -448,10 +442,8 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + // This data might have been already written when applying the pruning proof. + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -491,8 +483,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 35dc211d5..2de19c265 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbParentsManager, DbPruningPointManager}, + services::{ConsensusServices, DbParentsManager, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, parents_manager: DbParentsManager, @@ -107,7 +106,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), parents_manager: services.parents_manager.clone(), @@ -284,7 +282,7 @@ impl PruningProcessor { let mut batch = WriteBatch::default(); // At this point keep_relations only holds level-0 relations which is the correct filtering criteria for primary GHOSTDAG for kept in keep_relations.keys().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains_key(&h)) { @@ -296,7 +294,7 @@ impl 
PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -444,7 +442,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[lower_level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[lower_level].delete_batch(&mut batch, current).unwrap_option(); + + if lower_level == 0 { + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + } } } else { // Count only blocks which get fully pruned including DAG relations @@ -463,9 +464,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); }); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 88fee97bf..a8e1f7f2f 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -16,6 +16,7 @@ use crate::{ stores::{ acceptance_data::{AcceptanceDataStoreReader, DbAcceptanceDataStore}, block_transactions::{BlockTransactionsStoreReader, DbBlockTransactionsStore}, + block_window_cache::{BlockWindowCacheStore, BlockWindowCacheWriter}, daa::DbDaaStore, depth::{DbDepthStore, DepthStoreReader}, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, @@ -42,7 +43,7 @@ use crate::{ processes::{ coinbase::CoinbaseManager, ghostdag::ordering::SortableBlock, - transaction_validator::{errors::TxResult, transaction_validator_populated::TxValidationFlags, TransactionValidator}, + transaction_validator::{errors::TxResult, tx_validation_in_utxo_context::TxValidationFlags, TransactionValidator}, window::WindowManager, }, }; @@ -52,7 +53,7 @@ use kaspa_consensus_core::{ block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, - config::genesis::GenesisBlock, + config::{genesis::GenesisBlock, params::ForkActivation}, header::Header, merkle::calc_hash_merkle_root, pruning::PruningPointsList, @@ -76,6 +77,7 @@ use kaspa_database::prelude::{StoreError, StoreResultEmptyTuple, StoreResultExte use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_notify::{events::EventType, notifier::Notify}; +use once_cell::unsync::Lazy; use super::errors::{PruningImportError, PruningImportResult}; use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; @@ -116,7 +118,7 @@ pub struct VirtualStateProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) 
daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -149,6 +151,10 @@ pub struct VirtualStateProcessor { pub(super) parents_manager: DbParentsManager, pub(super) depth_manager: DbBlockDepthManager, + // block window caches + pub(super) block_window_cache_for_difficulty: Arc, + pub(super) block_window_cache_for_past_median_time: Arc, + // Pruning lock pruning_lock: SessionLock, @@ -159,7 +165,7 @@ pub struct VirtualStateProcessor { counters: Arc, // Storage mass hardfork DAA score - pub(crate) storage_mass_activation_daa_score: u64, + pub(crate) storage_mass_activation: ForkActivation, } impl VirtualStateProcessor { @@ -191,7 +197,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -206,7 +212,10 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), + block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), + + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -220,7 +229,7 @@ impl VirtualStateProcessor { pruning_lock, notification_root, counters, - storage_mass_activation_daa_score: params.storage_mass_activation_daa_score, + storage_mass_activation: params.storage_mass_activation, } } @@ -291,6 +300,10 @@ impl VirtualStateProcessor { let sink_multiset = self.utxo_multisets_store.get(new_sink).unwrap(); let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink, None); + let sink_ghostdag_data = Lazy::new(|| self.ghostdag_store.get_data(new_sink).unwrap()); + // Cache the DAA and Median time windows of the sink for future use, as well as prepare for virtual's window calculations + self.cache_sink_windows(new_sink, prev_sink, &sink_ghostdag_data); + let new_virtual_state = self .calculate_and_commit_virtual_state( virtual_read, @@ -302,12 +315,19 @@ impl VirtualStateProcessor { ) .expect("all possible rule errors are unexpected here"); + let compact_sink_ghostdag_data = if let Some(sink_ghostdag_data) = Lazy::get(&sink_ghostdag_data) { + // If we had to retrieve the full data, we convert it to compact + sink_ghostdag_data.to_compact() + } else { + // Else we query the compact data directly. + self.ghostdag_store.get_compact_data(new_sink).unwrap() + }; + // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. 
If the pruning processor is busy, this step makes sure // the internal channel does not grow needlessly (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); - self.pruning_sender.send(PruningProcessingMessage::Process { sink_ghostdag_data }).unwrap(); + self.pruning_sender.send(PruningProcessingMessage::Process { sink_ghostdag_data: compact_sink_ghostdag_data }).unwrap(); // Emit notifications let accumulated_diff = Arc::new(accumulated_diff); @@ -319,7 +339,7 @@ impl VirtualStateProcessor { .notify(Notification::UtxosChanged(UtxosChangedNotification::new(accumulated_diff, virtual_parents))) .expect("expecting an open unbounded channel"); self.notification_root - .notify(Notification::SinkBlueScoreChanged(SinkBlueScoreChangedNotification::new(sink_ghostdag_data.blue_score))) + .notify(Notification::SinkBlueScoreChanged(SinkBlueScoreChangedNotification::new(compact_sink_ghostdag_data.blue_score))) .expect("expecting an open unbounded channel"); self.notification_root .notify(Notification::VirtualDaaScoreChanged(VirtualDaaScoreChangedNotification::new(new_virtual_state.daa_score))) @@ -404,7 +424,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -540,6 +560,26 @@ impl VirtualStateProcessor { drop(selected_chain_write); } + /// Caches the DAA and median-time windows of the sink block (if needed). Subsequently, virtual's window calculations will + /// naturally hit the cache, finding the sink's windows and building upon them. + fn cache_sink_windows(&self, new_sink: Hash, prev_sink: Hash, sink_ghostdag_data: &impl Deref<Target = Arc<GhostdagData>>) { + // We expect the `new_sink` (or some close-enough ancestor thereof) to already be cached if it is equal to `prev_sink`, + // hence we short-circuit the key checks in such cases, thereby reducing read-lock accesses + if new_sink != prev_sink { + // This is only important for IBD performance, as we incur expensive cache misses otherwise. + // It occurs because we cannot rely on header processing to pre-cache in this scenario. + if !self.block_window_cache_for_difficulty.contains_key(&new_sink) { + self.block_window_cache_for_difficulty + .insert(new_sink, self.window_manager.block_daa_window(sink_ghostdag_data.deref()).unwrap().window); + }; + + if !self.block_window_cache_for_past_median_time.contains_key(&new_sink) { + self.block_window_cache_for_past_median_time + .insert(new_sink, self.window_manager.calc_past_median_time(sink_ghostdag_data.deref()).unwrap().1); + }; + } + } + /// Returns the max number of tips to consider as virtual parents in a single virtual resolve operation. /// /// Guaranteed to be `>= self.max_block_parents` @@ -569,7 +609,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::<BinaryHeap<_>>(); // The initial diff point is the previous sink @@ -591,7 +631,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check.
// Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -609,7 +649,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -767,7 +807,11 @@ impl VirtualStateProcessor { args: &TransactionValidationArgs, ) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; - self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; + self.transaction_validator.validate_tx_in_header_context_with_args( + &mutable_tx.tx, + virtual_daa_score, + virtual_past_median_time, + )?; self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score, args)?; Ok(()) } @@ -778,7 +822,10 @@ impl VirtualStateProcessor { let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + // Run within the thread pool since par_iter might be internally applied to inputs + self.thread_pool.install(|| { + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) + }) } pub fn validate_mempool_transactions_in_parallel( @@ -853,7 +900,11 @@ impl VirtualStateProcessor { // No need to validate the transaction in isolation since we rely on the mining manager to submit transactions // which were previously validated through `validate_mempool_transaction_and_populate`, hence we only perform // in-context validations - self.transaction_validator.utxo_free_tx_validation(tx, virtual_state.daa_score, virtual_state.past_median_time)?; + self.transaction_validator.validate_tx_in_header_context_with_args( + tx, + virtual_state.daa_score, + virtual_state.past_median_time, + )?; let ValidatedTransaction { calculated_fee, .. 
} = self.validate_transaction_in_utxo_context(tx, utxo_view, virtual_state.daa_score, TxValidationFlags::Full)?; Ok(calculated_fee) @@ -980,7 +1031,7 @@ impl VirtualStateProcessor { let parents_by_level = self.parents_manager.calc_block_parents(pruning_info.pruning_point, &virtual_state.parents); // Hash according to hardfork activation - let storage_mass_activated = virtual_state.daa_score > self.storage_mass_activation_daa_score; + let storage_mass_activated = self.storage_mass_activation.is_active(virtual_state.daa_score); let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); let accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(virtual_state.accepted_tx_ids.iter().copied()); @@ -1144,7 +1195,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), @@ -1159,6 +1210,15 @@ impl VirtualStateProcessor { true } } + + /// Executes `op` within the thread pool associated with this processor. + pub fn install(&self, op: OP) -> R + where + OP: FnOnce() -> R + Send, + R: Send, + { + self.thread_pool.install(op) + } } enum MergesetIncreaseResult { diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 306f81446..2e9c7ddb4 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -7,13 +7,14 @@ use crate::{ model::stores::{block_transactions::BlockTransactionsStoreReader, daa::DaaStoreReader, ghostdag::GhostdagData}, processes::transaction_validator::{ errors::{TxResult, TxRuleError}, - transaction_validator_populated::TxValidationFlags, + tx_validation_in_utxo_context::TxValidationFlags, }, }; use kaspa_consensus_core::{ acceptance_data::{AcceptedTxEntry, MergesetBlockAcceptanceData}, api::args::TransactionValidationArgs, coinbase::*, + config::params::ForkActivation, hashing, header::Header, mass::Kip9Version, @@ -31,6 +32,7 @@ use kaspa_muhash::MuHash; use kaspa_utils::refs::Refs; use rayon::prelude::*; +use smallvec::{smallvec, SmallVec}; use std::{iter::once, ops::Deref}; /// A context for processing the UTXO state of a block with respect to its selected parent. @@ -81,7 +83,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() @@ -95,12 +97,14 @@ impl VirtualStateProcessor { // No need to fully validate selected parent transactions since selected parent txs were already validated // as part of selected parent UTXO state verification with the exact same UTXO context. 
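The parallel validation path introduced in this file (see `validate_transactions_with_muhash_in_parallel` just below, and the similar map/reduce in `append_imported_pruning_point_utxos` earlier in this diff) leans on the multiset hash being commutative: each worker folds its items into a local accumulator, and the partials can be combined in any order. A distilled sketch of the pattern with rayon; the toy accumulator is a stand-in for `MuHash` (only the `combine` shape mirrors the real API):

use rayon::prelude::*;

// Toy commutative accumulator standing in for kaspa_muhash::MuHash (XOR-fold, illustration only).
#[derive(Default)]
struct ToyMuHash(u64);

impl ToyMuHash {
    fn add(&mut self, item: u64) {
        self.0 ^= item.wrapping_mul(0x9e37_79b9_7f4a_7c15);
    }
    fn combine(&mut self, other: &ToyMuHash) {
        self.0 ^= other.0;
    }
}

fn parallel_multiset(items: &[u64]) -> ToyMuHash {
    items
        .par_iter()
        .map(|&item| {
            // Per-item local accumulator...
            let mut mh = ToyMuHash::default();
            mh.add(item);
            mh
        })
        // ...combined in arbitrary order, which is sound because combine is commutative.
        .reduce(ToyMuHash::default, |mut a, b| {
            a.combine(&b);
            a
        })
}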
let validation_flags = if is_selected_parent { TxValidationFlags::SkipScriptChecks } else { TxValidationFlags::Full }; - let validated_transactions = self.validate_transactions_in_parallel(&txs, &composed_view, pov_daa_score, validation_flags); + let (validated_transactions, inner_multiset) = + self.validate_transactions_with_muhash_in_parallel(&txs, &composed_view, pov_daa_score, validation_flags); + + ctx.multiset_hash.combine(&inner_multiset); let mut block_fee = 0u64; for (validated_tx, _) in validated_transactions.iter() { ctx.mergeset_diff.add_transaction(validated_tx, pov_daa_score).unwrap(); - ctx.multiset_hash.add_transaction(validated_tx, pov_daa_score); ctx.accepted_tx_ids.push(validated_tx.id()); block_fee += validated_tx.calculated_fee; } @@ -229,6 +233,37 @@ impl VirtualStateProcessor { }) } + /// Same as validate_transactions_in_parallel except during the iteration this will also + /// calculate the muhash in parallel for valid transactions + pub(crate) fn validate_transactions_with_muhash_in_parallel<'a, V: UtxoView + Sync>( + &self, + txs: &'a Vec, + utxo_view: &V, + pov_daa_score: u64, + flags: TxValidationFlags, + ) -> (SmallVec<[(ValidatedTransaction<'a>, u32); 2]>, MuHash) { + self.thread_pool.install(|| { + txs + .par_iter() // We can do this in parallel without complications since block body validation already ensured + // that all txs within each block are independent + .enumerate() + .skip(1) // Skip the coinbase tx. + .filter_map(|(i, tx)| self.validate_transaction_in_utxo_context(tx, &utxo_view, pov_daa_score, flags).ok().map(|vtx| { + let mh = MuHash::from_transaction(&vtx, pov_daa_score); + (smallvec![(vtx, i as u32)], mh) + } + )) + .reduce( + || (smallvec![], MuHash::new()), + |mut a, mut b| { + a.0.append(&mut b.0); + a.1.combine(&b.1); + a + }, + ) + }) + } + /// Attempts to populate the transaction with UTXO entries and performs all utxo-related tx validations pub(super) fn validate_transaction_in_utxo_context<'a>( &self, @@ -294,7 +329,8 @@ impl VirtualStateProcessor { // For non-activated nets (mainnet, TN10) we can update mempool rules to KIP9 beta asap. 
For // TN11 we need to hard-fork consensus first (since the new beta rules are more permissive) - let kip9_version = if self.storage_mass_activation_daa_score == u64::MAX { Kip9Version::Beta } else { Kip9Version::Alpha }; + let kip9_version = + if self.storage_mass_activation == ForkActivation::never() { Kip9Version::Beta } else { Kip9Version::Alpha }; // Calc the full contextual mass including storage mass let contextual_mass = self @@ -318,3 +354,60 @@ impl VirtualStateProcessor { Ok(()) } } + +#[cfg(test)] +mod tests { + use itertools::Itertools; + + use super::*; + + #[test] + fn test_rayon_reduce_retains_order() { + // this is an independent test to replicate the behavior of + // validate_txs_in_parallel and validate_txs_with_muhash_in_parallel + // and assert that the order of data is retained when doing par_iter + let data: Vec = (1..=1000).collect(); + + let collected: Vec = data + .par_iter() + .filter_map(|a| { + let chance: f64 = rand::random(); + if chance < 0.05 { + return None; + } + Some(*a) + }) + .collect(); + + println!("collected len: {}", collected.len()); + + collected.iter().tuple_windows().for_each(|(prev, curr)| { + // Data was originally sorted, so we check if they remain sorted after filtering + assert!(prev < curr, "expected {} < {} if original sort was preserved", prev, curr); + }); + + let reduced: SmallVec<[u16; 2]> = data + .par_iter() + .filter_map(|a: &u16| { + let chance: f64 = rand::random(); + if chance < 0.05 { + return None; + } + Some(smallvec![*a]) + }) + .reduce( + || smallvec![], + |mut arr, mut curr_data| { + arr.append(&mut curr_data); + arr + }, + ); + + println!("reduced len: {}", reduced.len()); + + reduced.iter().tuple_windows().for_each(|(prev, curr)| { + // Data was originally sorted, so we check if they remain sorted after filtering + assert!(prev < curr, "expected {} < {} if original sort was preserved", prev, curr); + }); + } +} diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index f79bbed75..d67f922c8 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -72,7 +72,7 @@ impl CoinbaseManager { // Precomputed subsidy by month table for the actual block per second rate // Here values are rounded up so that we keep the same number of rewarding months as in the original 1 BPS table. 
// In a 10 BPS network, the induced increase in total rewards is 51 KAS (see tests::calc_high_bps_total_rewards_delta()) - let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| (SUBSIDY_BY_MONTH_TABLE[i] + bps - 1) / bps); + let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps)); Self { coinbase_payload_script_public_key_max_len, max_coinbase_payload_len, @@ -288,10 +288,7 @@ mod tests { let total_rewards: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| x * SECONDS_PER_MONTH).sum::(); let testnet_11_bps = TESTNET11_PARAMS.bps(); let total_high_bps_rewards_rounded_up: u64 = pre_deflationary_rewards - + SUBSIDY_BY_MONTH_TABLE - .iter() - .map(|x| ((x + testnet_11_bps - 1) / testnet_11_bps * testnet_11_bps) * SECONDS_PER_MONTH) - .sum::(); + + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| (x.div_ceil(testnet_11_bps) * testnet_11_bps) * SECONDS_PER_MONTH).sum::(); let cbm = create_manager(&TESTNET11_PARAMS); let total_high_bps_rewards: u64 = @@ -316,7 +313,7 @@ mod tests { let cbm = create_manager(&network_id.into()); cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { assert_eq!( - (SUBSIDY_BY_MONTH_TABLE[i] + cbm.bps() - 1) / cbm.bps(), + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps()), *x, "{}: locally computed and precomputed values must match", network_id @@ -376,7 +373,7 @@ mod tests { Test { name: "after 32 halvings", daa_score: params.deflationary_phase_daa_score + 32 * blocks_per_halving, - expected: ((DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)) + cbm.bps() - 1) / cbm.bps(), + expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(cbm.bps()), }, Test { name: "just before subsidy depleted", diff --git a/consensus/src/processes/difficulty.rs b/consensus/src/processes/difficulty.rs index f02efdb55..a27da68a2 100644 --- a/consensus/src/processes/difficulty.rs +++ b/consensus/src/processes/difficulty.rs @@ -6,7 +6,7 @@ use crate::model::stores::{ use kaspa_consensus_core::{ config::params::MIN_DIFFICULTY_WINDOW_LEN, errors::difficulty::{DifficultyError, DifficultyResult}, - BlockHashSet, BlueWorkType, + BlockHashSet, BlueWorkType, MAX_WORK_LEVEL, }; use kaspa_math::{Uint256, Uint320}; use std::{ @@ -282,6 +282,16 @@ pub fn calc_work(bits: u32) -> BlueWorkType { res.try_into().expect("Work should not exceed 2**192") } +pub fn level_work(level: u8, max_block_level: u8) -> BlueWorkType { + // Need to make a special condition for level 0 to ensure true work is always used + if level == 0 { + return 0.into(); + } + // We use 256 here so the result corresponds to the work at the level from calc_level_from_pow + let exp = (level as u32) + 256 - (max_block_level as u32); + BlueWorkType::from_u64(1) << exp.min(MAX_WORK_LEVEL as u32) +} + #[derive(Eq)] struct DifficultyBlock { timestamp: u64, @@ -307,3 +317,55 @@ impl Ord for DifficultyBlock { self.timestamp.cmp(&other.timestamp).then_with(|| self.sortable_block.cmp(&other.sortable_block)) } } + +#[cfg(test)] +mod tests { + use kaspa_consensus_core::{BlockLevel, BlueWorkType, MAX_WORK_LEVEL}; + use kaspa_math::{Uint256, Uint320}; + use kaspa_pow::calc_level_from_pow; + + use crate::processes::difficulty::{calc_work, level_work}; + use kaspa_utils::hex::ToHex; + + #[test] + fn test_target_levels() { + let max_block_level: BlockLevel = 225; + for level in 1..=max_block_level { + // required pow for level + let level_target = (Uint320::from_u64(1) << (max_block_level - level).max(MAX_WORK_LEVEL) as u32) - 
Uint320::from_u64(1); + let level_target = Uint256::from_be_bytes(level_target.to_be_bytes()[8..40].try_into().unwrap()); + let calculated_level = calc_level_from_pow(level_target, max_block_level); + + let true_level_work = calc_work(level_target.compact_target_bits()); + let calc_level_work = level_work(level, max_block_level); + + // A "good enough" estimate of level work is within 1% difference from the work computed with the actual level target. + // It's hard to calculate percentages with these large numbers, so to get around using floats + // we multiply the difference by 100. If the result is <= calc_level_work, it means the + // difference must have been less than 1% + let (percent_diff, overflowed) = (true_level_work - calc_level_work).overflowing_mul(BlueWorkType::from_u64(100)); + let is_good_enough = percent_diff <= calc_level_work; + + println!("Level {}:", level); + println!( + " data | {} | {} | {} / {} |", + level_target.compact_target_bits(), + level_target.bits(), + calculated_level, + max_block_level + ); + println!(" pow | {}", level_target.to_hex()); + println!(" work | 0000000000000000{}", true_level_work.to_hex()); + println!(" lvwork | 0000000000000000{}", calc_level_work.to_hex()); + println!(" diff<1% | {}", !overflowed && (is_good_enough)); + + assert!(is_good_enough); + } + } + + #[test] + fn test_base_level_work() { + // Expect that at level 0, the level work is always 0 + assert_eq!(BlueWorkType::from(0), level_work(0, 255)); + } +} diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 8dfe4e793..1032868ee 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions, BlockHashes}, - BlockHashMap, BlueWorkType, HashMapCustomHasher, + BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; use kaspa_utils::refs::Refs; @@ -16,7 +16,7 @@ use crate::{ relations::RelationsStoreReader, }, }, - processes::difficulty::calc_work, + processes::difficulty::{calc_work, level_work}, }; use super::ordering::*; @@ -29,6 +29,15 @@ pub struct GhostdagManager<T: GhostdagStoreReader, S: RelationsStoreReader, U: ReachabilityService, V: HeaderStoreReader> pub(super) reachability_service: U, + + /// Level work is a lower-bound for the amount of work represented by each block. + /// When running GD for higher-level sub-DAGs, this value should be set accordingly + /// to the work represented by that level, and then used as a lower bound + /// for the work calculated from header bits (which depends on current difficulty). + /// For instance, assuming level 80 (i.e., pow hash has at least 80 zeros) is always + /// above the difficulty target, all blocks in it should represent the same amount of + /// work regardless of whether current difficulty requires 20 zeros or 25 zeros.
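A worked instance of the bound described by this doc (the `level_work` field it documents is declared next): with `max_block_level = 225`, a level-80 block is credited at least 2^(80 + 256 - 225) = 2^111 work, independent of the header's difficulty bits. A sketch of the exponent arithmetic mirroring `level_work` from difficulty.rs above; the `MAX_WORK_LEVEL` value used here is an assumption (the real constant lives in kaspa_consensus_core):

// Hypothetical cap, standing in for kaspa_consensus_core::MAX_WORK_LEVEL.
const MAX_WORK_LEVEL: u32 = 191;

fn level_work_exponent(level: u8, max_block_level: u8) -> Option<u32> {
    if level == 0 {
        return None; // level 0 keeps the lower bound ineffective (level_work returns 0)
    }
    // 256 appears so the result matches the work scale used by calc_level_from_pow.
    let exp = (level as u32) + 256 - (max_block_level as u32);
    Some(exp.min(MAX_WORK_LEVEL))
}

fn main() {
    assert_eq!(Some(111), level_work_exponent(80, 225)); // 80 + 256 - 225 = 111
    assert_eq!(None, level_work_exponent(0, 225));
}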
diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs
index 8dfe4e793..1032868ee 100644
--- a/consensus/src/processes/ghostdag/protocol.rs
+++ b/consensus/src/processes/ghostdag/protocol.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;
 use kaspa_consensus_core::{
     blockhash::{self, BlockHashExtensions, BlockHashes},
-    BlockHashMap, BlueWorkType, HashMapCustomHasher,
+    BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher,
 };
 use kaspa_hashes::Hash;
 use kaspa_utils::refs::Refs;
@@ -16,7 +16,7 @@ use crate::{
             relations::RelationsStoreReader,
         },
     },
-    processes::difficulty::calc_work,
+    processes::difficulty::{calc_work, level_work},
 };
 use super::ordering::*;
@@ -29,6 +29,15 @@ pub struct GhostdagManager
     pub(super) reachability_service: U,
+
+    /// Level work is a lower bound on the amount of work represented by each block.
+    /// When running GD for higher-level sub-DAGs, this value should be set
+    /// to the work represented by that level, and it is then used as a lower bound
+    /// for the work calculated from header bits (which depends on current difficulty).
+    /// For instance, assuming level 80 (i.e., the pow hash has at least 80 leading zeros)
+    /// is always above the difficulty target, all blocks in it should represent the same
+    /// amount of work regardless of whether the current difficulty requires 20 zeros or 25 zeros.
+    level_work: BlueWorkType,
 }
 
 impl GhostdagManager {
@@ -40,7 +49,29 @@ impl
         reachability_service: U,
     ) -> Self {
-        Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store }
+        // For ordinary GD, always keep level_work=0 so the lower bound is ineffective
+        Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, level_work: 0.into() }
+    }
+
+    pub fn with_level(
+        genesis_hash: Hash,
+        k: KType,
+        ghostdag_store: Arc<T>,
+        relations_store: S,
+        headers_store: Arc<V>,
+        reachability_service: U,
+        level: BlockLevel,
+        max_block_level: BlockLevel,
+    ) -> Self {
+        Self {
+            genesis_hash,
+            k,
+            ghostdag_store,
+            relations_store,
+            reachability_service,
+            headers_store,
+            level_work: level_work(level, max_block_level),
+        }
     }
 
     pub fn genesis_ghostdag_data(&self) -> GhostdagData {
@@ -113,16 +144,22 @@ impl PruningImportResult<()> {
+        let pruning_point_header = proof[0].last().unwrap().clone();
+        let pruning_point = pruning_point_header.hash;
+
+        // Record the hashes already present in each proof level, since we're going to be mutating the proof passed to us
+        let proof_sets = (0..=self.max_block_level)
+            .map(|level| BlockHashSet::from_iter(proof[level as usize].iter().map(|header| header.hash)))
+            .collect_vec();
+
+        let mut trusted_gd_map: BlockHashMap<GhostdagData> = BlockHashMap::new();
+        for tb in trusted_set.iter() {
+            trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into());
+            let tb_block_level = calc_block_level(&tb.block.header, self.max_block_level);
+
+            (0..=tb_block_level).for_each(|current_proof_level| {
+                // If this block was in the original proof, ignore it
+                if proof_sets[current_proof_level as usize].contains(&tb.block.hash()) {
+                    return;
+                }
+
+                proof[current_proof_level as usize].push(tb.block.header.clone());
+            });
+        }
+
+        proof.iter_mut().for_each(|level_proof| {
+            level_proof.sort_by(|a, b| a.blue_work.cmp(&b.blue_work));
+        });
+
+        self.populate_reachability_and_headers(&proof);
+
+        {
+            let reachability_read = self.reachability_store.read();
+            for tb in trusted_set.iter() {
+                // Header-only trusted blocks are expected to be in the pruning point's past
+                if tb.block.is_header_only() && !reachability_read.is_dag_ancestor_of(tb.block.hash(), pruning_point) {
+                    return Err(PruningImportError::PruningPointPastMissingReachability(tb.block.hash()));
+                }
+            }
+        }
+
+        for (level, headers) in proof.iter().enumerate() {
+            trace!("Applying level {} from the pruning point proof", level);
+            let mut level_ancestors: HashSet<Hash> = HashSet::new();
+            level_ancestors.insert(ORIGIN);
+
+            for header in headers.iter() {
+                let parents = Arc::new(
+                    self.parents_manager
+                        .parents_at_level(header, level as BlockLevel)
+                        .iter()
+                        .copied()
+                        .filter(|parent| level_ancestors.contains(parent))
+                        .collect_vec()
+                        .push_if_empty(ORIGIN),
+                );
+
+                self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap();
+
+                if level == 0 {
+                    let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) {
+                        gd.clone()
+                    } else {
+                        let calculated_gd = self.ghostdag_manager.ghostdag(&parents);
+                        // Override the ghostdag data with the real blue score and blue work
+                        GhostdagData {
+                            blue_score: header.blue_score,
+                            blue_work: header.blue_work,
+                            selected_parent: calculated_gd.selected_parent,
+                            mergeset_blues: calculated_gd.mergeset_blues,
+                            mergeset_reds: calculated_gd.mergeset_reds,
+                            blues_anticone_sizes: calculated_gd.blues_anticone_sizes,
+                        }
+                    };
+                    self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap();
+                }
+
+                level_ancestors.insert(header.hash);
+            }
+        }
+
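The sort_by(blue_work) call above is what makes this single pass safe; a hedged sketch (toy u64 hashes, not part of the diff) of the invariant it relies on, namely that a block's blue_work strictly exceeds that of every parent, so ascending blue_work is a parents-first order:

use std::collections::{HashMap, HashSet};

// Returns true iff every block appears only after all of its known parents;
// parents missing from the map model blocks below the proof (treated as present).
fn parents_first(sorted_by_blue_work: &[u64], parents: &HashMap<u64, Vec<u64>>) -> bool {
    let mut seen = HashSet::new();
    sorted_by_blue_work.iter().all(|hash| {
        let ok = parents.get(hash).map_or(true, |ps| ps.iter().all(|p| seen.contains(p) || !parents.contains_key(p)));
        seen.insert(*hash);
        ok
    })
}

The invariant holds because blue_work accumulates along the selected chain and the selected parent maximizes blue_work among all parents.
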
let virtual_parents = vec![pruning_point]; + let virtual_state = Arc::new(VirtualState { + parents: virtual_parents.clone(), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), + ..VirtualState::default() + }); + self.virtual_stores.write().state.set(virtual_state).unwrap(); + + let mut batch = WriteBatch::default(); + self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap(); + self.headers_selected_tip_store + .write() + .set_batch(&mut batch, SortableBlock { hash: pruning_point, blue_work: pruning_point_header.blue_work }) + .unwrap(); + self.selected_chain_store.write().init_with_pruning_point(&mut batch, pruning_point).unwrap(); + self.depth_store.insert_batch(&mut batch, pruning_point, ORIGIN, ORIGIN).unwrap(); + self.db.write(batch).unwrap(); + + Ok(()) + } + + pub fn populate_reachability_and_headers(&self, proof: &PruningPointProof) { + let capacity_estimate = self.estimate_proof_unique_size(proof); + let mut dag = BlockHashMap::with_capacity(capacity_estimate); + let mut up_heap = BinaryHeap::with_capacity(capacity_estimate); + for header in proof.iter().flatten().cloned() { + if let Vacant(e) = dag.entry(header.hash) { + // pow passing has already been checked during validation + let block_level = calc_block_level(&header, self.max_block_level); + self.headers_store.insert(header.hash, header.clone(), block_level).unwrap(); + + let mut parents = BlockHashSet::with_capacity(header.direct_parents().len() * 2); + // We collect all available parent relations in order to maximize reachability information. + // By taking into account parents from all levels we ensure that the induced DAG has valid + // reachability information for each level-specific sub-DAG -- hence a single reachability + // oracle can serve them all + for level in 0..=self.max_block_level { + for parent in self.parents_manager.parents_at_level(&header, level) { + parents.insert(*parent); + } + } + + struct DagEntry { + header: Arc
, + parents: Arc, + } + + up_heap.push(Reverse(SortableBlock { hash: header.hash, blue_work: header.blue_work })); + e.insert(DagEntry { header, parents: Arc::new(parents) }); + } + } + + debug!("Estimated proof size: {}, actual size: {}", capacity_estimate, dag.len()); + + for reverse_sortable_block in up_heap.into_sorted_iter() { + // TODO: Convert to into_iter_sorted once it gets stable + let hash = reverse_sortable_block.0.hash; + let dag_entry = dag.get(&hash).unwrap(); + + // Filter only existing parents + let parents_in_dag = BinaryHeap::from_iter( + dag_entry + .parents + .iter() + .cloned() + .filter(|parent| dag.contains_key(parent)) + .map(|parent| SortableBlock { hash: parent, blue_work: dag.get(&parent).unwrap().header.blue_work }), + ); + + let reachability_read = self.reachability_store.upgradable_read(); + + // Find the maximal parent antichain from the possibly redundant set of existing parents + let mut reachability_parents: Vec = Vec::new(); + for parent in parents_in_dag.into_sorted_iter() { + if reachability_read.is_dag_ancestor_of_any(parent.hash, &mut reachability_parents.iter().map(|parent| parent.hash)) { + continue; + } + + reachability_parents.push(parent); + } + let reachability_parents_hashes = + BlockHashes::new(reachability_parents.iter().map(|parent| parent.hash).collect_vec().push_if_empty(ORIGIN)); + let selected_parent = reachability_parents.iter().max().map(|parent| parent.hash).unwrap_or(ORIGIN); + + // Prepare batch + let mut batch = WriteBatch::default(); + let mut reachability_relations_write = self.reachability_relations_store.write(); + let mut staging_reachability = StagingReachabilityStore::new(reachability_read); + let mut staging_reachability_relations = StagingRelationsStore::new(&mut reachability_relations_write); + + // Stage + staging_reachability_relations.insert(hash, reachability_parents_hashes.clone()).unwrap(); + let mergeset = unordered_mergeset_without_selected_parent( + &staging_reachability_relations, + &staging_reachability, + selected_parent, + &reachability_parents_hashes, + ); + reachability::add_block(&mut staging_reachability, hash, selected_parent, &mut mergeset.iter().copied()).unwrap(); + + // Commit + let reachability_write = staging_reachability.commit(&mut batch).unwrap(); + staging_reachability_relations.commit(&mut batch).unwrap(); + + // Write + self.db.write(batch).unwrap(); + + // Drop + drop(reachability_write); + drop(reachability_relations_write); + } + } +} diff --git a/consensus/src/processes/pruning_proof/build.rs b/consensus/src/processes/pruning_proof/build.rs new file mode 100644 index 000000000..664eb5981 --- /dev/null +++ b/consensus/src/processes/pruning_proof/build.rs @@ -0,0 +1,527 @@ +use std::{cmp::Reverse, collections::BinaryHeap, sync::Arc}; + +use itertools::Itertools; +use kaspa_consensus_core::{ + blockhash::{BlockHashExtensions, BlockHashes}, + header::Header, + pruning::PruningPointProof, + BlockHashSet, BlockLevel, HashMapCustomHasher, +}; +use kaspa_core::debug; +use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions, DB}; +use kaspa_hashes::Hash; + +use crate::{ + model::{ + services::reachability::ReachabilityService, + stores::{ + ghostdag::{DbGhostdagStore, GhostdagStore, GhostdagStoreReader}, + headers::{HeaderStoreReader, HeaderWithBlockLevel}, + relations::RelationsStoreReader, + }, + }, + processes::{ + ghostdag::{ordering::SortableBlock, protocol::GhostdagManager}, + 
pruning_proof::PruningProofManagerInternalError, + }, +}; + +use super::{PruningProofManager, PruningProofManagerInternalResult}; + +#[derive(Clone)] +struct RelationsStoreInFutureOfRoot { + relations_store: T, + reachability_service: U, + root: Hash, +} + +impl RelationsStoreReader for RelationsStoreInFutureOfRoot { + fn get_parents(&self, hash: Hash) -> Result { + self.relations_store.get_parents(hash).map(|hashes| { + Arc::new(hashes.iter().copied().filter(|h| self.reachability_service.is_dag_ancestor_of(self.root, *h)).collect_vec()) + }) + } + + fn get_children(&self, hash: Hash) -> StoreResult> { + // We assume hash is in future of root + assert!(self.reachability_service.is_dag_ancestor_of(self.root, hash)); + self.relations_store.get_children(hash) + } + + fn has(&self, hash: Hash) -> Result { + if self.reachability_service.is_dag_ancestor_of(self.root, hash) { + Ok(false) + } else { + self.relations_store.has(hash) + } + } + + fn counts(&self) -> Result<(usize, usize), kaspa_database::prelude::StoreError> { + unimplemented!() + } +} + +impl PruningProofManager { + pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { + if pp == self.genesis_hash { + return vec![]; + } + + let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); + let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); + + (0..=self.max_block_level) + .map(|level| { + let level = level as usize; + let selected_tip = selected_tip_by_level[level]; + let block_at_depth_2m = self + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .map_err(|err| format!("level: {}, err: {}", level, err)) + .unwrap(); + + // TODO (relaxed): remove the assertion below + // (New Logic) This is the root we calculated by going through block relations + let root = roots_by_level[level]; + // (Old Logic) This is the root we can calculate given that the GD records are already filled + // The root calc logic below is the original logic before the on-demand higher level GD calculation + // We only need old_root to sanity check the new logic + let old_root = if level != self.max_block_level as usize { + let block_at_depth_m_at_next_level = self + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { + block_at_depth_m_at_next_level + } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { + block_at_depth_2m + } else { + self.find_common_ancestor_in_chain_of_a( + &*ghostdag_stores[level], + block_at_depth_m_at_next_level, + block_at_depth_2m, + ) + .map_err(|err| format!("level: {}, err: {}", level, err)) + .unwrap() + } + } else { + block_at_depth_2m + }; + + // new root is expected to be always an ancestor of old_root because new root takes a safety margin + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + + let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); + let mut queue = BinaryHeap::>::new(); + let mut visited = BlockHashSet::new(); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); + while let Some(current) = 
queue.pop() { + let current = current.0.hash; + if !visited.insert(current) { + continue; + } + + // The second condition is always expected to be true (ghostdag store will have the entry) + // because we are traversing the exact diamond (future(root) â‹‚ past(tip)) for which we calculated + // GD for (see fill_level_proof_ghostdag_data). TODO (relaxed): remove the condition or turn into assertion + if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) + || !ghostdag_stores[level].has(current).is_ok_and(|found| found) + { + continue; + } + + headers.push(self.headers_store.get_header(current).unwrap()); + for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); + } + } + + // TODO (relaxed): remove the assertion below + // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof + let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); + let chain_2m = self + .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .map_err(|err| { + dbg!(level, selected_tip, block_at_depth_2m, root); + format!("Assert 2M chain -- level: {}, err: {}", level, err) + }) + .unwrap(); + let chain_2m_len = chain_2m.len(); + for (i, chain_hash) in chain_2m.into_iter().enumerate() { + if !set.contains(&chain_hash) { + let next_level_tip = selected_tip_by_level[level + 1]; + let next_level_chain_m = + self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); + let next_level_block_m = next_level_chain_m.last().copied().unwrap(); + dbg!(next_level_chain_m.len()); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(level, selected_tip, block_at_depth_2m, root); + panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); + } + } + + headers + }) + .collect_vec() + } + + fn calc_gd_for_all_levels( + &self, + pp_header: &HeaderWithBlockLevel, + temp_db: Arc, + ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); + let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + let mut root_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + let level_usize = level as usize; + let required_block = if level != self.max_block_level { + let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); + let block_at_depth_m_at_next_level = self + .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + Some(block_at_depth_m_at_next_level) + } else { + None + }; + let (store, selected_tip, root) = self + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); + ghostdag_stores[level_usize] = 
Some(store);
+            selected_tip_by_level[level_usize] = Some(selected_tip);
+            root_by_level[level_usize] = Some(root);
+        }
+
+        (
+            ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(),
+            selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(),
+            root_by_level.into_iter().map(Option::unwrap).collect_vec(),
+        )
+    }
+
+    /// Find a sufficient root at a given level by going through the headers store and looking
+    /// for a deep enough level block.
+    /// For each root candidate, fill in the ghostdag data to check whether it actually is deep enough.
+    /// If the root is deep enough, it will satisfy these conditions:
+    /// 1. block at depth 2m at this level ∈ Future(root)
+    /// 2. block at depth m at the next level ∈ Future(root)
+    ///
+    /// Returns: the filled ghostdag store from root to tip, the selected tip and the root
+    fn find_sufficient_root(
+        &self,
+        pp_header: &HeaderWithBlockLevel,
+        level: BlockLevel,
+        current_dag_level: BlockLevel,
+        required_block: Option<Hash>,
+        temp_db: Arc<DB>,
+    ) -> PruningProofManagerInternalResult<(Arc<DbGhostdagStore>, Hash, Hash)> {
+        // Step 1: Determine which selected tip to use
+        let selected_tip = if pp_header.block_level >= level {
+            pp_header.header.hash
+        } else {
+            self.find_selected_parent_header_at_level(&pp_header.header, level)?.hash
+        };
+
+        let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize);
+        let required_level_depth = 2 * self.pruning_proof_m;
+
+        // We only have the headers store (which has level 0 blue_scores) to assemble the proof data from.
+        // We need to look deeper at higher levels (2x deeper every level) to find 2M (plus margin) blocks at that level
+        let mut required_base_level_depth = self.estimated_blue_depth_at_level_0(
+            level,
+            required_level_depth + 100, // We take a safety margin
+            current_dag_level,
+        );
+
+        let mut is_last_level_header;
+        let mut tries = 0;
+
+        let block_at_depth_m_at_next_level = required_block.unwrap_or(selected_tip);
+
+        loop {
+            // Step 2 - Find a deep enough root candidate
+            let block_at_depth_2m = match self.level_block_at_base_depth(level, selected_tip, required_base_level_depth) {
+                Ok((header, is_last_header)) => {
+                    is_last_level_header = is_last_header;
+                    header
+                }
+                Err(e) => return Err(e),
+            };
+
+            let root = if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) {
+                block_at_depth_2m
+            } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) {
+                block_at_depth_m_at_next_level
+            } else {
+                // Find the common ancestor of block_at_depth_m_at_next_level and block_at_depth_2m in the chain of block_at_depth_m_at_next_level
+                let mut common_ancestor = self.headers_store.get_header(block_at_depth_m_at_next_level).unwrap();
+
+                while !self.reachability_service.is_dag_ancestor_of(common_ancestor.hash, block_at_depth_2m) {
+                    common_ancestor = match self.find_selected_parent_header_at_level(&common_ancestor, level) {
+                        Ok(header) => header,
+                        // Try to give this last header a chance at being root
+                        Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => break,
+                        Err(e) => return Err(e),
+                    };
+                }
+
+                common_ancestor.hash
+            };
+
+            if level == 0 {
+                return Ok((self.ghostdag_store.clone(), selected_tip, root));
+            }
+
+            // Step 3 - Fill the ghostdag data from root to tip
+            let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries));
+            let has_required_block = self.fill_level_proof_ghostdag_data(
+                root,
+                pp_header.header.hash,
+                &ghostdag_store,
+                Some(block_at_depth_m_at_next_level),
+                level,
+            );
+
+            // Step 4 - Check if we actually have enough depth.
+            // Need to ensure this does the same 2M+1 depth check that block_at_depth does
+            if has_required_block
+                && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth)
+            {
+                break Ok((ghostdag_store, selected_tip, root));
+            }
+
+            tries += 1;
+            if is_last_level_header {
+                if has_required_block {
+                    // Normally this scenario doesn't occur when syncing with nodes that already have the safety margin change in place.
+                    // However, when syncing with an older node version that doesn't have a safety margin for the proof, it's possible to
+                    // look for 2500 depth worth of headers at a level while the proof only contains about 2000 headers. To remain able
+                    // to sync with such an older node, we proceed anyway as long as we found the required block.
+                    debug!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned. Required block found so trying anyway.");
+                    break Ok((ghostdag_store, selected_tip, root));
+                } else {
+                    panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_base_level_depth} are already pruned");
+                }
+            }
+
+            // If we don't have enough depth now, we need to look deeper
+            required_base_level_depth = (required_base_level_depth as f64 * 1.1) as u64;
+            debug!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_base_level_depth}");
+        }
+    }
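For intuition on the retry loop above, a small standalone sketch of the depth schedule (assuming m = 1000, i.e. an initial base depth of 2m + 100, and no level shift):

fn main() {
    let mut depth: u64 = 2 * 1000 + 100; // 2m + safety margin
    for try_num in 0..4 {
        println!("try {try_num}: search {depth} blue-score units below the tip");
        depth = (depth as f64 * 1.1) as u64; // the 10% backoff used on failure
    }
    // Prints depths 2100, 2310, 2541, 2795: the window widens until the filled
    // GD data is deep enough or the level's headers run out.
}
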
+    /// BFS forward iterates from root until selected tip, ignoring blocks in the antipast of selected_tip.
+    /// For each block along the way, insert that hash into the ghostdag_store.
+    /// If we have a required_block to find, this will return true if that block was found along the way
+    fn fill_level_proof_ghostdag_data(
+        &self,
+        root: Hash,
+        selected_tip: Hash,
+        ghostdag_store: &Arc<DbGhostdagStore>,
+        required_block: Option<Hash>,
+        level: BlockLevel,
+    ) -> bool {
+        let relations_service = RelationsStoreInFutureOfRoot {
+            relations_store: self.level_relations_services[level as usize].clone(),
+            reachability_service: self.reachability_service.clone(),
+            root,
+        };
+        let gd_manager = GhostdagManager::with_level(
+            root,
+            self.ghostdag_k,
+            ghostdag_store.clone(),
+            relations_service.clone(),
+            self.headers_store.clone(),
+            self.reachability_service.clone(),
+            level,
+            self.max_block_level,
+        );
+
+        // Note there is no need to initialize origin since we have a single root
+        ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap();
+
+        let mut topological_heap: BinaryHeap<_> = Default::default();
+        let mut visited = BlockHashSet::new();
+        for child in relations_service.get_children(root).unwrap().read().iter().copied() {
+            topological_heap
+                .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work }));
+        }
+
+        let mut has_required_block = required_block.is_some_and(|required_block| root == required_block);
+        loop {
+            let Some(current) = topological_heap.pop() else {
+                break;
+            };
+            let current_hash = current.0.hash;
+            if !visited.insert(current_hash) {
+                continue;
+            }
+
+            if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) {
+                // We don't care about blocks in the antipast of the selected tip
+                continue;
+            }
+
+            if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) {
+                has_required_block = true;
+            }
+
+            let current_gd = gd_manager.ghostdag(&relations_service.get_parents(current_hash).unwrap());
+
+            ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists();
+
+            for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() {
+                topological_heap
+                    .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work }));
+            }
+        }
+
+        has_required_block
+    }
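The heap-based walk above is the same parents-first idea noted earlier, driven lazily: children are pushed as they are discovered and popped in ascending blue_work, so each block's GD data is computed only after all of its in-scope parents. A minimal standalone model (toy u64 hashes, caller-supplied children; a sketch, not the implementation):

use std::{
    cmp::Reverse,
    collections::{BinaryHeap, HashSet},
};

fn visit_in_blue_work_order(root: u64, children: impl Fn(u64) -> Vec<(u64, u64)>) -> Vec<u64> {
    let mut heap = BinaryHeap::new(); // holds Reverse((blue_work, hash)): a min-heap on blue_work
    let mut visited = HashSet::new();
    let mut order = vec![];
    heap.push(Reverse((0u64, root)));
    while let Some(Reverse((_, hash))) = heap.pop() {
        if !visited.insert(hash) {
            continue; // reachable via multiple parents
        }
        order.push(hash);
        for (child_blue_work, child) in children(hash) {
            heap.push(Reverse((child_blue_work, child)));
        }
    }
    order
}
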
+    // The "current dag level" is the level right before the first level whose parents are
+    // not the same as our header's direct parents.
+    //
+    // Find the current DAG level by going through all the parents at each level,
+    // starting from the bottom level, and seeing which is the first level whose
+    // parents are NOT our current pp_header's direct parents.
+    fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel {
+        let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied());
+        pp_header
+            .parents_by_level
+            .iter()
+            .enumerate()
+            .skip(1)
+            .find_map(|(level, parents)| {
+                if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents {
+                    None
+                } else {
+                    Some((level - 1) as BlockLevel)
+                }
+            })
+            .unwrap_or(self.max_block_level)
+    }
+
+    fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 {
+        level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth)
+    }
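A worked instance of the shift above (the depth and DAG level are assumed values): blocks of level L occur roughly 2^(L - current_dag_level) times less often than level-0 blocks, so covering the same number of level blocks needs exponentially more level-0 depth.

fn estimated_depth(level: u8, level_depth: u64, current_dag_level: u8) -> u64 {
    // Mirrors estimated_blue_depth_at_level_0; checked_shl only rejects shifts >= 64
    level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth)
}

fn main() {
    assert_eq!(estimated_depth(3, 2100, 3), 2100); // at the current DAG level: unchanged
    assert_eq!(estimated_depth(5, 2100, 3), 8400); // two levels up: 4x deeper
    assert_eq!(estimated_depth(2, 2100, 3), 2100); // below it: saturating_sub clamps the shift to 0
}
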
+    /// selected parent at level = the parent of the header at the given level
+    /// with the highest blue_work
+    fn find_selected_parent_header_at_level(
+        &self,
+        header: &Header,
+        level: BlockLevel,
+    ) -> PruningProofManagerInternalResult<Arc<Header>> {
+        // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important
+        // to filter to include only parents that are in relations_service.
+        let sp = self
+            .parents_manager
+            .parents_at_level(header, level)
+            .iter()
+            .copied()
+            .filter(|p| self.level_relations_services[level as usize].has(*p).unwrap())
+            .filter_map(|p| self.headers_store.get_header(p).unwrap_option().map(|h| SortableBlock::new(p, h.blue_work)))
+            .max()
+            .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof("no parents with header".to_string()))?;
+        Ok(self.headers_store.get_header(sp.hash).expect("unwrapped above"))
+    }
+
+    /// Finds the block on a given level that is base_depth deep from the given high block.
+    /// Also returns whether the block was the last one in the level.
+    /// base_depth = the blue score depth at level 0
+    fn level_block_at_base_depth(
+        &self,
+        level: BlockLevel,
+        high: Hash,
+        base_depth: u64,
+    ) -> PruningProofManagerInternalResult<(Hash, bool)> {
+        let high_header = self
+            .headers_store
+            .get_header(high)
+            .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {base_depth}, {err}")))?;
+        let high_header_score = high_header.blue_score;
+        let mut current_header = high_header;
+
+        let mut is_last_header = false;
+
+        while current_header.blue_score + base_depth >= high_header_score {
+            if current_header.direct_parents().is_empty() {
+                break;
+            }
+
+            current_header = match self.find_selected_parent_header_at_level(&current_header, level) {
+                Ok(header) => header,
+                Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => {
+                    // We want to give this root a shot if all its past is pruned
+                    is_last_header = true;
+                    break;
+                }
+                Err(e) => return Err(e),
+            };
+        }
+        Ok((current_header.hash, is_last_header))
+    }
+
+    /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes.
+    fn chain_up_to_depth(
+        &self,
+        ghostdag_store: &impl GhostdagStoreReader,
+        high: Hash,
+        depth: u64,
+    ) -> Result<Vec<Hash>, PruningProofManagerInternalError> {
+        let high_gd = ghostdag_store
+            .get_compact_data(high)
+            .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?;
+        let mut current_gd = high_gd;
+        let mut current = high;
+        let mut res = vec![current];
+        while current_gd.blue_score + depth >= high_gd.blue_score {
+            if current_gd.selected_parent.is_origin() {
+                break;
+            }
+            let prev = current;
+            current = current_gd.selected_parent;
+            res.push(current);
+            current_gd = ghostdag_store.get_compact_data(current).map_err(|err| {
+                PruningProofManagerInternalError::BlockAtDepth(format!(
+                    "high: {}, depth: {}, current: {}, high blue score: {}, current blue score: {}, {}",
+                    high, depth, prev, high_gd.blue_score, current_gd.blue_score, err
+                ))
+            })?;
+        }
+        Ok(res)
+    }
+
+    fn find_common_ancestor_in_chain_of_a(
+        &self,
+        ghostdag_store: &impl GhostdagStoreReader,
+        a: Hash,
+        b: Hash,
+    ) -> Result<Hash, PruningProofManagerInternalError> {
+        let a_gd = ghostdag_store
+            .get_compact_data(a)
+            .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?;
+        let mut current_gd = a_gd;
+        let mut current;
+        let mut loop_counter = 0;
+        loop {
+            current = current_gd.selected_parent;
+            loop_counter += 1;
+            if current.is_origin() {
+                break Err(PruningProofManagerInternalError::NoCommonAncestor(format!("a: {a}, b: {b} ({loop_counter} loop steps)")));
+            }
+            if self.reachability_service.is_dag_ancestor_of(current, b) {
+                break Ok(current);
+            }
+            current_gd = ghostdag_store
+                .get_compact_data(current)
+                .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?;
+        }
+    }
+}
diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs
index 6324aa4ee..a9412bbf6 100644
--- a/consensus/src/processes/pruning_proof/mod.rs
+++ b/consensus/src/processes/pruning_proof/mod.rs
@@ -1,35 +1,31 @@
+mod apply;
+mod build;
+mod validate;
+
 use std::{
-    cmp::{max, Reverse},
-    collections::{hash_map::Entry, BinaryHeap},
-    collections::{hash_map::Entry::Vacant, VecDeque},
-    ops::{Deref, DerefMut},
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
+    collections::{
+        hash_map::Entry::{self},
VecDeque, }, + sync::{atomic::AtomicBool, Arc}, }; use itertools::Itertools; -use kaspa_math::int::SignedInteger; use parking_lot::{Mutex, RwLock}; use rocksdb::WriteBatch; use kaspa_consensus_core::{ - blockhash::{self, BlockHashExtensions, BlockHashes, ORIGIN}, - errors::{ - consensus::{ConsensusError, ConsensusResult}, - pruning::{PruningImportError, PruningImportResult}, - }, + blockhash::{self, BlockHashExtensions}, + errors::consensus::{ConsensusError, ConsensusResult}, header::Header, pruning::{PruningPointProof, PruningPointTrustedData}, - trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, + trusted::{TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace}; -use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_core::info; +use kaspa_database::{prelude::StoreResultExtensions, utils::DbLifetime}; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; -use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; use thiserror::Error; use crate::{ @@ -38,32 +34,26 @@ use crate::{ storage::ConsensusStorage, }, model::{ - services::reachability::{MTReachabilityService, ReachabilityService}, + services::{reachability::MTReachabilityService, relations::MTRelationsService}, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{DbGhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, - reachability::{DbReachabilityStore, ReachabilityStoreReader, StagingReachabilityStore}, - relations::{DbRelationsStore, RelationsStoreReader, StagingRelationsStore}, - selected_chain::{DbSelectedChainStore, SelectedChainStore}, + reachability::DbReachabilityStore, + relations::{DbRelationsStore, RelationsStoreReader}, + selected_chain::DbSelectedChainStore, tips::DbTipsStore, - virtual_state::{VirtualState, VirtualStateStore, VirtualStateStoreReader, VirtualStores}, + virtual_state::{VirtualStateStoreReader, VirtualStores}, DB, }, }, - processes::{ - ghostdag::ordering::SortableBlock, reachability::inquirer as reachability, relations::RelationsStoreExtensions, - window::WindowType, - }, + processes::window::WindowType, }; -use super::{ - ghostdag::{mergeset::unordered_mergeset_without_selected_parent, protocol::GhostdagManager}, - window::WindowManager, -}; +use super::{ghostdag::protocol::GhostdagManager, window::WindowManager}; #[derive(Error, Debug)] enum PruningProofManagerInternalError { @@ -75,7 +65,11 @@ enum PruningProofManagerInternalError { #[error("cannot find a common ancestor: {0}")] NoCommonAncestor(String), + + #[error("missing headers to build proof: {0}")] + NotEnoughHeadersToBuildProof(String), } +type PruningProofManagerInternalResult = std::result::Result; struct CachedPruningPointData { pruning_point: Hash, @@ -88,6 +82,16 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc, + ghostdag_stores: Vec>, + relations_stores: Vec, + reachability_stores: Vec>>, + ghostdag_managers: + Vec, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + pub struct PruningProofManager { db: Arc, @@ -95,8 +99,9 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, 
reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, + ghostdag_store: Arc, relations_stores: Arc>>, + level_relations_services: Vec>, pruning_point_store: Arc>, past_pruning_points_store: Arc, virtual_stores: Arc>, @@ -105,7 +110,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -129,7 +134,7 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -145,7 +150,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -155,7 +160,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -168,8 +172,13 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_manager, is_consensus_exiting, + + level_relations_services: (0..=max_block_level) + .map(|level| MTRelationsService::new(storage.relations_stores.clone().clone(), level)) + .collect_vec(), } } @@ -181,10 +190,7 @@ impl PruningProofManager { continue; } - let state = kaspa_pow::State::new(header); - let (_, pow) = state.check_pow(header.nonce); - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - let block_level = max(signed_block_level, 0) as BlockLevel; + let block_level = calc_block_level(header, self.max_block_level); self.headers_store.insert(header.hash, header.clone(), block_level).unwrap(); } @@ -199,560 +205,14 @@ impl PruningProofManager { drop(pruning_point_write); } - pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { - let pruning_point_header = proof[0].last().unwrap().clone(); - let pruning_point = pruning_point_header.hash; - - let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash)); - let mut trusted_gd_map: BlockHashMap = BlockHashMap::new(); - for tb in trusted_set.iter() { - trusted_gd_map.insert(tb.block.hash(), tb.ghostdag.clone().into()); - if proof_zero_set.contains(&tb.block.hash()) { - continue; - } - - proof[0].push(tb.block.header.clone()); - } - - proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work)); - self.populate_reachability_and_headers(&proof); - - { - let reachability_read = self.reachability_store.read(); - for tb in trusted_set.iter() { - // Header-only trusted blocks are expected to be in pruning point past - if tb.block.is_header_only() && !reachability_read.is_dag_ancestor_of(tb.block.hash(), pruning_point) { - return Err(PruningImportError::PruningPointPastMissingReachability(tb.block.hash())); - } - } - } - - for (level, headers) in proof.iter().enumerate() { - trace!("Applying level {} from the pruning point proof", 
level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); - for header in headers.iter() { - let parents = Arc::new( - self.parents_manager - .parents_at_level(header, level as BlockLevel) - .iter() - .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) - .collect_vec() - .push_if_empty(ORIGIN), - ); - - self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { - gd.clone() - } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); - // Override the ghostdag data with the real blue score and blue work - GhostdagData { - blue_score: header.blue_score, - blue_work: header.blue_work, - selected_parent: calculated_gd.selected_parent, - mergeset_blues: calculated_gd.mergeset_blues.clone(), - mergeset_reds: calculated_gd.mergeset_reds.clone(), - blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), - } - } - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); - } - } - - let virtual_parents = vec![pruning_point]; - let virtual_state = Arc::new(VirtualState { - parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), - ..VirtualState::default() - }); - self.virtual_stores.write().state.set(virtual_state).unwrap(); - - let mut batch = WriteBatch::default(); - self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap(); - self.headers_selected_tip_store - .write() - .set_batch(&mut batch, SortableBlock { hash: pruning_point, blue_work: pruning_point_header.blue_work }) - .unwrap(); - self.selected_chain_store.write().init_with_pruning_point(&mut batch, pruning_point).unwrap(); - self.depth_store.insert_batch(&mut batch, pruning_point, ORIGIN, ORIGIN).unwrap(); - self.db.write(batch).unwrap(); - - Ok(()) - } - + // Used in apply and validate fn estimate_proof_unique_size(&self, proof: &PruningPointProof) -> usize { let approx_history_size = proof[0][0].daa_score; let approx_unique_full_levels = f64::log2(approx_history_size as f64 / self.pruning_proof_m as f64).max(0f64) as usize; proof.iter().map(|l| l.len()).sum::().min((approx_unique_full_levels + 1) * self.pruning_proof_m as usize) } - pub fn populate_reachability_and_headers(&self, proof: &PruningPointProof) { - let capacity_estimate = self.estimate_proof_unique_size(proof); - let mut dag = BlockHashMap::with_capacity(capacity_estimate); - let mut up_heap = BinaryHeap::with_capacity(capacity_estimate); - for header in proof.iter().flatten().cloned() { - if let Vacant(e) = dag.entry(header.hash) { - let state = kaspa_pow::State::new(&header); - let (_, pow) = state.check_pow(header.nonce); // TODO: Check if pow passes - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - let block_level = max(signed_block_level, 0) as BlockLevel; - self.headers_store.insert(header.hash, header.clone(), block_level).unwrap(); - - let mut parents = BlockHashSet::with_capacity(header.direct_parents().len() * 2); - // We collect all available parent relations in order to maximize reachability information. 
- // By taking into account parents from all levels we ensure that the induced DAG has valid - // reachability information for each level-specific sub-DAG -- hence a single reachability - // oracle can serve them all - for level in 0..=self.max_block_level { - for parent in self.parents_manager.parents_at_level(&header, level) { - parents.insert(*parent); - } - } - - struct DagEntry { - header: Arc
, - parents: Arc, - } - - up_heap.push(Reverse(SortableBlock { hash: header.hash, blue_work: header.blue_work })); - e.insert(DagEntry { header, parents: Arc::new(parents) }); - } - } - - debug!("Estimated proof size: {}, actual size: {}", capacity_estimate, dag.len()); - - for reverse_sortable_block in up_heap.into_sorted_iter() { - // TODO: Convert to into_iter_sorted once it gets stable - let hash = reverse_sortable_block.0.hash; - let dag_entry = dag.get(&hash).unwrap(); - - // Filter only existing parents - let parents_in_dag = BinaryHeap::from_iter( - dag_entry - .parents - .iter() - .cloned() - .filter(|parent| dag.contains_key(parent)) - .map(|parent| SortableBlock { hash: parent, blue_work: dag.get(&parent).unwrap().header.blue_work }), - ); - - let reachability_read = self.reachability_store.upgradable_read(); - - // Find the maximal parent antichain from the possibly redundant set of existing parents - let mut reachability_parents: Vec = Vec::new(); - for parent in parents_in_dag.into_sorted_iter() { - if reachability_read.is_dag_ancestor_of_any(parent.hash, &mut reachability_parents.iter().map(|parent| parent.hash)) { - continue; - } - - reachability_parents.push(parent); - } - let reachability_parents_hashes = - BlockHashes::new(reachability_parents.iter().map(|parent| parent.hash).collect_vec().push_if_empty(ORIGIN)); - let selected_parent = reachability_parents.iter().max().map(|parent| parent.hash).unwrap_or(ORIGIN); - - // Prepare batch - let mut batch = WriteBatch::default(); - let mut reachability_relations_write = self.reachability_relations_store.write(); - let mut staging_reachability = StagingReachabilityStore::new(reachability_read); - let mut staging_reachability_relations = StagingRelationsStore::new(&mut reachability_relations_write); - - // Stage - staging_reachability_relations.insert(hash, reachability_parents_hashes.clone()).unwrap(); - let mergeset = unordered_mergeset_without_selected_parent( - &staging_reachability_relations, - &staging_reachability, - selected_parent, - &reachability_parents_hashes, - ); - reachability::add_block(&mut staging_reachability, hash, selected_parent, &mut mergeset.iter().copied()).unwrap(); - - // Commit - let reachability_write = staging_reachability.commit(&mut batch).unwrap(); - staging_reachability_relations.commit(&mut batch).unwrap(); - - // Write - self.db.write(batch).unwrap(); - - // Drop - drop(reachability_write); - drop(reachability_relations_write); - } - } - - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } - if proof[0].is_empty() { - return Err(PruningImportError::PruningProofNotEnoughHeaders); - } - - let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let proof_pp = proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); - let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); - let headers_store = - Arc::new(DbHeadersStore::new(db.clone(), CachePolicy::Count(headers_estimate), CachePolicy::Count(headers_estimate))); - let ghostdag_stores = (0..=self.max_block_level) - .map(|level| Arc::new(DbGhostdagStore::new(db.clone(), level, cache_policy, cache_policy))) - 
.collect_vec(); - let mut relations_stores = - (0..=self.max_block_level).map(|level| DbRelationsStore::new(db.clone(), level, cache_policy, cache_policy)).collect_vec(); - let reachability_stores = (0..=self.max_block_level) - .map(|level| Arc::new(RwLock::new(DbReachabilityStore::with_block_level(db.clone(), cache_policy, cache_policy, level)))) - .collect_vec(); - - let reachability_services = (0..=self.max_block_level) - .map(|level| MTReachabilityService::new(reachability_stores[level as usize].clone())) - .collect_vec(); - - let ghostdag_managers = ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - self.genesis_hash, - self.ghostdag_k, - ghostdag_store, - relations_stores[level].clone(), - headers_store.clone(), - reachability_services[level].clone(), - ) - }) - .collect_vec(); - - { - let mut batch = WriteBatch::default(); - for level in 0..=self.max_block_level { - let level = level as usize; - reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); - relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); - } - - db.write(batch).unwrap(); - } - - let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; - for level in (0..=self.max_block_level).rev() { - // Before processing this level, check if the process is exiting so we can end early - if self.is_consensus_exiting.load(Ordering::Relaxed) { - return Err(PruningImportError::PruningValidationInterrupted); - } - - info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); - let level_idx = level as usize; - let mut selected_tip = None; - for (i, header) in proof[level as usize].iter().enumerate() { - let header_level = calc_block_level(header, self.max_block_level); - if header_level < level { - return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); - } - - headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); - - let parents = self - .parents_manager - .parents_at_level(header, level) - .iter() - .copied() - .filter(|parent| ghostdag_stores[level_idx].has(*parent).unwrap()) - .collect_vec(); - - // Only the first block at each level is allowed to have no known parents - if parents.is_empty() && i != 0 { - return Err(PruningImportError::PruningProofHeaderWithNoKnownParents(header.hash, level)); - } - - let parents: BlockHashes = parents.push_if_empty(ORIGIN).into(); - - if relations_stores[level_idx].has(header.hash).unwrap() { - return Err(PruningImportError::PruningProofDuplicateHeaderAtLevel(header.hash, level)); - } - - relations_stores[level_idx].insert(header.hash, parents.clone()).unwrap(); - let ghostdag_data = Arc::new(ghostdag_managers[level_idx].ghostdag(&parents)); - ghostdag_stores[level_idx].insert(header.hash, ghostdag_data.clone()).unwrap(); - selected_tip = Some(match selected_tip { - Some(tip) => ghostdag_managers[level_idx].find_selected_parent([tip, header.hash]), - None => header.hash, - }); - - let mut reachability_mergeset = { - let reachability_read = reachability_stores[level_idx].read(); - ghostdag_data - .unordered_mergeset_without_selected_parent() - .filter(|hash| reachability_read.has(*hash).unwrap()) - .collect_vec() // We collect to vector so reachability_read can be released and let `reachability::add_block` use a write lock. 
- .into_iter() - }; - reachability::add_block( - reachability_stores[level_idx].write().deref_mut(), - header.hash, - ghostdag_data.selected_parent, - &mut reachability_mergeset, - ) - .unwrap(); - - if selected_tip.unwrap() == header.hash { - reachability::hint_virtual_selected_parent(reachability_stores[level_idx].write().deref_mut(), header.hash) - .unwrap(); - } - } - - if level < self.max_block_level { - let block_at_depth_m_at_next_level = self - .block_at_depth( - &*ghostdag_stores[level_idx + 1], - selected_tip_by_level[level_idx + 1].unwrap(), - self.pruning_proof_m, - ) - .unwrap(); - if !relations_stores[level_idx].has(block_at_depth_m_at_next_level).unwrap() { - return Err(PruningImportError::PruningProofMissingBlockAtDepthMFromNextLevel(level, level + 1)); - } - } - - if selected_tip.unwrap() != proof_pp - && !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip.unwrap()) - { - return Err(PruningImportError::PruningProofMissesBlocksBelowPruningPoint(selected_tip.unwrap(), level)); - } - - selected_tip_by_level[level_idx] = selected_tip; - } - - let pruning_read = self.pruning_point_store.read(); - let relations_read = self.relations_stores.read(); - let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { - let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } - - let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); - if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { - continue; - } - - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { - let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { - let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); - if parent_blue_work_diff >= selected_tip_blue_work_diff { - return Err(PruningImportError::PruningProofInsufficientBlueWork); - } - } - - return Ok(()); - } - } - - if current_pp == self.genesis_hash { - // If the proof has better tips and the current pruning point is still - // genesis, we consider the proof state to be better. 
- return Ok(()); - } - - for level in (0..=self.max_block_level).rev() { - let level_idx = level as usize; - match relations_read[level_idx].get_parents(current_pp).unwrap_option() { - Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { - return Ok(()); - } - } - None => { - // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. - return Ok(()); - } - } - } - - drop(pruning_read); - drop(relations_read); - drop(db_lifetime); - - Err(PruningImportError::PruningProofNotEnoughHeaders) - } - - pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { - if pp == self.genesis_hash { - return vec![]; - } - - let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); - - (0..=self.max_block_level) - .map(|level| { - let level = level as usize; - let selected_tip = selected_tip_by_level[level]; - let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) - .map_err(|err| format!("level: {}, err: {}", level, err)) - .unwrap(); - - let root = if level != self.max_block_level as usize { - let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) - .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) - .unwrap(); - if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { - block_at_depth_m_at_next_level - } else if self.reachability_service.is_dag_ancestor_of(block_at_depth_2m, block_at_depth_m_at_next_level) { - block_at_depth_2m - } else { - self.find_common_ancestor_in_chain_of_a( - &*self.ghostdag_stores[level], - block_at_depth_m_at_next_level, - block_at_depth_2m, - ) - .map_err(|err| format!("level: {}, err: {}", level, err)) - .unwrap() - } - } else { - block_at_depth_2m - }; - - let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); - let mut queue = BinaryHeap::>::new(); - let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, self.ghostdag_stores[level].get_blue_work(root).unwrap()))); - while let Some(current) = queue.pop() { - let current = current.0.hash; - if !visited.insert(current) { - continue; - } - - if !self.reachability_service.is_dag_ancestor_of(current, selected_tip) { - continue; - } - - headers.push(self.headers_store.get_header(current).unwrap()); - for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); - } - } - - // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof - let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash)); - let chain_2m = self - .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) - .map_err(|err| { - dbg!(level, selected_tip, 
block_at_depth_2m, root); - format!("Assert 2M chain -- level: {}, err: {}", level, err) - }) - .unwrap(); - let chain_2m_len = chain_2m.len(); - for (i, chain_hash) in chain_2m.into_iter().enumerate() { - if !set.contains(&chain_hash) { - let next_level_tip = selected_tip_by_level[level + 1]; - let next_level_chain_m = - self.chain_up_to_depth(&*self.ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); - let next_level_block_m = next_level_chain_m.last().copied().unwrap(); - dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); - dbg!(level, selected_tip, block_at_depth_2m, root); - panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); - } - } - - headers - }) - .collect_vec() - } - - /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. - fn chain_up_to_depth( - &self, - ghostdag_store: &impl GhostdagStoreReader, - high: Hash, - depth: u64, - ) -> Result, PruningProofManagerInternalError> { - let high_gd = ghostdag_store - .get_compact_data(high) - .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; - let mut current_gd = high_gd; - let mut current = high; - let mut res = vec![current]; - while current_gd.blue_score + depth >= high_gd.blue_score { - if current_gd.selected_parent.is_origin() { - break; - } - let prev = current; - current = current_gd.selected_parent; - res.push(current); - current_gd = ghostdag_store.get_compact_data(current).map_err(|err| { - PruningProofManagerInternalError::BlockAtDepth(format!( - "high: {}, depth: {}, current: {}, high blue score: {}, current blue score: {}, {}", - high, depth, prev, high_gd.blue_score, current_gd.blue_score, err - )) - })?; - } - Ok(res) - } - + // Used in build and validate fn block_at_depth( &self, ghostdag_store: &impl GhostdagStoreReader, @@ -780,33 +240,6 @@ impl PruningProofManager { Ok(current) } - fn find_common_ancestor_in_chain_of_a( - &self, - ghostdag_store: &impl GhostdagStoreReader, - a: Hash, - b: Hash, - ) -> Result { - let a_gd = ghostdag_store - .get_compact_data(a) - .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; - let mut current_gd = a_gd; - let mut current; - let mut loop_counter = 0; - loop { - current = current_gd.selected_parent; - loop_counter += 1; - if current.is_origin() { - break Err(PruningProofManagerInternalError::NoCommonAncestor(format!("a: {a}, b: {b} ({loop_counter} loop steps)"))); - } - if self.reachability_service.is_dag_ancestor_of(current, b) { - break Ok(current); - } - current_gd = ghostdag_store - .get_compact_data(current) - .map_err(|err| PruningProofManagerInternalError::FindCommonAncestor(format!("a: {a}, b: {b}, {err}")))?; - } - } - /// Returns the k + 1 chain blocks below this hash (inclusive). If data is missing /// the search is halted and a partial chain is returned. 
/// @@ -816,7 +249,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +269,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -845,16 +278,15 @@ impl PruningProofManager { // PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel) for anticone_block in anticone.iter().copied() { - let window = self - .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) - .unwrap(); + let ghostdag = self.ghostdag_store.get_data(anticone_block).unwrap(); + let window = self.window_manager.block_window(&ghostdag, WindowType::DifficultyWindow).unwrap(); + let cover = self.window_manager.consecutive_cover_for_window(ghostdag, &window); - for hash in window.deref().iter().map(|block| block.0.hash) { + for hash in cover { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +294,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +326,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs new file mode 100644 index 000000000..3262b6590 --- /dev/null +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -0,0 +1,390 @@ +use std::{ + ops::DerefMut, + sync::{atomic::Ordering, Arc}, +}; + +use itertools::Itertools; +use kaspa_consensus_core::{ + blockhash::{BlockHashExtensions, BlockHashes, ORIGIN}, + errors::pruning::{PruningImportError, PruningImportResult}, + header::Header, + pruning::{PruningPointProof, PruningProofMetadata}, + BlockLevel, +}; +use kaspa_core::info; +use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_hashes::Hash; +use kaspa_pow::{calc_block_level, calc_block_level_check_pow}; +use kaspa_utils::vec::VecExtensions; +use parking_lot::lock_api::RwLock; +use rocksdb::WriteBatch; + +use crate::{ + model::{ 
+ services::reachability::MTReachabilityService, + stores::{ + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagStore, GhostdagStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers_selected_tip::HeadersSelectedTipStoreReader, + pruning::PruningStoreReader, + reachability::{DbReachabilityStore, ReachabilityStoreReader}, + relations::{DbRelationsStore, RelationsStoreReader}, + }, + }, + processes::{ghostdag::protocol::GhostdagManager, reachability::inquirer as reachability, relations::RelationsStoreExtensions}, +}; + +use super::{PruningProofManager, TempProofContext}; + +impl PruningProofManager { + pub fn validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + // Initialize the stores for the proof + let mut proof_stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let proof_selected_tip_by_level = + self.populate_stores_for_validate_pruning_point_proof(proof, &mut proof_stores_and_processes, true)?; + let proof_ghostdag_stores = proof_stores_and_processes.ghostdag_stores; + + // Get the proof for the current consensus and recreate the stores for it + // This is expected to be fast because if a proof exists, it will be cached. + // If no proof exists, this is empty + let mut current_consensus_proof = self.get_pruning_point_proof(); + if current_consensus_proof.is_empty() { + // An empty proof can only happen if we're at genesis. We're going to create a proof for this case that contains the genesis header only + let genesis_header = self.headers_store.get_header(self.genesis_hash).unwrap(); + current_consensus_proof = Arc::new((0..=self.max_block_level).map(|_| vec![genesis_header.clone()]).collect_vec()); + } + let mut current_consensus_stores_and_processes = + self.init_validate_pruning_point_proof_stores_and_processes(&current_consensus_proof)?; + let _ = self.populate_stores_for_validate_pruning_point_proof( + &current_consensus_proof, + &mut current_consensus_stores_and_processes, + false, + )?; + let current_consensus_ghostdag_stores = current_consensus_stores_and_processes.ghostdag_stores; + + let pruning_read = self.pruning_point_store.read(); + let relations_read = self.relations_stores.read(); + let current_pp = pruning_read.get().unwrap().pruning_point; + let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + + // The accumulated blue work of current consensus from the pruning point onward + let pruning_period_work = + self.headers_selected_tip_store.read().get().unwrap().blue_work.saturating_sub(current_pp_header.blue_work); + // The claimed blue work of the prover from his pruning point and up to the triggering relay block.
This work + // will eventually be verified if the proof is accepted so we can treat it as trusted + let prover_claimed_pruning_period_work = proof_metadata.relay_block_blue_work.saturating_sub(proof_pp_header.blue_work); + + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { + let level = level_idx as BlockLevel; + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; + + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // Step 1 - look only at levels that have a full proof (at least 2m blocks in the proof) + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the blue work* difference between the + // current consensus's tips and the common ancestor is less than the blue work difference between the + // proof's tip and the common ancestor. + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &proof_ghostdag_stores, + &current_consensus_ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { + let proof_level_blue_work_diff = proof_selected_tip_gd.blue_work.saturating_sub(proof_common_ancestor_gd.blue_work); + for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() { + let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + let parent_blue_work_diff = parent_blue_work.saturating_sub(common_ancestor_gd.blue_work); + if parent_blue_work_diff.saturating_add(pruning_period_work) + >= proof_level_blue_work_diff.saturating_add(prover_claimed_pruning_period_work) + { + return Err(PruningImportError::PruningProofInsufficientBlueWork); + } + } + + return Ok(()); + } + } + + if current_pp == self.genesis_hash { + // If the proof has better tips and the current pruning point is still + // genesis, we consider the proof state to be better. + return Ok(()); + } + + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't. + for level in (0..=self.max_block_level).rev() { + let level_idx = level as usize; + + let proof_selected_tip = proof_selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = proof_ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { + Some(parents) => { + if parents.iter().copied().any(|parent| { + current_consensus_ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m + }) { + return Ok(()); + } + } + None => { + // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better.
+ return Ok(()); + } + } + } + + drop(pruning_read); + drop(relations_read); + drop(proof_stores_and_processes.db_lifetime); + drop(current_consensus_stores_and_processes.db_lifetime); + + Err(PruningImportError::PruningProofNotEnoughHeaders) + } + + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult<TempProofContext> { + if proof[0].is_empty() { + return Err(PruningImportError::PruningProofNotEnoughHeaders); + } + + let headers_estimate = self.estimate_proof_unique_size(proof); + + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); + let headers_store = + Arc::new(DbHeadersStore::new(db.clone(), CachePolicy::Count(headers_estimate), CachePolicy::Count(headers_estimate))); + let ghostdag_stores = (0..=self.max_block_level) + .map(|level| Arc::new(DbGhostdagStore::new(db.clone(), level, cache_policy, cache_policy))) + .collect_vec(); + let mut relations_stores = + (0..=self.max_block_level).map(|level| DbRelationsStore::new(db.clone(), level, cache_policy, cache_policy)).collect_vec(); + let reachability_stores = (0..=self.max_block_level) + .map(|level| Arc::new(RwLock::new(DbReachabilityStore::with_block_level(db.clone(), cache_policy, cache_policy, level)))) + .collect_vec(); + + let reachability_services = (0..=self.max_block_level) + .map(|level| MTReachabilityService::new(reachability_stores[level as usize].clone())) + .collect_vec(); + + let ghostdag_managers = ghostdag_stores + .iter() + .cloned() + .enumerate() + .map(|(level, ghostdag_store)| { + GhostdagManager::with_level( + self.genesis_hash, + self.ghostdag_k, + ghostdag_store, + relations_stores[level].clone(), + headers_store.clone(), + reachability_services[level].clone(), + level as BlockLevel, + self.max_block_level, + ) + }) + .collect_vec(); + + { + let mut batch = WriteBatch::default(); + for level in 0..=self.max_block_level { + let level = level as usize; + reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); + relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + } + + db.write(batch).unwrap(); + } + + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + ctx: &mut TempProofContext, + log_validating: bool, + ) -> PruningImportResult<Vec<Hash>> { + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + // Before processing this level, check if the process is exiting so we can end early + if self.is_consensus_exiting.load(Ordering::Relaxed) { + return Err(PruningImportError::PruningValidationInterrupted); + } + + if log_validating { + info!("Validating level {level} from the pruning point proof ({} headers)", proof[level as usize].len()); + } + let level_idx = level as usize; + let mut
selected_tip = None; + for (i, header) in proof[level as usize].iter().enumerate() { + let (header_level, pow_passes) = calc_block_level_check_pow(header, self.max_block_level); + if header_level < level { + return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); + } + if !pow_passes { + return Err(PruningImportError::ProofOfWorkFailed(header.hash, level)); + } + + headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); + + let parents = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|parent| ghostdag_stores[level_idx].has(*parent).unwrap()) + .collect_vec(); + + // Only the first block at each level is allowed to have no known parents + if parents.is_empty() && i != 0 { + return Err(PruningImportError::PruningProofHeaderWithNoKnownParents(header.hash, level)); + } + + let parents: BlockHashes = parents.push_if_empty(ORIGIN).into(); + + if relations_stores[level_idx].has(header.hash).unwrap() { + return Err(PruningImportError::PruningProofDuplicateHeaderAtLevel(header.hash, level)); + } + + relations_stores[level_idx].insert(header.hash, parents.clone()).unwrap(); + let ghostdag_data = Arc::new(ghostdag_managers[level_idx].ghostdag(&parents)); + ghostdag_stores[level_idx].insert(header.hash, ghostdag_data.clone()).unwrap(); + selected_tip = Some(match selected_tip { + Some(tip) => ghostdag_managers[level_idx].find_selected_parent([tip, header.hash]), + None => header.hash, + }); + + let mut reachability_mergeset = { + let reachability_read = reachability_stores[level_idx].read(); + ghostdag_data + .unordered_mergeset_without_selected_parent() + .filter(|hash| reachability_read.has(*hash).unwrap()) + .collect_vec() // We collect to vector so reachability_read can be released and let `reachability::add_block` use a write lock. + .into_iter() + }; + reachability::add_block( + reachability_stores[level_idx].write().deref_mut(), + header.hash, + ghostdag_data.selected_parent, + &mut reachability_mergeset, + ) + .unwrap(); + + if selected_tip.unwrap() == header.hash { + reachability::hint_virtual_selected_parent(reachability_stores[level_idx].write().deref_mut(), header.hash) + .unwrap(); + } + } + + if level < self.max_block_level { + let block_at_depth_m_at_next_level = self + .block_at_depth( + &*ghostdag_stores[level_idx + 1], + selected_tip_by_level[level_idx + 1].unwrap(), + self.pruning_proof_m, + ) + .unwrap(); + if !relations_stores[level_idx].has(block_at_depth_m_at_next_level).unwrap() { + return Err(PruningImportError::PruningProofMissingBlockAtDepthMFromNextLevel(level, level + 1)); + } + } + + if selected_tip.unwrap() != proof_pp + && !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip.unwrap()) + { + return Err(PruningImportError::PruningProofMissesBlocksBelowPruningPoint(selected_tip.unwrap(), level)); + } + + selected_tip_by_level[level_idx] = selected_tip; + } + + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof suggested pruning point itself if its level + // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise.
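The selected-tip rule stated in the comment above reduces to a small predicate; a minimal sketch under assumed simplified types (`u64` hashes and a plain slice standing in for the parents manager), kept separate from the actual validation code that follows:

fn selected_tip_is_valid(
    selected_tip: u64, // stand-in for Hash
    level: u8,
    pp: u64,
    pp_level: u8,
    pp_parents_at_level: &[u64],
) -> bool {
    if level <= pp_level {
        // At or below the pruning point's own level, the tip must be the pruning point itself
        selected_tip == pp
    } else {
        // Above it, the tip must appear among the pruning point's parents at that level
        pp_parents_at_level.contains(&selected_tip)
    }
}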
+ if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + proof_ghostdag_stores: &[Arc<DbGhostdagStore>], + current_consensus_ghostdag_stores: &[Arc<DbGhostdagStore>], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match current_consensus_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = proof_ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } +} diff --git a/consensus/src/processes/reachability/inquirer.rs b/consensus/src/processes/reachability/inquirer.rs index ff09849b4..3c1b153de 100644 --- a/consensus/src/processes/reachability/inquirer.rs +++ b/consensus/src/processes/reachability/inquirer.rs @@ -156,21 +156,21 @@ pub fn hint_virtual_selected_parent(store: &mut (impl ReachabilityStore + ?Sized ) } -/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`). +/// Checks if the `this` block is a strict chain ancestor of the `queried` block (i.e., `this ∈ chain(queried)`). /// Note that this results in `false` if `this == queried` pub fn is_strict_chain_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result<bool> { Ok(store.get_interval(this)?.strictly_contains(store.get_interval(queried)?)) } -/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`). +/// Checks if `this` block is a chain ancestor of `queried` block (i.e., `this ∈ chain(queried) ∪ {queried}`). /// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself. pub fn is_chain_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result<bool> { Ok(store.get_interval(this)?.contains(store.get_interval(queried)?)) } -/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`). +/// Returns true if `this` is a DAG ancestor of `queried` (i.e., `queried ∈ future(this) ∪ {this}`). /// Note: this method will return true if `this == queried`. -/// The complexity of this method is O(log(|future_covering_set(this)|)) +/// The complexity of this method is `O(log(|future_covering_set(this)|))` pub fn is_dag_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: Hash, queried: Hash) -> Result<bool> { // First, check if `this` is a chain ancestor of queried if is_chain_ancestor_of(store, this, queried)?
{ @@ -184,7 +184,7 @@ pub fn is_dag_ancestor_of(store: &(impl ReachabilityStoreReader + ?Sized), this: } } -/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. +/// Finds the tree child of `ancestor` which is also a chain ancestor of `descendant`. pub fn get_next_chain_ancestor(store: &(impl ReachabilityStoreReader + ?Sized), descendant: Hash, ancestor: Hash) -> Result<Hash> { if descendant == ancestor { // The next ancestor does not exist @@ -200,7 +200,7 @@ pub fn get_next_chain_ancestor(store: &(impl ReachabilityStoreReader + ?Sized), } /// Note: it is important to keep the unchecked version for internal module use, -/// since in some scenarios during reindexing `descendant` might have a modified +/// since in some scenarios during reindexing `ancestor` might have a modified /// interval which was not propagated yet. pub(super) fn get_next_chain_ancestor_unchecked( store: &(impl ReachabilityStoreReader + ?Sized), diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 3978913ba..839e48a9e 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -5,7 +5,6 @@ use kaspa_consensus_core::errors::sync::{SyncManagerError, SyncManagerResult}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_math::uint::malachite_base::num::arithmetic::traits::CeilingLogBase2; -use kaspa_utils::option::OptionExtensions; use parking_lot::RwLock; use crate::model::{ @@ -191,7 +190,7 @@ impl< } } - if highest_with_body.is_none_or_ex(|&h| h == high) { + if highest_with_body.is_none_or(|h| h == high) { return Ok(vec![]); }; diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 008b0c4dd..b4a946c2f 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -1,7 +1,7 @@ pub mod errors; -pub mod transaction_validator_populated; -mod tx_validation_in_isolation; -pub mod tx_validation_not_utxo_related; +pub mod tx_validation_in_header_context; +pub mod tx_validation_in_isolation; +pub mod tx_validation_in_utxo_context; use std::sync::Arc; use crate::model::stores::ghostdag; @@ -11,7 +11,7 @@ use kaspa_txscript::{ SigCacheKey, }; -use kaspa_consensus_core::mass::MassCalculator; +use kaspa_consensus_core::{config::params::ForkActivation, mass::MassCalculator}; #[derive(Clone)] pub struct TransactionValidator { @@ -27,10 +27,14 @@ pub struct TransactionValidator { pub(crate) mass_calculator: MassCalculator, /// Storage mass hardfork DAA score - storage_mass_activation_daa_score: u64, + storage_mass_activation: ForkActivation, + /// KIP-10 hardfork DAA score + kip10_activation: ForkActivation, + payload_activation: ForkActivation, } impl TransactionValidator { + #[allow(clippy::too_many_arguments)] pub fn new( max_tx_inputs: usize, max_tx_outputs: usize, @@ -41,7 +45,9 @@ impl TransactionValidator { coinbase_maturity: u64, counters: Arc<TxScriptCacheCounters>, mass_calculator: MassCalculator, - storage_mass_activation_daa_score: u64, + storage_mass_activation: ForkActivation, + kip10_activation: ForkActivation, + payload_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, @@ -53,7 +59,9 @@ impl TransactionValidator { coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator, - storage_mass_activation_daa_score, + storage_mass_activation, + kip10_activation, + payload_activation, } } @@ -77,7 +85,9 @@ impl TransactionValidator {
coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), + payload_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs new file mode 100644 index 000000000..129627c59 --- /dev/null +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -0,0 +1,102 @@ +//! Groups transaction validations that depend on the containing header and/or +//! its past headers (but do not depend on UTXO state or other transactions in +//! the containing block) + +use super::{ + errors::{TxResult, TxRuleError}, + TransactionValidator, +}; +use crate::constants::LOCK_TIME_THRESHOLD; +use kaspa_consensus_core::tx::Transaction; + +pub(crate) enum LockTimeType { + Finalized, + DaaScore, + Time, +} + +pub(crate) enum LockTimeArg { + Finalized, + DaaScore(u64), + MedianTime(u64), +} + +impl TransactionValidator { + pub(crate) fn validate_tx_in_header_context_with_args( + &self, + tx: &Transaction, + ctx_daa_score: u64, + ctx_block_time: u64, + ) -> TxResult<()> { + self.validate_tx_in_header_context( + tx, + ctx_daa_score, + match Self::get_lock_time_type(tx) { + LockTimeType::Finalized => LockTimeArg::Finalized, + LockTimeType::DaaScore => LockTimeArg::DaaScore(ctx_daa_score), + LockTimeType::Time => LockTimeArg::MedianTime(ctx_block_time), + }, + ) + } + + pub(crate) fn validate_tx_in_header_context( + &self, + tx: &Transaction, + ctx_daa_score: u64, + lock_time_arg: LockTimeArg, + ) -> TxResult<()> { + self.check_transaction_payload(tx, ctx_daa_score)?; + self.check_tx_is_finalized(tx, lock_time_arg) + } + + pub(crate) fn get_lock_time_type(tx: &Transaction) -> LockTimeType { + match tx.lock_time { + // Lock time of zero means the transaction is finalized. + 0 => LockTimeType::Finalized, + + // The lock time field of a transaction is either a block DAA score at + // which the transaction is finalized or a timestamp depending on if the + // value is before the LOCK_TIME_THRESHOLD. When it is under the + // threshold it is a DAA score + t if t < LOCK_TIME_THRESHOLD => LockTimeType::DaaScore, + + // ..and when equal or above the threshold it represents time + _t => LockTimeType::Time, + } + } + + fn check_tx_is_finalized(&self, tx: &Transaction, lock_time_arg: LockTimeArg) -> TxResult<()> { + let block_time_or_daa_score = match lock_time_arg { + LockTimeArg::Finalized => return Ok(()), + LockTimeArg::DaaScore(ctx_daa_score) => ctx_daa_score, + LockTimeArg::MedianTime(ctx_block_time) => ctx_block_time, + }; + + if tx.lock_time < block_time_or_daa_score { + return Ok(()); + } + + // At this point, the transaction's lock time hasn't occurred yet, but + // the transaction might still be finalized if the sequence number + // for all transaction inputs is maxed out. 
+ for (i, input) in tx.inputs.iter().enumerate() { + if input.sequence != u64::MAX { + return Err(TxRuleError::NotFinalized(i)); + } + } + + Ok(()) + } + + fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { + // TODO (post HF): move back to in isolation validation + if self.payload_activation.is_active(ctx_daa_score) { + Ok(()) + } else { + if !tx.is_coinbase() && !tx.payload.is_empty() { + return Err(TxRuleError::NonCoinbaseTxHasPayload); + } + Ok(()) + } + } +} diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index 914624f94..b509a71c7 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -8,6 +8,11 @@ use super::{ }; impl TransactionValidator { + /// Performs a variety of transaction validation checks which are independent of any + /// context -- header or utxo. **Note** that any check performed here should be moved to + /// header contextual validation if it becomes HF activation dependent. This is because we rely + /// on the checks here being truly independent and avoid calling them multiple times wherever possible + /// (e.g., BBT relies on mempool in isolation checks even though virtual daa score might have changed) pub fn validate_tx_in_isolation(&self, tx: &Transaction) -> TxResult<()> { self.check_transaction_inputs_in_isolation(tx)?; self.check_transaction_outputs_in_isolation(tx)?; @@ -16,7 +21,6 @@ impl TransactionValidator { check_transaction_output_value_ranges(tx)?; check_duplicate_transaction_inputs(tx)?; check_gas(tx)?; - check_transaction_payload(tx)?; check_transaction_subnetwork(tx)?; check_transaction_version(tx) } @@ -107,14 +111,6 @@ fn check_gas(tx: &Transaction) -> TxResult<()> { Ok(()) } -fn check_transaction_payload(tx: &Transaction) -> TxResult<()> { - // This should be revised if subnetworks are activated (along with other validations that weren't copied from kaspad) - if !tx.is_coinbase() && !tx.payload.is_empty() { - return Err(TxRuleError::NonCoinbaseTxHasPayload); - } - Ok(()) -} - fn check_transaction_version(tx: &Transaction) -> TxResult<()> { if tx.version != TX_VERSION { return Err(TxRuleError::UnknownTxVersion(tx.version)); @@ -304,7 +300,7 @@ mod tests { let mut tx = valid_tx.clone(); tx.payload = vec![0]; - assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::NonCoinbaseTxHasPayload)); + assert_match!(tv.validate_tx_in_isolation(&tx), Ok(())); let mut tx = valid_tx; tx.version = TX_VERSION + 1; diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs similarity index 80% rename from consensus/src/processes/transaction_validator/transaction_validator_populated.rs rename to consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs index 4a8733d2b..cff13d9fb 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs @@ -1,18 +1,24 @@ use crate::constants::{MAX_SOMPI, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK}; use kaspa_consensus_core::{ - hashing::sighash::SigHashReusedValues, + hashing::sighash::{SigHashReusedValuesSync, SigHashReusedValuesUnsync}, mass::Kip9Version, tx::{TransactionInput,
VerifiableTransaction}, }; use kaspa_core::warn; -use kaspa_txscript::{get_sig_op_count, TxScriptEngine}; +use kaspa_txscript::{caches::Cache, get_sig_op_count, SigCacheKey, TxScriptEngine}; use kaspa_txscript_errors::TxScriptError; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; +use rayon::ThreadPool; +use std::marker::Sync; use super::{ errors::{TxResult, TxRuleError}, TransactionValidator, }; +/// The threshold above which we apply parallelism to input script processing +const CHECK_SCRIPTS_PARALLELISM_THRESHOLD: usize = 1; + #[derive(Clone, Copy, PartialEq, Eq)] pub enum TxValidationFlags { /// Perform full validation including script verification @@ -29,7 +35,7 @@ pub enum TxValidationFlags { impl TransactionValidator { pub fn validate_populated_transaction_and_get_fee( &self, - tx: &impl VerifiableTransaction, + tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64, flags: TxValidationFlags, mass_and_feerate_threshold: Option<(u64, f64)>, @@ -38,24 +44,24 @@ impl TransactionValidator { let total_in = self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; let fee = total_in - total_out; - if flags != TxValidationFlags::SkipMassCheck && pov_daa_score > self.storage_mass_activation_daa_score { + if flags != TxValidationFlags::SkipMassCheck && self.storage_mass_activation.is_active(pov_daa_score) { // Storage mass hardfork was activated self.check_mass_commitment(tx)?; - if pov_daa_score < self.storage_mass_activation_daa_score + 10 && self.storage_mass_activation_daa_score > 0 { + if self.storage_mass_activation.is_within_range_from_activation(pov_daa_score, 10) { warn!("--------- Storage mass hardfork was activated successfully!!! --------- (DAA score: {})", pov_daa_score); } } Self::check_sequence_lock(tx, pov_daa_score)?; - // The following call is not a consensus check (it could not be one in the first place since it uses floating number) - // but rather a mempool Replace by Fee validation rule. It was placed here purposely for avoiding unneeded script checks. + // The following call is not a consensus check (it could not be one in the first place since it uses a floating number) + // but rather a mempool Replace by Fee validation rule. It is placed here purposely for avoiding unneeded script checks. 
Self::check_feerate_threshold(fee, mass_and_feerate_threshold)?; match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { Self::check_sig_op_counts(tx)?; - self.check_scripts(tx)?; + self.check_scripts(tx, pov_daa_score)?; } TxValidationFlags::SkipScriptChecks => {} } @@ -158,7 +164,7 @@ impl TransactionValidator { fn check_sig_op_counts<T: VerifiableTransaction>(tx: &T) -> TxResult<()> { for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let calculated = get_sig_op_count::<T>(&input.signature_script, &entry.script_public_key); + let calculated = get_sig_op_count::<T, SigHashReusedValuesUnsync>(&input.signature_script, &entry.script_public_key); if calculated != input.sig_op_count as u64 { return Err(TxRuleError::WrongSigOpCount(i, input.sig_op_count as u64, calculated)); } @@ -166,16 +172,58 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let mut reused_values = SigHashReusedValues::new(); - for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &mut reused_values, &self.sig_cache) - .map_err(|err| map_script_err(err, input))?; - engine.execute().map_err(|err| map_script_err(err, input))?; - } + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + Sync), pov_daa_score: u64) -> TxResult<()> { + check_scripts(&self.sig_cache, tx, self.kip10_activation.is_active(pov_daa_score)) + } +} - Ok(()) +pub fn check_scripts( + sig_cache: &Cache<SigCacheKey, bool>, + tx: &(impl VerifiableTransaction + Sync), + kip10_enabled: bool, +) -> TxResult<()> { + if tx.inputs().len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD { + check_scripts_par_iter(sig_cache, tx, kip10_enabled) + } else { + check_scripts_sequential(sig_cache, tx, kip10_enabled) + } +} + +pub fn check_scripts_sequential( + sig_cache: &Cache<SigCacheKey, bool>, + tx: &impl VerifiableTransaction, + kip10_enabled: bool, +) -> TxResult<()> { + let reused_values = SigHashReusedValuesUnsync::new(); + for (i, (input, entry)) in tx.populated_inputs().enumerate() { + TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache, kip10_enabled) + .execute() + .map_err(|err| map_script_err(err, input))?; } + Ok(()) +} + +pub fn check_scripts_par_iter( + sig_cache: &Cache<SigCacheKey, bool>, + tx: &(impl VerifiableTransaction + Sync), + kip10_enabled: bool, +) -> TxResult<()> { + let reused_values = SigHashReusedValuesSync::new(); + (0..tx.inputs().len()).into_par_iter().try_for_each(|idx| { + let (input, utxo) = tx.populated_input(idx); + TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache, kip10_enabled) + .execute() + .map_err(|err| map_script_err(err, input)) + }) +} + +pub fn check_scripts_par_iter_pool( + sig_cache: &Cache<SigCacheKey, bool>, + tx: &(impl VerifiableTransaction + Sync), + pool: &ThreadPool, + kip10_enabled: bool, +) -> TxResult<()> { + pool.install(|| check_scripts_par_iter(sig_cache, tx, kip10_enabled)) } fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { @@ -189,6 +237,7 @@ fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRule #[cfg(test)] mod tests { use super::super::errors::TxRuleError; + use super::CHECK_SCRIPTS_PARALLELISM_THRESHOLD; use core::str::FromStr; use itertools::Itertools; use kaspa_consensus_core::sign::sign; @@ -202,6 +251,15 @@ mod tests { use crate::{params::MAINNET_PARAMS, processes::transaction_validator::TransactionValidator}; + /// Helper function to duplicate the last input + fn duplicate_input(tx: &Transaction,
entries: &[UtxoEntry]) -> (Transaction, Vec<UtxoEntry>) { + let mut tx2 = tx.clone(); + let mut entries2 = entries.to_owned(); + tx2.inputs.push(tx2.inputs.last().unwrap().clone()); + entries2.push(entries2.last().unwrap().clone()); + (tx2, entries2) + } + #[test] fn check_signature_test() { let mut params = MAINNET_PARAMS.clone(); @@ -260,7 +318,15 @@ mod tests { }], ); - tv.check_scripts(&populated_tx).expect("Signature check failed"); + tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + // Duplicated sigs should fail due to wrong sighash + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) + ); } #[test] @@ -322,7 +388,18 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx).is_err(), "Failing Signature Test Failed"); + assert!(tv.check_scripts(&populated_tx, u64::MAX).is_err(), "Expecting signature check to fail"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX).expect_err("Expecting signature check to fail"); + + // Verify we are correctly testing the parallelism case (applied here as sanity for all tests) + assert!( + tx2.inputs.len() > CHECK_SCRIPTS_PARALLELISM_THRESHOLD, + "The script tests must cover the case of a tx with inputs.len() > {}", + CHECK_SCRIPTS_PARALLELISM_THRESHOLD + ); } #[test] @@ -384,7 +461,15 @@ mod tests { is_coinbase: false, }], ); - tv.check_scripts(&populated_tx).expect("Signature check failed"); + tv.check_scripts(&populated_tx, u64::MAX).expect("Signature check failed"); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + // Duplicated sigs should fail due to wrong sighash + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -447,7 +532,14 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx) == Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -510,7 +602,14 @@ mod tests { }], ); - assert!(tv.check_scripts(&populated_tx) == Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::NullFail)) + ); } #[test] @@ -573,8 +672,14 @@ mod tests { }], ); - let result =
tv.check_scripts(&populated_tx); - assert!(result == Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); + assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse))); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::EvalFalse)) + ); } #[test] @@ -628,8 +733,17 @@ mod tests { }], ); - let result = tv.check_scripts(&populated_tx); - assert!(result == Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly))); + assert_eq!( + tv.check_scripts(&populated_tx, u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)) + ); + + // Test a tx with 2 inputs to cover parallelism split points in inner script checking code + let (tx2, entries2) = duplicate_input(&tx, &populated_tx.entries); + assert_eq!( + tv.check_scripts(&PopulatedTransaction::new(&tx2, entries2), u64::MAX), + Err(TxRuleError::SignatureInvalid(TxScriptError::SignatureScriptNotPushOnly)) + ); } #[test] @@ -708,7 +822,7 @@ mod tests { let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, &secret_key.secret_bytes()).unwrap(); let signed_tx = sign(MutableTransaction::with_entries(unsigned_tx, entries), schnorr_key); let populated_tx = signed_tx.as_verifiable(); - assert_eq!(tv.check_scripts(&populated_tx), Ok(())); + assert_eq!(tv.check_scripts(&populated_tx, u64::MAX), Ok(())); assert_eq!(TransactionValidator::check_sig_op_counts(&populated_tx), Ok(())); } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs b/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs deleted file mode 100644 index 4cfa72b46..000000000 --- a/consensus/src/processes/transaction_validator/tx_validation_not_utxo_related.rs +++ /dev/null @@ -1,41 +0,0 @@ -use kaspa_consensus_core::tx::Transaction; - -use crate::constants::LOCK_TIME_THRESHOLD; - -use super::{ - errors::{TxResult, TxRuleError}, - TransactionValidator, -}; - -impl TransactionValidator { - pub fn utxo_free_tx_validation(&self, tx: &Transaction, ctx_daa_score: u64, ctx_block_time: u64) -> TxResult<()> { - self.check_tx_is_finalized(tx, ctx_daa_score, ctx_block_time) - } - - fn check_tx_is_finalized(&self, tx: &Transaction, ctx_daa_score: u64, ctx_block_time: u64) -> TxResult<()> { - // Lock time of zero means the transaction is finalized. - if tx.lock_time == 0 { - return Ok(()); - } - - // The lock time field of a transaction is either a block DAA score at - // which the transaction is finalized or a timestamp depending on if the - // value is before the LOCK_TIME_THRESHOLD. When it is under the - // threshold it is a DAA score. - let block_time_or_daa_score = if tx.lock_time < LOCK_TIME_THRESHOLD { ctx_daa_score } else { ctx_block_time }; - if tx.lock_time < block_time_or_daa_score { - return Ok(()); - } - - // At this point, the transaction's lock time hasn't occurred yet, but - // the transaction might still be finalized if the sequence number - // for all transaction inputs is maxed out. 
- for (i, input) in tx.inputs.iter().enumerate() { - if input.sequence != u64::MAX { - return Err(TxRuleError::NotFinalized(i)); - } - } - - Ok(()) - } -} diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index 9c582af28..1caff9c00 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -1,6 +1,6 @@ use crate::{ model::stores::{ - block_window_cache::{BlockWindowCacheReader, BlockWindowHeap, WindowOrigin}, + block_window_cache::{BlockWindowCacheReader, BlockWindowCacheWriter, BlockWindowHeap, WindowOrigin}, daa::DaaStoreReader, ghostdag::{GhostdagData, GhostdagStoreReader}, headers::HeaderStoreReader, @@ -9,7 +9,7 @@ use crate::{ }; use kaspa_consensus_core::{ blockhash::BlockHashExtensions, - config::genesis::GenesisBlock, + config::{genesis::GenesisBlock, params::ForkActivation}, errors::{block::RuleError, difficulty::DifficultyResult}, BlockHashSet, BlueWorkType, }; @@ -17,7 +17,12 @@ use kaspa_hashes::Hash; use kaspa_math::Uint256; use kaspa_utils::refs::Refs; use once_cell::unsync::Lazy; -use std::{cmp::Reverse, iter::once, ops::Deref, sync::Arc}; +use std::{ + cmp::Reverse, + iter::once, + ops::{Deref, DerefMut}, + sync::Arc, +}; use super::{ difficulty::{FullDifficultyManager, SampledDifficultyManager}, @@ -26,9 +31,8 @@ use super::{ #[derive(Clone, Copy)] pub enum WindowType { - SampledDifficultyWindow, - FullDifficultyWindow, - SampledMedianTimeWindow, + DifficultyWindow, + MedianTimeWindow, VaryingWindow(usize), } @@ -50,15 +54,44 @@ pub trait WindowManager { fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result<DaaWindow, RuleError>; fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32; fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc<BlockWindowHeap>), RuleError>; + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result<u64, RuleError>; fn estimate_network_hashes_per_second(&self, window: Arc<BlockWindowHeap>) -> DifficultyResult<u64>; fn window_size(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> usize; fn sample_rate(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> u64; + + /// Returns the full consecutive sub-DAG containing all blocks required to restore the (possibly sampled) window. + fn consecutive_cover_for_window(&self, ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash>; +} + +trait AffiliatedWindowCacheReader { + fn get(&self, hash: &Hash) -> Option<Arc<BlockWindowHeap>>; +} + +/// A local wrapper over an (optional) block window cache which filters cache hits based on a pre-specified window origin +struct AffiliatedWindowCache<'a, U: BlockWindowCacheReader> { + /// The inner underlying cache + inner: Option<&'a Arc<U>>, + /// The affiliated origin (sampled vs.
full) + origin: WindowOrigin, +} + +impl<'a, U: BlockWindowCacheReader> AffiliatedWindowCache<'a, U> { + fn new(inner: Option<&'a Arc<U>>, origin: WindowOrigin) -> Self { + Self { inner, origin } + } +} + +impl<U: BlockWindowCacheReader> AffiliatedWindowCacheReader for AffiliatedWindowCache<'_, U> { + fn get(&self, hash: &Hash) -> Option<Arc<BlockWindowHeap>> { + // Only return the cached window if it originates from the affiliated origin + self.inner.and_then(|cache| cache.get(hash, self.origin)) + } } /// A window manager conforming (indirectly) to the legacy golang implementation /// based on full, hence un-sampled, windows #[derive(Clone)] -pub struct FullWindowManager<T: GhostdagStoreReader, U: BlockWindowCacheReader, V: HeaderStoreReader> { +pub struct FullWindowManager<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter, V: HeaderStoreReader> { genesis_hash: Hash, ghostdag_store: Arc<T>, block_window_cache_for_difficulty: Arc<U>, @@ -69,7 +102,7 @@ pub struct FullWindowManager, } -impl<T: GhostdagStoreReader, U: BlockWindowCacheReader, V: HeaderStoreReader> FullWindowManager<T, U, V> { +impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter, V: HeaderStoreReader> FullWindowManager<T, U, V> { pub fn new( genesis: &GenesisBlock, ghostdag_store: Arc<T>, @@ -109,30 +142,29 @@ impl Fu return Ok(Arc::new(BlockWindowHeap::new(WindowOrigin::Full))); } - let cache = if window_size == self.difficulty_window_size { + let inner_cache = if window_size == self.difficulty_window_size { Some(&self.block_window_cache_for_difficulty) } else if window_size == self.past_median_time_window_size { Some(&self.block_window_cache_for_past_median_time) } else { None }; - - if let Some(cache) = cache { - if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { - // Only use the cached window if it originates from here - if let WindowOrigin::Full = selected_parent_binary_heap.origin() { - let mut window_heap = BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone()); - if ghostdag_data.selected_parent != self.genesis_hash { - self.try_push_mergeset( - &mut window_heap, - ghostdag_data, - self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(), - ); - } - - return Ok(Arc::new(window_heap.binary_heap)); - } + // Wrap the inner cache with a cache affiliated with this origin (WindowOrigin::Full).
+ // This is crucial for hardfork times where the DAA mechanism changes thereby invalidating cache entries + // originating from the prior mechanism + let cache = AffiliatedWindowCache::new(inner_cache, WindowOrigin::Full); + + if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { + let mut window_heap = BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone()); + if ghostdag_data.selected_parent != self.genesis_hash { + self.try_push_mergeset( + &mut window_heap, + ghostdag_data, + self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(), + ); + } + + return Ok(Arc::new(window_heap.binary_heap)); } let mut window_heap = BoundedSizeBlockHeap::new(WindowOrigin::Full, window_size); @@ -189,7 +221,9 @@ impl Fu } } -impl<T: GhostdagStoreReader, U: BlockWindowCacheReader, V: HeaderStoreReader> WindowManager for FullWindowManager<T, U, V> { +impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter, V: HeaderStoreReader> WindowManager + for FullWindowManager<T, U, V> +{ fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result<Arc<BlockWindowHeap>, RuleError> { self.build_block_window(ghostdag_data, window_type) } @@ -201,7 +235,7 @@ impl Wi } fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result<DaaWindow, RuleError> { - let window = self.block_window(ghostdag_data, WindowType::SampledDifficultyWindow)?; + let window = self.block_window(ghostdag_data, WindowType::DifficultyWindow)?; Ok(self.calc_daa_window(ghostdag_data, window)) } @@ -210,19 +244,31 @@ impl Wi } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc<BlockWindowHeap>), RuleError> { - let window = self.block_window(ghostdag_data, WindowType::SampledMedianTimeWindow)?; + let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; Ok((past_median_time, window)) } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result<u64, RuleError> { + if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Full) { + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + Ok(past_median_time) + } else { + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); + let (past_median_time, window) = self.calc_past_median_time(&ghostdag_data)?; + self.block_window_cache_for_past_median_time.insert(hash, window); + Ok(past_median_time) + } + } + fn estimate_network_hashes_per_second(&self, window: Arc<BlockWindowHeap>) -> DifficultyResult<u64> { self.difficulty_manager.estimate_network_hashes_per_second(&window) } fn window_size(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> usize { match window_type { - WindowType::SampledDifficultyWindow | WindowType::FullDifficultyWindow => self.difficulty_window_size, - WindowType::SampledMedianTimeWindow => self.past_median_time_window_size, + WindowType::DifficultyWindow => self.difficulty_window_size, + WindowType::MedianTimeWindow => self.past_median_time_window_size, WindowType::VaryingWindow(size) => size, } } @@ -230,6 +276,11 @@ impl Wi fn sample_rate(&self, _ghostdag_data: &GhostdagData, _window_type: WindowType) -> u64 { 1 } + + fn consecutive_cover_for_window(&self, _ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> { + assert_eq!(WindowOrigin::Full, window.origin()); + window.iter().map(|b| b.0.hash).collect() + } } type DaaStatus = Option<(u64, BlockHashSet)>; enum SampledBlock { /// A sampled window manager implementing [KIP-0004](https://github.com/kaspanet/kips/blob/master/kip-0004.md) #[derive(Clone)] -pub struct SampledWindowManager<T: GhostdagStoreReader, U: BlockWindowCacheReader, V: HeaderStoreReader, W: DaaStoreReader> { +pub struct SampledWindowManager< + T:
GhostdagStoreReader, + U: BlockWindowCacheReader + BlockWindowCacheWriter, + V: HeaderStoreReader, + W: DaaStoreReader, +> { genesis_hash: Hash, ghostdag_store: Arc<T>, headers_store: Arc<V>, @@ -249,7 +305,7 @@ pub struct SampledWindowManager, block_window_cache_for_past_median_time: Arc<U>, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, @@ -258,7 +314,9 @@ pub struct SampledWindowManager, } -impl<T: GhostdagStoreReader, U: BlockWindowCacheReader, V: HeaderStoreReader, W: DaaStoreReader> SampledWindowManager<T, U, V, W> { +impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter, V: HeaderStoreReader, W: DaaStoreReader> + SampledWindowManager<T, U, V, W> +{ #[allow(clippy::too_many_arguments)] pub fn new( genesis: &GenesisBlock, @@ -269,7 +327,7 @@ impl, max_difficulty_target: Uint256, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, difficulty_window_size: usize, min_difficulty_window_len: usize, difficulty_sample_rate: u64, @@ -294,7 +352,7 @@ impl - let cache = match window_type { - WindowType::SampledDifficultyWindow => Some(&self.block_window_cache_for_difficulty), - WindowType::SampledMedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), - WindowType::FullDifficultyWindow | WindowType::VaryingWindow(_) => None, + let inner_cache = match window_type { + WindowType::DifficultyWindow => Some(&self.block_window_cache_for_difficulty), + WindowType::MedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), + WindowType::VaryingWindow(_) => None, }; - - if let Some(cache) = cache { - if let Some(selected_parent_binary_heap) = cache.get(&ghostdag_data.selected_parent) { - // Only use the cached window if it originates from here - if let WindowOrigin::Sampled = selected_parent_binary_heap.origin() { - let selected_parent_blue_work = self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(); - - let mut heap = - Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_binary_heap).clone())); - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { - match block { - SampledBlock::Sampled(block) => { - heap.try_push(block.hash, block.blue_work); - } - SampledBlock::NonDaa(hash) => { - mergeset_non_daa_inserter(hash); - } - } - } - - return if let Ok(heap) = Lazy::into_value(heap) { - Ok(Arc::new(heap.binary_heap)) - } else { - Ok(selected_parent_binary_heap.clone()) - }; - } - } + // Wrap the inner cache with a cache affiliated with this origin (WindowOrigin::Sampled). + // This is crucial for hardfork times where the DAA mechanism changes thereby invalidating cache entries + // originating from the prior mechanism + let cache = AffiliatedWindowCache::new(inner_cache, WindowOrigin::Sampled); + + let selected_parent_blue_work = self.ghostdag_store.get_blue_work(ghostdag_data.selected_parent).unwrap(); + + // Try to initialize the window from the cache directly + if let Some(res) = self.try_init_from_cache( + window_size, + sample_rate, + &cache, + ghostdag_data, + selected_parent_blue_work, + Some(&mut mergeset_non_daa_inserter), + ) { + return Ok(res); } + // else we populate the window with the passed ghostdag_data.
let mut window_heap = BoundedSizeBlockHeap::new(WindowOrigin::Sampled, window_size); - let parent_ghostdag = self.ghostdag_store.get_data(ghostdag_data.selected_parent).unwrap(); - - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, parent_ghostdag.blue_work) { - match block { - SampledBlock::Sampled(block) => { - window_heap.try_push(block.hash, block.blue_work); - } - SampledBlock::NonDaa(hash) => { - mergeset_non_daa_inserter(hash); - } - } - } + self.push_mergeset( + &mut &mut window_heap, + sample_rate, + ghostdag_data, + selected_parent_blue_work, + Some(&mut mergeset_non_daa_inserter), + ); + let mut current_ghostdag = self.ghostdag_store.get_data(ghostdag_data.selected_parent).unwrap(); - let mut current_ghostdag = parent_ghostdag; + // Note: no need to check for cache here, as we already tried to initialize from the passed ghostdag's selected parent cache in `self.try_init_from_cache` - // Walk down the chain until we cross the window boundaries + // Walk down the chain until we cross the window boundaries. loop { + // check if we may exit early. if current_ghostdag.selected_parent.is_origin() { // Reaching origin means there's no more data, so we expect the window to already be full, otherwise we err. // This error can happen only during an IBD from pruning proof when processing the first headers in the pruning point's @@ -387,50 +433,98 @@ impl); + + // see if we can inherit and merge with the selected parent cache + if self.try_merge_with_selected_parent_cache(&mut window_heap, &cache, &current_ghostdag.selected_parent) { + // if successful, we may break out of the loop, with the window already filled. + break; + }; + + // update the current ghostdag to the parent ghostdag, and continue the loop. + current_ghostdag = parent_ghostdag; } Ok(Arc::new(window_heap.binary_heap)) } - fn try_push_mergeset( + /// Push the mergeset samples into the bounded heap. + /// Note: receives the heap argument as a DerefMut so that Lazy can be passed and be evaluated *only if an actual push is needed* + fn push_mergeset( + &self, - heap: &mut BoundedSizeBlockHeap, + heap: &mut impl DerefMut<Target = BoundedSizeBlockHeap>, sample_rate: u64, ghostdag_data: &GhostdagData, selected_parent_blue_work: BlueWorkType, - ) -> bool { - // If the window is full and the selected parent is less than the minimum then we break - // because this means that there cannot be any more blocks in the past with higher blue work - if !heap.can_push(ghostdag_data.selected_parent, selected_parent_blue_work) { - return true; - } - - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { - match block { - SampledBlock::Sampled(block) => { + mergeset_non_daa_inserter: Option<impl FnMut(Hash)>, + ) { + if let Some(mut mergeset_non_daa_inserter) = mergeset_non_daa_inserter { + // If we have a non-daa inserter, we must iterate over the whole mergeset and handle both the sampled and the non-daa blocks. + for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + match block { + SampledBlock::Sampled(block) => { + heap.try_push(block.hash, block.blue_work); + } + SampledBlock::NonDaa(hash) => mergeset_non_daa_inserter(hash), + }; + } + } else { + // If we don't have a non-daa inserter, we can iterate over the sampled mergeset and return early if we can't push anymore.
+ for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + if let SampledBlock::Sampled(block) = block { if !heap.try_push(block.hash, block.blue_work) { - break; + return; } } - SampledBlock::NonDaa(_) => {} } } - false + } + + fn try_init_from_cache( + &self, + window_size: usize, + sample_rate: u64, + cache: &impl AffiliatedWindowCacheReader, + ghostdag_data: &GhostdagData, + selected_parent_blue_work: BlueWorkType, + mergeset_non_daa_inserter: Option, + ) -> Option> { + cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { + let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); + // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples + self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); + if let Ok(heap) = Lazy::into_value(heap) { + Arc::new(heap.binary_heap) + } else { + selected_parent_window.clone() + } + }) + } + + fn try_merge_with_selected_parent_cache( + &self, + heap: &mut BoundedSizeBlockHeap, + cache: &impl AffiliatedWindowCacheReader, + selected_parent: &Hash, + ) -> bool { + cache + .get(selected_parent) + .map(|selected_parent_window| { + heap.merge_ancestor_heap(&mut (*selected_parent_window).clone()); + }) + .is_some() } fn sampled_mergeset_iterator<'a>( @@ -461,7 +555,7 @@ impl WindowManager +impl WindowManager for SampledWindowManager { fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result, RuleError> { @@ -476,7 +570,7 @@ impl Result { let mut mergeset_non_daa = BlockHashSet::default(); - let window = self.build_block_window(ghostdag_data, WindowType::SampledDifficultyWindow, |hash| { + let window = self.build_block_window(ghostdag_data, WindowType::DifficultyWindow, |hash| { mergeset_non_daa.insert(hash); })?; let daa_score = self.difficulty_manager.calc_daa_score(ghostdag_data, &mergeset_non_daa); @@ -488,49 +582,96 @@ impl Result<(u64, Arc), RuleError> { - let window = self.block_window(ghostdag_data, WindowType::SampledMedianTimeWindow)?; + let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; Ok((past_median_time, window)) } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { + if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Sampled) { + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + Ok(past_median_time) + } else { + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); + let (past_median_time, window) = self.calc_past_median_time(&ghostdag_data)?; + self.block_window_cache_for_past_median_time.insert(hash, window); + Ok(past_median_time) + } + } + fn estimate_network_hashes_per_second(&self, window: Arc) -> DifficultyResult { self.difficulty_manager.estimate_network_hashes_per_second(&window) } fn window_size(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> usize { match window_type { - WindowType::SampledDifficultyWindow => self.difficulty_window_size, - // We aim to return a full window such that it contains what would be the sampled window. 
Note that the
-        // product below addresses also the worst-case scenario where the last sampled block is exactly `sample_rate`
-        // blocks from the end of the full window
-            WindowType::FullDifficultyWindow => self.difficulty_window_size * self.difficulty_sample_rate as usize,
-            WindowType::SampledMedianTimeWindow => self.past_median_time_window_size,
+            WindowType::DifficultyWindow => self.difficulty_window_size,
+            WindowType::MedianTimeWindow => self.past_median_time_window_size,
             WindowType::VaryingWindow(size) => size,
         }
     }

     fn sample_rate(&self, _ghostdag_data: &GhostdagData, window_type: WindowType) -> u64 {
         match window_type {
-            WindowType::SampledDifficultyWindow => self.difficulty_sample_rate,
-            WindowType::SampledMedianTimeWindow => self.past_median_time_sample_rate,
-            WindowType::FullDifficultyWindow | WindowType::VaryingWindow(_) => 1,
+            WindowType::DifficultyWindow => self.difficulty_sample_rate,
+            WindowType::MedianTimeWindow => self.past_median_time_sample_rate,
+            WindowType::VaryingWindow(_) => 1,
         }
     }
+
+    fn consecutive_cover_for_window(&self, mut ghostdag: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> {
+        assert_eq!(WindowOrigin::Sampled, window.origin());
+
+        // In the sampled case, the sampling logic relies on DAA indexes which can only be calculated correctly if the full
+        // mergesets covering all sampled blocks are sent.
+
+        // Tracks the window blocks to make sure we visit all blocks
+        let mut unvisited: BlockHashSet = window.iter().map(|b| b.0.hash).collect();
+        let capacity_estimate = window.len() * self.difficulty_sample_rate as usize;
+        // The full consecutive window covering all sampled window blocks and the full mergesets containing them
+        let mut cover = Vec::with_capacity(capacity_estimate);
+        while !unvisited.is_empty() {
+            assert!(!ghostdag.selected_parent.is_origin(), "unvisited still not empty");
+            // TODO (relaxed): a possible optimization here is to iterate in the same order as
+            // sampled_mergeset_iterator (descending_mergeset) and to break once all samples from
+            // this mergeset are reached.
+            // * Why is this sufficient? Because we still send the prefix of the mergeset required for
+            // obtaining the DAA index for all sampled blocks.
+            // * What's the benefit? This might exclude deeply merged blocks, which in turn will help
+            // reduce the number of trusted blocks sent to a fresh syncing peer.
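+            // Push the full mergeset of the current chain block and mark any window blocks it contains as visited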
+ for merged in ghostdag.unordered_mergeset() { + cover.push(merged); + unvisited.remove(&merged); + } + if unvisited.is_empty() { + break; + } + ghostdag = self.ghostdag_store.get_data(ghostdag.selected_parent).unwrap(); + } + cover + } } /// A window manager handling either full (un-sampled) or sampled windows depending on an activation DAA score /// /// See [FullWindowManager] and [SampledWindowManager] #[derive(Clone)] -pub struct DualWindowManager { +pub struct DualWindowManager< + T: GhostdagStoreReader, + U: BlockWindowCacheReader + BlockWindowCacheWriter, + V: HeaderStoreReader, + W: DaaStoreReader, +> { ghostdag_store: Arc, headers_store: Arc, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, full_window_manager: FullWindowManager, sampled_window_manager: SampledWindowManager, } -impl DualWindowManager { +impl + DualWindowManager +{ #[allow(clippy::too_many_arguments)] pub fn new( genesis: &GenesisBlock, @@ -541,7 +682,7 @@ impl, max_difficulty_target: Uint256, target_time_per_block: u64, - sampling_activation_daa_score: u64, + sampling_activation: ForkActivation, full_difficulty_window_size: usize, sampled_difficulty_window_size: usize, min_difficulty_window_len: usize, @@ -571,77 +712,92 @@ impl bool { - let sp_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); - sp_daa_score >= self.sampling_activation_daa_score + /// Checks whether sampling mode was activated based on the selected parent (internally checking its DAA score) + pub(crate) fn sampling(&self, selected_parent: Hash) -> bool { + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + self.sampling_activation.is_active(sp_daa_score) } } -impl WindowManager +impl WindowManager for DualWindowManager { fn block_window(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> Result, RuleError> { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.block_window(ghostdag_data, window_type), false => self.full_window_manager.block_window(ghostdag_data, window_type), } } fn calc_daa_window(&self, ghostdag_data: &GhostdagData, window: Arc) -> DaaWindow { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calc_daa_window(ghostdag_data, window), false => self.full_window_manager.calc_daa_window(ghostdag_data, window), } } fn block_daa_window(&self, ghostdag_data: &GhostdagData) -> Result { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.block_daa_window(ghostdag_data), false => self.full_window_manager.block_daa_window(ghostdag_data), } } fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32 { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calculate_difficulty_bits(ghostdag_data, daa_window), false => self.full_window_manager.calculate_difficulty_bits(ghostdag_data, daa_window), } } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError> { - match self.sampling(ghostdag_data) { + match self.sampling(ghostdag_data.selected_parent) { true => self.sampled_window_manager.calc_past_median_time(ghostdag_data), false => self.full_window_manager.calc_past_median_time(ghostdag_data), } } + fn calc_past_median_time_for_known_hash(&self, hash: Hash) 
-> Result<u64, RuleError> {
+        match self.sampling(self.ghostdag_store.get_selected_parent(hash).unwrap()) {
+            true => self.sampled_window_manager.calc_past_median_time_for_known_hash(hash),
+            false => self.full_window_manager.calc_past_median_time_for_known_hash(hash),
+        }
+    }
+
     fn estimate_network_hashes_per_second(&self, window: Arc<BlockWindowHeap>) -> DifficultyResult<u64> {
         self.sampled_window_manager.estimate_network_hashes_per_second(window)
     }

     fn window_size(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> usize {
-        match self.sampling(ghostdag_data) {
+        match self.sampling(ghostdag_data.selected_parent) {
             true => self.sampled_window_manager.window_size(ghostdag_data, window_type),
             false => self.full_window_manager.window_size(ghostdag_data, window_type),
         }
     }

     fn sample_rate(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> u64 {
-        match self.sampling(ghostdag_data) {
+        match self.sampling(ghostdag_data.selected_parent) {
             true => self.sampled_window_manager.sample_rate(ghostdag_data, window_type),
             false => self.full_window_manager.sample_rate(ghostdag_data, window_type),
         }
     }
+
+    fn consecutive_cover_for_window(&self, ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> {
+        match window.origin() {
+            WindowOrigin::Sampled => self.sampled_window_manager.consecutive_cover_for_window(ghostdag_data, window),
+            WindowOrigin::Full => self.full_window_manager.consecutive_cover_for_window(ghostdag_data, window),
+        }
+    }
 }

 struct BoundedSizeBlockHeap {
@@ -686,4 +842,14 @@ impl BoundedSizeBlockHeap {
         self.binary_heap.push(r_sortable_block);
         true
     }
+
+    // Merges an ancestor window heap into the current heap.
+    fn merge_ancestor_heap(&mut self, ancestor_heap: &mut BlockWindowHeap) {
+        self.binary_heap.blocks.append(&mut ancestor_heap.blocks);
+        // Below we saturate for cases where the ancestor may be close to the origin or to genesis.
+        // Note: thanks to the saturating sub, this is a no-op whenever the combined size of the two heaps is less than or equal to the size bound.
+        for _ in 0..self.binary_heap.len().saturating_sub(self.size_bound) {
+            self.binary_heap.blocks.pop();
+        }
+    }
 }
diff --git a/consensus/src/test_helpers.rs b/consensus/src/test_helpers.rs
index c119c6d6d..b3867f145 100644
--- a/consensus/src/test_helpers.rs
+++ b/consensus/src/test_helpers.rs
@@ -19,7 +19,7 @@ pub fn block_from_precomputed_hash(hash: Hash, parents: Vec<Hash>) -> Block {
 pub fn generate_random_utxos_from_script_public_key_pool(
     rng: &mut SmallRng,
     amount: usize,
-    script_public_key_pool: &Vec<ScriptPublicKey>,
+    script_public_key_pool: &[ScriptPublicKey],
 ) -> UtxoCollection {
     let mut i = 0;
     let mut collection = UtxoCollection::with_capacity(amount);
@@ -40,10 +40,7 @@ pub fn generate_random_outpoint(rng: &mut SmallRng) -> TransactionOutpoint {
     TransactionOutpoint::new(generate_random_hash(rng), rng.gen::<u32>())
 }

-pub fn generate_random_utxo_from_script_public_key_pool(
-    rng: &mut SmallRng,
-    script_public_key_pool: &Vec<ScriptPublicKey>,
-) -> UtxoEntry {
+pub fn generate_random_utxo_from_script_public_key_pool(rng: &mut SmallRng, script_public_key_pool: &[ScriptPublicKey]) -> UtxoEntry {
     UtxoEntry::new(
         rng.gen_range(1..100_000), //we choose small amounts as to not overflow with large utxosets.
         script_public_key_pool.choose(rng).expect("expected_script_public key").clone(),
diff --git a/consensus/wasm/src/utils.rs b/consensus/wasm/src/utils.rs
index 0139b573f..b70664e1e 100644
--- a/consensus/wasm/src/utils.rs
+++ b/consensus/wasm/src/utils.rs
@@ -1,5 +1,5 @@
 use crate::result::Result;
-use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues};
+use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync};
 use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL;
 use kaspa_consensus_core::tx;
@@ -9,9 +9,9 @@ pub fn script_hashes(mut mutable_tx: tx::SignableTransaction) -> Result, core: Arc) {
-        return tokio::runtime::Builder::new_multi_thread()
+        tokio::runtime::Builder::new_multi_thread()
             .worker_threads(self.threads)
             .enable_all()
             .build()
             .expect("Failed building the Runtime")
-            .block_on(async { self.worker_impl(core).await });
+            .block_on(async { self.worker_impl(core).await })
     }

     pub async fn worker_impl(self: &Arc, core: Arc) {
diff --git a/crypto/addresses/src/lib.rs b/crypto/addresses/src/lib.rs
index 8e3ea385a..48c0baf19 100644
--- a/crypto/addresses/src/lib.rs
+++ b/crypto/addresses/src/lib.rs
@@ -506,7 +506,7 @@ impl<'de> Deserialize<'de> for Address {
 impl TryCastFromJs for Address {
     type Error = AddressError;
-    fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error>
+    fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error>
     where
         R: AsRef + 'a,
     {
diff --git a/crypto/hashes/src/lib.rs b/crypto/hashes/src/lib.rs
index d9ff47997..da9019af2 100644
--- a/crypto/hashes/src/lib.rs
+++ b/crypto/hashes/src/lib.rs
@@ -187,7 +187,7 @@ impl Hash {
 type TryFromError = workflow_wasm::error::Error;
 impl TryCastFromJs for Hash {
     type Error = TryFromError;
-    fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error>
+    fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error>
     where
         R: AsRef + 'a,
     {
diff --git a/crypto/muhash/src/lib.rs b/crypto/muhash/src/lib.rs
index 3fa7fc6e6..2ad059466 100644
--- a/crypto/muhash/src/lib.rs
+++ b/crypto/muhash/src/lib.rs
@@ -146,7 +146,7 @@ pub struct MuHashElementBuilder<'a> {
     element_hasher: MuHashElementHash,
 }

-impl<'a> HasherBase for MuHashElementBuilder<'a> {
+impl HasherBase for MuHashElementBuilder<'_> {
     fn update>(&mut self, data: A) -> &mut Self {
         self.element_hasher.write(data);
         self
diff --git a/crypto/muhash/src/u3072.rs b/crypto/muhash/src/u3072.rs
index 82021eb88..fae82b8cf 100644
--- a/crypto/muhash/src/u3072.rs
+++ b/crypto/muhash/src/u3072.rs
@@ -88,6 +88,16 @@ impl U3072 {
     }

     fn mul(&mut self, other: &U3072) {
+        /*
+           Optimization: short-circuit when the LHS is one
+           - This case is especially frequent during a parallel reduce operation, where the identity (one) is used as the LHS of each sub-computation
+           - If self ≠ one, the comparison exits early; if the operands are equal, we gain much more than we lose
+           - Benchmarks show that general performance remains the same while parallel reduction gains ~35%
+        */
+        if *self == Self::one() {
+            *self = *other;
+            return;
+        }
         let (mut carry_low, mut carry_high, mut carry_highest) = (0, 0, 0);
         let mut tmp = Self::one();
diff --git a/crypto/txscript/Cargo.toml b/crypto/txscript/Cargo.toml
index e2f492ad3..46e3103e8 100644
--- a/crypto/txscript/Cargo.toml
+++ b/crypto/txscript/Cargo.toml
@@ -9,6 +9,9 @@ include.workspace = true
 license.workspace = true
 repository.workspace = true

+[[example]]
+name = "kip-10"
+
 [features]
 wasm32-core = []
 wasm32-sdk = []
diff --git
a/crypto/txscript/errors/src/lib.rs b/crypto/txscript/errors/src/lib.rs index 4c077dae3..b16ec4cea 100644 --- a/crypto/txscript/errors/src/lib.rs +++ b/crypto/txscript/errors/src/lib.rs @@ -6,8 +6,8 @@ pub enum TxScriptError { MalformedPushSize(Vec), #[error("opcode requires {0} bytes, but script only has {1} remaining")] MalformedPush(usize, usize), - #[error("transaction input index {0} >= {1}")] - InvalidIndex(usize, usize), + #[error("transaction input {0} is out of bounds, should be non-negative below {1}")] + InvalidInputIndex(i32, usize), #[error("combined stack size {0} > max allowed {1}")] StackSizeExceeded(usize, usize), #[error("attempt to execute invalid opcode {0}")] @@ -69,4 +69,14 @@ pub enum TxScriptError { InvalidStackOperation(usize, usize), #[error("script of size {0} exceeded maximum allowed size of {1}")] ScriptSize(usize, usize), + #[error("transaction output {0} is out of bounds, should be non-negative below {1}")] + InvalidOutputIndex(i32, usize), + #[error(transparent)] + Serialization(#[from] SerializationError), +} + +#[derive(Error, PartialEq, Eq, Debug, Clone, Copy)] +pub enum SerializationError { + #[error("Number exceeds 8 bytes: {0}")] + NumberTooLong(i64), } diff --git a/crypto/txscript/examples/kip-10.rs b/crypto/txscript/examples/kip-10.rs new file mode 100644 index 000000000..4077385a7 --- /dev/null +++ b/crypto/txscript/examples/kip-10.rs @@ -0,0 +1,663 @@ +use kaspa_addresses::{Address, Prefix, Version}; +use kaspa_consensus_core::{ + hashing::{ + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, + sighash_type::SIG_HASH_ALL, + }, + tx::{ + MutableTransaction, PopulatedTransaction, Transaction, TransactionId, TransactionInput, TransactionOutpoint, + TransactionOutput, UtxoEntry, VerifiableTransaction, + }, +}; +use kaspa_txscript::{ + caches::Cache, + opcodes::codes::{ + OpCheckSig, OpCheckSigVerify, OpDup, OpElse, OpEndIf, OpEqualVerify, OpFalse, OpGreaterThanOrEqual, OpIf, OpSub, OpTrue, + OpTxInputAmount, OpTxInputIndex, OpTxInputSpk, OpTxOutputAmount, OpTxOutputSpk, + }, + pay_to_address_script, pay_to_script_hash_script, + script_builder::{ScriptBuilder, ScriptBuilderResult}, + TxScriptEngine, +}; +use kaspa_txscript_errors::TxScriptError::{EvalFalse, VerifyError}; +use rand::thread_rng; +use secp256k1::Keypair; + +/// Main function to execute all Kaspa transaction script scenarios. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for all scenarios. +fn main() -> ScriptBuilderResult<()> { + threshold_scenario()?; + threshold_scenario_limited_one_time()?; + threshold_scenario_limited_2_times()?; + shared_secret_scenario()?; + Ok(()) +} + +/// # Standard Threshold Scenario +/// +/// This scenario demonstrates the use of custom opcodes and script execution within the Kaspa blockchain ecosystem. +/// There are two main cases: +/// +/// 1. **Owner case:** The script checks if the input is used by the owner and verifies the owner's signature. +/// 2. **Borrower case:** The script allows the input to be consumed if the output with the same index has a value of input + threshold and goes to the P2SH of the script itself. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. 
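+///
+/// Informally, the redeem script assembled below has the following shape (the spender's
+/// signature script selects the branch via OpTrue/OpFalse):
+///
+/// OpIf
+///     <owner_pubkey> OpCheckSig
+/// OpElse
+///     OpTxInputIndex OpTxInputSpk OpTxInputIndex OpTxOutputSpk OpEqualVerify
+///     OpTxInputIndex OpTxOutputAmount <threshold> OpSub
+///     OpTxInputIndex OpTxInputAmount OpGreaterThanOrEqual
+/// OpEndIf
+///
+/// i.e., the borrower path requires output[i].spk == input[i].spk and output[i].amount - threshold >= input[i].amount.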
+fn threshold_scenario() -> ScriptBuilderResult<()> { + println!("\n[STANDARD] Running standard threshold scenario"); + // Create a new key pair for the owner + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Set a threshold value for comparison + let threshold: i64 = 100; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create the script builder + let mut builder = ScriptBuilder::new(); + let script = builder + // Owner branch + .add_op(OpIf)? + .add_data(owner.x_only_public_key().0.serialize().as_slice())? + .add_op(OpCheckSig)? + // Borrower branch + .add_op(OpElse)? + .add_ops(&[OpTxInputIndex, OpTxInputSpk, OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount])? + .add_i64(threshold)? + .add_ops(&[OpSub, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])? + .add_op(OpEndIf)? + .drain(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: spk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[STANDARD] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[STANDARD] Owner branch execution successful"); + } + + // Check borrower branch + { + println!("[STANDARD] Checking borrower branch"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[STANDARD] Borrower branch execution successful"); + } + + // Check 
borrower branch with threshold not reached + { + println!("[STANDARD] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[STANDARD] Borrower branch with threshold not reached failed as expected"); + } + + println!("[STANDARD] Standard threshold scenario completed successfully"); + Ok(()) +} + +/// Generate a script for limited-time borrowing scenarios +/// +/// This function creates a script that allows for limited-time borrowing with a threshold, +/// or spending by the owner at any time. It's generic enough to be used for both one-time +/// and multi-time borrowing scenarios. +/// +/// # Arguments +/// +/// * `owner` - The public key of the owner +/// * `threshold` - The threshold amount that must be met for borrowing +/// * `output_spk` - The output script public key as a vector of bytes +/// +/// # Returns +/// +/// * The generated script as a vector of bytes +fn generate_limited_time_script(owner: &Keypair, threshold: i64, output_spk: Vec) -> ScriptBuilderResult> { + let mut builder = ScriptBuilder::new(); + let script = builder + // Owner branch + .add_op(OpIf)? + .add_data(owner.x_only_public_key().0.serialize().as_slice())? + .add_op(OpCheckSig)? + // Borrower branch + .add_op(OpElse)? + .add_data(&output_spk)? + .add_ops(&[OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount])? + .add_i64(threshold)? + .add_ops(&[OpSub, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])? + .add_op(OpEndIf)? + .drain(); + + Ok(script) +} + +// Helper function to create P2PK script as a vector +fn p2pk_as_vec(owner: &Keypair) -> Vec { + let p2pk = + pay_to_address_script(&Address::new(Prefix::Mainnet, Version::PubKey, owner.x_only_public_key().0.serialize().as_slice())); + let version = p2pk.version.to_be_bytes(); + let script = p2pk.script(); + let mut v = Vec::with_capacity(version.len() + script.len()); + v.extend_from_slice(&version); + v.extend_from_slice(script); + v +} + +/// # Threshold Scenario with Limited One-Time Borrowing +/// +/// This function demonstrates a modified version of the threshold scenario where borrowing +/// is limited to a single occurrence. The key difference from the standard threshold scenario +/// is that the output goes to a Pay-to-Public-Key (P2PK) address instead of a Pay-to-Script-Hash (P2SH) +/// address of the script itself. +/// +/// ## Key Features: +/// 1. **One-Time Borrowing:** The borrower can only use this mechanism once, as the funds are +/// sent to a regular P2PK address instead of back to the script. +/// 2. **Owner Access:** The owner retains the ability to spend the funds at any time using their private key. +/// 3. **Threshold Mechanism:** The borrower must still meet the threshold requirement to spend the funds. +/// 4. **Output Validation:** Ensures the output goes to the correct address. +/// +/// ## Scenarios Tested: +/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature. +/// 2. **Borrower Spending:** Checks if the borrower can spend when meeting the threshold and +/// sending to the correct P2PK address. +/// 3. **Invalid Borrower Attempt (Threshold):** Ensures the script fails if the borrower doesn't meet the threshold. +/// 4. 
**Invalid Borrower Attempt (Wrong Output):** Ensures the script fails if the output goes to an incorrect address. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. +fn threshold_scenario_limited_one_time() -> ScriptBuilderResult<()> { + println!("\n[ONE-TIME] Running threshold one-time scenario"); + // Create a new key pair for the owner + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Set a threshold value for comparison + let threshold: i64 = 100; + + let p2pk = + pay_to_address_script(&Address::new(Prefix::Mainnet, Version::PubKey, owner.x_only_public_key().0.serialize().as_slice())); + let p2pk_vec = p2pk_as_vec(&owner); + let script = generate_limited_time_script(&owner, threshold, p2pk_vec.clone())?; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: p2pk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[ONE-TIME] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[ONE-TIME] Owner branch execution successful"); + } + + // Check borrower branch + { + println!("[ONE-TIME] Checking borrower branch"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[ONE-TIME] Borrower branch execution successful"); + } + + // Check borrower 
branch with threshold not reached + { + println!("[ONE-TIME] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[ONE-TIME] Borrower branch with threshold not reached failed as expected"); + } + + // Check borrower branch with output going to wrong address + { + println!("[ONE-TIME] Checking borrower branch with output going to wrong address"); + // Create a new key pair for a different address + let wrong_recipient = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let wrong_p2pk = pay_to_address_script(&Address::new( + Prefix::Mainnet, + Version::PubKey, + wrong_recipient.x_only_public_key().0.serialize().as_slice(), + )); + + // Create a new transaction with the wrong output address + let mut wrong_tx = tx.clone(); + wrong_tx.outputs[0].script_public_key = wrong_p2pk; + wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&script)?.drain(); + + let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input( + &wrong_tx, + &wrong_tx.tx.inputs[0], + 0, + &utxo_entry, + &reused_values, + &sig_cache, + true, + ); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[ONE-TIME] Borrower branch with output going to wrong address failed as expected"); + } + + println!("[ONE-TIME] Threshold one-time scenario completed successfully"); + Ok(()) +} + +/// # Threshold Scenario with Limited Two-Times Borrowing +/// +/// This function demonstrates a modified version of the threshold scenario where borrowing +/// is limited to two occurrences. The key difference from the one-time scenario is that +/// the first borrowing outputs to a P2SH of the one-time script, allowing for a second borrowing. +/// +/// ## Key Features: +/// 1. **Two-Times Borrowing:** The borrower can use this mechanism twice. +/// 2. **Owner Access:** The owner retains the ability to spend the funds at any time using their private key. +/// 3. **Threshold Mechanism:** The borrower must still meet the threshold requirement to spend the funds. +/// 4. **Output Validation:** Ensures the output goes to the correct address (P2SH of one-time script for first borrow). +/// +/// ## Scenarios Tested: +/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature. +/// 2. **Borrower First Spending:** Checks if the borrower can spend when meeting the threshold and +/// sending to the correct P2SH address of the one-time script. +/// 3. **Invalid Borrower Attempt (Threshold):** Ensures the script fails if the borrower doesn't meet the threshold. +/// 4. **Invalid Borrower Attempt (Wrong Output):** Ensures the script fails if the output goes to an incorrect address. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. 
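+///
+/// Informally, the chain of scripts built below is:
+/// P2SH(two_times_script) --borrow--> P2SH(one_time_script) --borrow--> P2PK(owner)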
+fn threshold_scenario_limited_2_times() -> ScriptBuilderResult<()> { + println!("\n[TWO-TIMES] Running threshold two-times scenario"); + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let threshold: i64 = 100; + + // First, create the one-time script + let p2pk_vec = p2pk_as_vec(&owner); + let one_time_script = generate_limited_time_script(&owner, threshold, p2pk_vec)?; + + // Now, create the two-times script using the one-time script as output + let p2sh_one_time = pay_to_script_hash_script(&one_time_script); + let p2sh_one_time_vec = { + let version = p2sh_one_time.version.to_be_bytes(); + let script = p2sh_one_time.script(); + let mut v = Vec::with_capacity(version.len() + script.len()); + v.extend_from_slice(&version); + v.extend_from_slice(script); + v + }; + + let two_times_script = generate_limited_time_script(&owner, threshold, p2sh_one_time_vec)?; + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&two_times_script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: 1000000000 + threshold as u64, script_public_key: p2sh_one_time }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&two_times_script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let mut tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + + // Check owner branch + { + println!("[TWO-TIMES] Checking owner branch"); + let mut tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = owner.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&two_times_script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[TWO-TIMES] Owner branch execution successful"); + } + + // Check borrower branch (first borrowing) + { + println!("[TWO-TIMES] Checking borrower branch (first borrowing)"); + tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, 
&reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[TWO-TIMES] Borrower branch (first borrowing) execution successful"); + } + + // Check borrower branch with threshold not reached + { + println!("[TWO-TIMES] Checking borrower branch with threshold not reached"); + // Less than threshold + tx.outputs[0].value -= 1; + let tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.tx.inputs[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(EvalFalse)); + println!("[TWO-TIMES] Borrower branch with threshold not reached failed as expected"); + } + + // Check borrower branch with output going to wrong address + { + println!("[TWO-TIMES] Checking borrower branch with output going to wrong address"); + // Create a new key pair for a different address + let wrong_recipient = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let wrong_p2pk = pay_to_address_script(&Address::new( + Prefix::Mainnet, + Version::PubKey, + wrong_recipient.x_only_public_key().0.serialize().as_slice(), + )); + + // Create a new transaction with the wrong output address + let mut wrong_tx = tx.clone(); + wrong_tx.outputs[0].script_public_key = wrong_p2pk; + wrong_tx.inputs[0].signature_script = ScriptBuilder::new().add_op(OpFalse)?.add_data(&two_times_script)?.drain(); + + let wrong_tx = PopulatedTransaction::new(&wrong_tx, vec![utxo_entry.clone()]); + let mut vm = TxScriptEngine::from_transaction_input( + &wrong_tx, + &wrong_tx.tx.inputs[0], + 0, + &utxo_entry, + &reused_values, + &sig_cache, + true, + ); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[TWO-TIMES] Borrower branch with output going to wrong address failed as expected"); + } + + println!("[TWO-TIMES] Threshold two-times scenario completed successfully"); + Ok(()) +} + +/// # Shared Secret Scenario +/// +/// This scenario demonstrates the use of a shared secret within the Kaspa blockchain ecosystem. +/// Instead of using a threshold value, it checks the shared secret and the signature associated with it. +/// +/// ## Key Features: +/// 1. **Owner Access:** The owner can spend funds at any time using their signature. +/// 2. **Shared Secret:** A separate keypair is used as a shared secret for borrower access. +/// 3. **Borrower Verification:** The borrower must provide the correct shared secret signature to spend. +/// +/// ## Scenarios Tested: +/// 1. **Owner Spending:** Verifies that the owner can spend the funds using their signature. +/// 2. **Borrower with Correct Secret:** Checks if the borrower can spend when providing the correct shared secret. +/// 3. **Borrower with Incorrect Secret:** Ensures the script fails if the borrower uses an incorrect secret. +/// +/// # Returns +/// +/// * `ScriptBuilderResult<()>` - Result of script builder operations for this scenario. +fn shared_secret_scenario() -> ScriptBuilderResult<()> { + println!("\n[SHARED-SECRET] Running shared secret scenario"); + + // Create key pairs for the owner, shared secret, and a potential borrower + let owner = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let shared_secret_kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + let borrower_kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + + // Initialize a cache for signature verification + let sig_cache = Cache::new(10_000); + + // Create the script builder + let mut builder = ScriptBuilder::new(); + let script = builder + // Owner branch + .add_op(OpIf)? 
+ .add_data(owner.x_only_public_key().0.serialize().as_slice())? + .add_op(OpCheckSig)? + // Borrower branch + .add_op(OpElse)? + .add_op(OpDup)? + .add_data(shared_secret_kp.x_only_public_key().0.serialize().as_slice())? + .add_op(OpEqualVerify)? + .add_op(OpCheckSigVerify)? + .add_ops(&[OpTxInputIndex, OpTxInputSpk, OpTxInputIndex, OpTxOutputSpk, OpEqualVerify, OpTxInputIndex, OpTxOutputAmount, OpTxInputIndex, OpTxInputAmount, OpGreaterThanOrEqual])? + .add_op(OpEndIf)? + .drain(); + + // Generate the script public key + let spk = pay_to_script_hash_script(&script); + + // Define the input value + let input_value = 1000000000; + + // Create a transaction output + let output = TransactionOutput { value: input_value, script_public_key: spk.clone() }; + + // Create a UTXO entry for the input + let utxo_entry = UtxoEntry::new(input_value, spk, 0, false); + + // Create a transaction input + let input = TransactionInput { + previous_outpoint: TransactionOutpoint { + transaction_id: TransactionId::from_bytes([ + 0xc9, 0x97, 0xa5, 0xe5, 0x6e, 0x10, 0x42, 0x02, 0xfa, 0x20, 0x9c, 0x6a, 0x85, 0x2d, 0xd9, 0x06, 0x60, 0xa2, 0x0b, + 0x2d, 0x9c, 0x35, 0x24, 0x23, 0xed, 0xce, 0x25, 0x85, 0x7f, 0xcd, 0x37, 0x04, + ]), + index: 0, + }, + signature_script: ScriptBuilder::new().add_data(&script)?.drain(), + sequence: 4294967295, + sig_op_count: 1, + }; + + // Create a transaction with the input and output + let tx = Transaction::new(1, vec![input.clone()], vec![output.clone()], 0, Default::default(), 0, vec![]); + let sign = |pk: Keypair| { + // Prepare to reuse values for signature hashing + let reused_values = SigHashReusedValuesUnsync::new(); + + let tx = MutableTransaction::with_entries(tx.clone(), vec![utxo_entry.clone()]); + let sig_hash = calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + + let sig = pk.sign_schnorr(msg); + let mut signature = Vec::new(); + signature.extend_from_slice(sig.as_ref().as_slice()); + signature.push(SIG_HASH_ALL.to_u8()); + (tx, signature, reused_values) + }; + + // Check owner branch + { + println!("[SHARED-SECRET] Checking owner branch"); + let (mut tx, signature, reused_values) = sign(owner); + let mut builder = ScriptBuilder::new(); + builder.add_data(&signature)?; + builder.add_op(OpTrue)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[SHARED-SECRET] Owner branch execution successful"); + } + + // Check borrower branch with correct shared secret + { + println!("[SHARED-SECRET] Checking borrower branch with correct shared secret"); + let (mut tx, signature, reused_values) = sign(shared_secret_kp); + builder.add_data(&signature)?; + builder.add_data(shared_secret_kp.x_only_public_key().0.serialize().as_slice())?; + builder.add_op(OpFalse)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Ok(())); + println!("[SHARED-SECRET] Borrower branch with correct shared secret execution successful"); + } + + // Check borrower branch with incorrect secret + { + 
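+        // The borrower pushes their own pubkey here, which fails OpEqualVerify against the shared-secret
+        // pubkey embedded in the script, so execution aborts with VerifyError before any signature check.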
println!("[SHARED-SECRET] Checking borrower branch with incorrect secret"); + let (mut tx, signature, reused_values) = sign(borrower_kp); + builder.add_data(&signature)?; + builder.add_data(borrower_kp.x_only_public_key().0.serialize().as_slice())?; + builder.add_op(OpFalse)?; + builder.add_data(&script)?; + { + tx.tx.inputs[0].signature_script = builder.drain(); + } + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, &utxo_entry, &reused_values, &sig_cache, true); + assert_eq!(vm.execute(), Err(VerifyError)); + println!("[SHARED-SECRET] Borrower branch with incorrect secret failed as expected"); + } + + println!("[SHARED-SECRET] Shared secret scenario completed successfully"); + Ok(()) +} diff --git a/crypto/txscript/src/caches.rs b/crypto/txscript/src/caches.rs index 11d468260..507dfb542 100644 --- a/crypto/txscript/src/caches.rs +++ b/crypto/txscript/src/caches.rs @@ -31,6 +31,10 @@ impl Option { self.map.read().get(key).cloned().inspect(|_data| { self.counters.get_counts.fetch_add(1, Ordering::Relaxed); diff --git a/crypto/txscript/src/data_stack.rs b/crypto/txscript/src/data_stack.rs index 5d8ea18ed..898550fec 100644 --- a/crypto/txscript/src/data_stack.rs +++ b/crypto/txscript/src/data_stack.rs @@ -1,11 +1,59 @@ use crate::TxScriptError; use core::fmt::Debug; use core::iter; +use kaspa_txscript_errors::SerializationError; +use std::cmp::Ordering; +use std::num::TryFromIntError; +use std::ops::Deref; -const DEFAULT_SCRIPT_NUM_LEN: usize = 4; +#[derive(PartialEq, Eq, Debug, Default, PartialOrd, Ord)] +pub(crate) struct SizedEncodeInt(pub(crate) i64); -#[derive(PartialEq, Eq, Debug, Default)] -pub(crate) struct SizedEncodeInt(i64); +impl From for SizedEncodeInt { + fn from(value: i64) -> Self { + SizedEncodeInt(value) + } +} + +impl From for SizedEncodeInt { + fn from(value: i32) -> Self { + SizedEncodeInt(value as i64) + } +} + +impl TryFrom> for i32 { + type Error = TryFromIntError; + + fn try_from(value: SizedEncodeInt) -> Result { + value.0.try_into() + } +} + +impl PartialEq for SizedEncodeInt { + fn eq(&self, other: &i64) -> bool { + self.0 == *other + } +} + +impl PartialOrd for SizedEncodeInt { + fn partial_cmp(&self, other: &i64) -> Option { + self.0.partial_cmp(other) + } +} + +impl Deref for SizedEncodeInt { + type Target = i64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl From> for i64 { + fn from(value: SizedEncodeInt) -> Self { + value.0 + } +} pub(crate) type Stack = Vec>; @@ -19,7 +67,7 @@ pub(crate) trait DataStack { Vec: OpcodeData; fn pop_raw(&mut self) -> Result<[Vec; SIZE], TxScriptError>; fn peek_raw(&self) -> Result<[Vec; SIZE], TxScriptError>; - fn push_item(&mut self, item: T) + fn push_item(&mut self, item: T) -> Result<(), TxScriptError> where Vec: OpcodeData; fn drop_items(&mut self) -> Result<(), TxScriptError>; @@ -31,7 +79,9 @@ pub(crate) trait DataStack { pub(crate) trait OpcodeData { fn deserialize(&self) -> Result; - fn serialize(from: &T) -> Self; + fn serialize(from: &T) -> Result + where + Self: Sized; } fn check_minimal_data_encoding(v: &[u8]) -> Result<(), TxScriptError> { @@ -59,6 +109,36 @@ fn check_minimal_data_encoding(v: &[u8]) -> Result<(), TxScriptError> { Ok(()) } +#[inline] +fn serialize_i64(from: &i64) -> Vec { + let sign = from.signum(); + let mut positive = from.unsigned_abs(); + let mut last_saturated = false; + let mut number_vec: Vec = iter::from_fn(move || { + if positive == 0 { + if last_saturated { + last_saturated = false; + Some(0) + } else { + None + 
+            }
+        } else {
+            let value = positive & 0xff;
+            last_saturated = (value & 0x80) != 0;
+            positive >>= 8;
+            Some(value as u8)
+        }
+    })
+    .collect();
+    if sign == -1 {
+        match number_vec.last_mut() {
+            Some(num) => *num |= 0x80,
+            _ => unreachable!(),
+        }
+    }
+    number_vec
+}
+
 fn deserialize_i64(v: &[u8]) -> Result<i64, TxScriptError> {
     match v.len() {
         l if l > size_of::<i64>() => {
@@ -75,62 +155,36 @@ fn deserialize_i64(v: &[u8]) -> Result<i64, TxScriptError> {
     }
 }

+// TODO: Rename to DefaultSizedEncodeInt when KIP-10 is activated
+pub type Kip10I64 = SizedEncodeInt<8>;
+
 impl OpcodeData<i64> for Vec<u8> {
     #[inline]
     fn deserialize(&self) -> Result<i64, TxScriptError> {
-        match self.len() > DEFAULT_SCRIPT_NUM_LEN {
-            true => Err(TxScriptError::NumberTooBig(format!(
-                "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}",
-                self,
-                self.len(),
-                DEFAULT_SCRIPT_NUM_LEN
-            ))),
-            false => deserialize_i64(self),
-        }
+        // TODO: Change LEN to 8 once KIP-10 is activated
+        OpcodeData::<SizedEncodeInt<4>>::deserialize(self).map(i64::from)
     }

     #[inline]
-    fn serialize(from: &i64) -> Self {
-        let sign = from.signum();
-        let mut positive = from.abs();
-        let mut last_saturated = false;
-        let mut number_vec: Vec<u8> = iter::from_fn(move || {
-            if positive == 0 {
-                if last_saturated {
-                    last_saturated = false;
-                    Some(0)
-                } else {
-                    None
-                }
-            } else {
-                let value = positive & 0xff;
-                last_saturated = (value & 0x80) != 0;
-                positive >>= 8;
-                Some(value as u8)
-            }
-        })
-        .collect();
-        if sign == -1 {
-            match number_vec.last_mut() {
-                Some(num) => *num |= 0x80,
-                _ => unreachable!(),
-            }
-        }
-        number_vec
+    fn serialize(from: &i64) -> Result<Self, SerializationError> {
+        // Note that serialization and deserialization use different LEN.
+        // This is because prior to KIP-10, only deserialization size was limited.
+        // It's safe to use 8 here because i32 arithmetic operations (which were the
+        // only ones that were supported prior to KIP-10) can't get to i64::MIN
+        // (the only i64 value that requires more than 8 bytes to serialize).
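+        // (For example, i64::MIN has magnitude 2^63 = 0x8000_0000_0000_0000: the high bit of its 8th byte
+        // is set, so the sign-magnitude encoding of serialize_i64 needs a 9th byte, which SizedEncodeInt<8> rejects.)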
+        OpcodeData::<SizedEncodeInt<8>>::serialize(&(*from).into())
     }
 }

 impl OpcodeData<i32> for Vec<u8> {
     #[inline]
     fn deserialize(&self) -> Result<i32, TxScriptError> {
-        let res = OpcodeData::<i64>::deserialize(self)?;
-        i32::try_from(res.clamp(i32::MIN as i64, i32::MAX as i64))
-            .map_err(|e| TxScriptError::InvalidState(format!("data is too big for `i32`: {e}")))
+        OpcodeData::<SizedEncodeInt<4>>::deserialize(self).map(|v| v.try_into().expect("number is within i32 range"))
     }

     #[inline]
-    fn serialize(from: &i32) -> Self {
-        OpcodeData::<i64>::serialize(&(*from as i64))
+    fn serialize(from: &i32) -> Result<Self, SerializationError> {
+        OpcodeData::>::serialize(&(*from).into())
     }
 }
@@ -138,19 +192,23 @@ impl<const LEN: usize> OpcodeData<SizedEncodeInt<LEN>> for Vec<u8> {
     #[inline]
     fn deserialize(&self) -> Result<SizedEncodeInt<LEN>, TxScriptError> {
         match self.len() > LEN {
-            true => Err(TxScriptError::InvalidState(format!(
+            true => Err(TxScriptError::NumberTooBig(format!(
                 "numeric value encoded as {:x?} is {} bytes which exceeds the max allowed of {}",
                 self,
                 self.len(),
-                DEFAULT_SCRIPT_NUM_LEN
+                LEN
             ))),
             false => deserialize_i64(self).map(SizedEncodeInt::<LEN>),
         }
     }

     #[inline]
-    fn serialize(from: &SizedEncodeInt<LEN>) -> Self {
-        OpcodeData::<i64>::serialize(&from.0)
+    fn serialize(from: &SizedEncodeInt<LEN>) -> Result<Self, SerializationError> {
+        let bytes = serialize_i64(&from.0);
+        if bytes.len() > LEN {
+            return Err(SerializationError::NumberTooLong(from.0));
+        }
+        Ok(bytes)
     }
 }
@@ -166,11 +224,11 @@ impl OpcodeData<bool> for Vec<u8> {
     }

     #[inline]
-    fn serialize(from: &bool) -> Self {
-        match from {
+    fn serialize(from: &bool) -> Result<Self, SerializationError> {
+        Ok(match from {
             true => vec![1],
             false => vec![],
-        }
+        })
     }
 }
@@ -216,11 +274,13 @@ impl DataStack for Stack {
     }

     #[inline]
-    fn push_item(&mut self, item: T)
+    fn push_item(&mut self, item: T) -> Result<(), TxScriptError>
     where
         Vec<u8>: OpcodeData<T>,
     {
-        Vec::push(self, OpcodeData::serialize(&item));
+        let v = OpcodeData::serialize(&item)?;
+        Vec::push(self, v);
+        Ok(())
     }

     #[inline]
@@ -283,9 +343,9 @@ impl DataStack for Stack {

 #[cfg(test)]
 mod tests {
-    use super::OpcodeData;
+    use super::{Kip10I64, OpcodeData};
     use crate::data_stack::SizedEncodeInt;
-    use kaspa_txscript_errors::TxScriptError;
+    use kaspa_txscript_errors::{SerializationError, TxScriptError};

     // TestScriptNumBytes
     #[test]
@@ -322,7 +382,7 @@ mod tests {
         TestCase { num: 2147483647, serialized: hex::decode("ffffff7f").expect("failed parsing hex") },
         TestCase { num: -2147483647, serialized: hex::decode("ffffffff").expect("failed parsing hex") },
         // Values that are out of range for data that is interpreted as
-        // numbers, but are allowed as the result of numeric operations.
+        // numbers before KIP-10 is activated, but are allowed as the result of numeric operations.
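+        // (Each of the values below encodes to 5 or more bytes, exceeding the pre-KIP-10 deserialization limit of 4.)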
TestCase { num: 2147483648, serialized: hex::decode("0000008000").expect("failed parsing hex") }, TestCase { num: -2147483648, serialized: hex::decode("0000008080").expect("failed parsing hex") }, TestCase { num: 2415919104, serialized: hex::decode("0000009000").expect("failed parsing hex") }, @@ -340,9 +400,13 @@ mod tests { ]; for test in tests { - let serialized: Vec = OpcodeData::::serialize(&test.num); + let serialized: Vec = OpcodeData::::serialize(&test.num).unwrap(); assert_eq!(serialized, test.serialized); } + + // special case 9-byte i64 + let r: Result, _> = OpcodeData::::serialize(&-9223372036854775808); + assert_eq!(r, Err(SerializationError::NumberTooLong(-9223372036854775808))); } // TestMakeScriptNum @@ -537,7 +601,73 @@ mod tests { }, // 7340032 // Values above 8 bytes should always return error ]; - + let kip10_tests = vec![ + TestCase:: { + serialized: hex::decode("0000008000").expect("failed parsing hex"), + result: Ok(Kip10I64::from(2147483648i64)), + }, + TestCase:: { + serialized: hex::decode("0000008080").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-2147483648i64)), + }, + TestCase:: { + serialized: hex::decode("0000009000").expect("failed parsing hex"), + result: Ok(Kip10I64::from(2415919104i64)), + }, + TestCase:: { + serialized: hex::decode("0000009080").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-2415919104i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffff00").expect("failed parsing hex"), + result: Ok(Kip10I64::from(4294967295i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffff80").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-4294967295i64)), + }, + TestCase:: { + serialized: hex::decode("0000000001").expect("failed parsing hex"), + result: Ok(Kip10I64::from(4294967296i64)), + }, + TestCase:: { + serialized: hex::decode("0000000081").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-4294967296i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffff00").expect("failed parsing hex"), + result: Ok(Kip10I64::from(281474976710655i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffff80").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-281474976710655i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffffff00").expect("failed parsing hex"), + result: Ok(Kip10I64::from(72057594037927935i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffffff80").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-72057594037927935i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffffff7f").expect("failed parsing hex"), + result: Ok(Kip10I64::from(9223372036854775807i64)), + }, + TestCase:: { + serialized: hex::decode("ffffffffffffffff").expect("failed parsing hex"), + result: Ok(Kip10I64::from(-9223372036854775807i64)), + }, + // Minimally encoded values that are out of range for data that + // is interpreted as script numbers with the minimal encoding + // flag set. Should error and return 0. + TestCase:: { + serialized: hex::decode("000000000000008080").expect("failed parsing hex"), + result: Err(TxScriptError::NumberTooBig( + "numeric value encoded as [0, 0, 0, 0, 0, 0, 0, 80, 80] is 9 bytes which exceeds the max allowed of 8".to_string(), + )), + }, + ]; let test_of_size_5 = vec![ TestCase::> { serialized: hex::decode("ffffffff7f").expect("failed parsing hex"), @@ -633,5 +763,10 @@ mod tests { // code matches the value specified in the test instance. 
assert_eq!(test.serialized.deserialize(), test.result); } + for test in kip10_tests { + // Ensure the error code is of the expected type and the error + // code matches the value specified in the test instance. + assert_eq!(test.serialized.deserialize(), test.result); + } } } diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index b145fb90e..637a10aff 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -45,6 +45,8 @@ pub const MAX_PUB_KEYS_PER_MUTLTISIG: i32 = 20; // Note that this includes OP_RESERVED which counts as a push operation. pub const NO_COST_OPCODE: u8 = 0x60; +type DynOpcodeImplementation = Box>; + #[derive(Clone, Hash, PartialEq, Eq)] enum Signature { Secp256k1(secp256k1::schnorr::Signature), @@ -66,50 +68,56 @@ pub struct SigCacheKey { } enum ScriptSource<'a, T: VerifiableTransaction> { - TxInput { tx: &'a T, input: &'a TransactionInput, id: usize, utxo_entry: &'a UtxoEntry, is_p2sh: bool }, + TxInput { tx: &'a T, input: &'a TransactionInput, idx: usize, utxo_entry: &'a UtxoEntry, is_p2sh: bool }, StandAloneScripts(Vec<&'a [u8]>), } -pub struct TxScriptEngine<'a, T: VerifiableTransaction> { +pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> { dstack: Stack, astack: Stack, script_source: ScriptSource<'a, T>, // Outer caches for quicker calculation - // TODO:: make it compatible with threading - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, cond_stack: Vec, // Following if stacks, and whether it is running num_ops: i32, + kip10_enabled: bool, } -fn parse_script( +fn parse_script( script: &[u8], -) -> impl Iterator>, TxScriptError>> + '_ { +) -> impl Iterator, TxScriptError>> + '_ { script.iter().batching(|it| deserialize_next_opcode(it)) } -pub fn get_sig_op_count(signature_script: &[u8], prev_script_public_key: &ScriptPublicKey) -> u64 { +#[must_use] +pub fn get_sig_op_count( + signature_script: &[u8], + prev_script_public_key: &ScriptPublicKey, +) -> u64 { let is_p2sh = ScriptClass::is_pay_to_script_hash(prev_script_public_key.script()); - let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); + let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); if !is_p2sh { return get_sig_op_count_by_opcodes(&script_pub_key_ops); } - let signature_script_ops = parse_script::(signature_script).collect_vec(); + let signature_script_ops = parse_script::(signature_script).collect_vec(); if signature_script_ops.is_empty() || signature_script_ops.iter().any(|op| op.is_err() || !op.as_ref().unwrap().is_push_opcode()) { return 0; } let p2sh_script = signature_script_ops.last().expect("checked if empty above").as_ref().expect("checked if err above").get_data(); - let p2sh_ops = parse_script::(p2sh_script).collect_vec(); + let p2sh_ops = parse_script::(p2sh_script).collect_vec(); get_sig_op_count_by_opcodes(&p2sh_ops) } -fn get_sig_op_count_by_opcodes(opcodes: &[Result>, TxScriptError>]) -> u64 { +fn get_sig_op_count_by_opcodes( + opcodes: &[Result, TxScriptError>], +) -> u64 { // TODO: Check for overflows let mut num_sigs: u64 = 0; for (i, op) in opcodes.iter().enumerate() { @@ -142,12 +150,12 @@ fn get_sig_op_count_by_opcodes(opcodes: &[Result(script: &[u8]) -> bool { - parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) +pub fn is_unspendable(script: &[u8]) -> bool { + parse_script::(script).enumerate().any(|(index, op)| op.is_err() || 
(index == 0 && op.unwrap().value() == OpReturn)) } -impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { - pub fn new(reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { +impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> { + pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache, kip10_enabled: bool) -> Self { Self { dstack: vec![], astack: vec![], @@ -156,36 +164,58 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { sig_cache, cond_stack: vec![], num_ops: 0, + kip10_enabled, } } + /// Creates a new Script Engine for validating transaction input. + /// + /// # Arguments + /// * `tx` - The transaction being validated + /// * `input` - The input being validated + /// * `input_idx` - Index of the input in the transaction + /// * `utxo_entry` - UTXO entry being spent + /// * `reused_values` - Reused values for signature hashing + /// * `sig_cache` - Cache for signature verification + /// * `kip10_enabled` - Whether KIP-10 transaction introspection opcodes are enabled + /// + /// # Panics + /// * When input_idx >= number of inputs in transaction (malformed input) + /// + /// # Returns + /// Script engine instance configured for the given input pub fn from_transaction_input( tx: &'a T, input: &'a TransactionInput, input_idx: usize, utxo_entry: &'a UtxoEntry, - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, - ) -> Result { + kip10_enabled: bool, + ) -> Self { let script_public_key = utxo_entry.script_public_key.script(); // The script_public_key in P2SH is just validating the hash on the OpMultiSig script // the user provides let is_p2sh = ScriptClass::is_pay_to_script_hash(script_public_key); - match input_idx < tx.tx().inputs.len() { - true => Ok(Self { - dstack: Default::default(), - astack: Default::default(), - script_source: ScriptSource::TxInput { tx, input, id: input_idx, utxo_entry, is_p2sh }, - reused_values, - sig_cache, - cond_stack: Default::default(), - num_ops: 0, - }), - false => Err(TxScriptError::InvalidIndex(input_idx, tx.tx().inputs.len())), + assert!(input_idx < tx.tx().inputs.len()); + Self { + dstack: Default::default(), + astack: Default::default(), + script_source: ScriptSource::TxInput { tx, input, idx: input_idx, utxo_entry, is_p2sh }, + reused_values, + sig_cache, + cond_stack: Default::default(), + num_ops: 0, + kip10_enabled, } } - pub fn from_script(script: &'a [u8], reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { + pub fn from_script( + script: &'a [u8], + reused_values: &'a Reused, + sig_cache: &'a Cache, + kip10_enabled: bool, + ) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -194,15 +224,16 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { sig_cache, cond_stack: Default::default(), num_ops: 0, + kip10_enabled, } } #[inline] pub fn is_executing(&self) -> bool { - return self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True; + self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True } - fn execute_opcode(&mut self, opcode: Box>) -> Result<(), TxScriptError> { + fn execute_opcode(&mut self, opcode: DynOpcodeImplementation) -> Result<(), TxScriptError> { // Different from kaspad: Illegal and disabled opcode are checked on execute instead // Note that this includes OP_RESERVED which counts as a push operation. 
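
The engine changes above ripple through every call site: `reused_values` is now any `SigHashReusedValues` implementor taken by shared reference (e.g. the new `SigHashReusedValuesUnsync`), a `kip10_enabled` flag is threaded through both constructors, and `from_transaction_input` asserts on a bad input index instead of returning `Result`. A hedged sketch of the migrated call shape; the transaction objects are assumed to exist as in the tests later in this diff:

```rust
// Hypothetical call-site migration; `populated_tx`, `input`, and
// `utxo_entry` are assumed to be set up as in the test helpers below.
let sig_cache = Cache::new(10_000);
let reused_values = SigHashReusedValuesUnsync::new(); // shared `&`, no more `&mut`

let mut vm = TxScriptEngine::from_transaction_input(
    &populated_tx,
    &input,
    0, // input index; an out-of-range value now panics instead of returning Err
    &utxo_entry,
    &reused_values,
    &sig_cache,
    true, // kip10_enabled
);
vm.execute()?; // still returns Result<(), TxScriptError>
```
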
if !opcode.is_push_opcode() { @@ -293,7 +324,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { // each is successful scripts.iter().enumerate().filter(|(_, s)| !s.is_empty()).try_for_each(|(idx, s)| { let verify_only_push = - idx == 0 && matches!(self.script_source, ScriptSource::TxInput { tx: _, input: _, id: _, utxo_entry: _, is_p2sh: _ }); + idx == 0 && matches!(self.script_source, ScriptSource::TxInput { tx: _, input: _, idx: _, utxo_entry: _, is_p2sh: _ }); // Save script in p2sh if is_p2sh && idx == 1 { saved_stack = Some(self.dstack.clone()); @@ -430,21 +461,21 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { return Err(TxScriptError::NullFail); } - self.dstack.push_item(!failed); + self.dstack.push_item(!failed)?; Ok(()) } #[inline] fn check_schnorr_signature(&mut self, hash_type: SigHashType, key: &[u8], sig: &[u8]) -> Result { match self.script_source { - ScriptSource::TxInput { tx, id, .. } => { + ScriptSource::TxInput { tx, idx, .. } => { if sig.len() != 64 { return Err(TxScriptError::SigLength(sig.len())); } Self::check_pub_key_encoding(key)?; let pk = secp256k1::XOnlyPublicKey::from_slice(key).map_err(TxScriptError::InvalidSignature)?; let sig = secp256k1::schnorr::Signature::from_slice(sig).map_err(TxScriptError::InvalidSignature)?; - let sig_hash = calc_schnorr_signature_hash(tx, id, hash_type, self.reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, idx, hash_type, self.reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig_cache_key = SigCacheKey { signature: Signature::Secp256k1(sig), pub_key: PublicKey::Schnorr(pk), message: msg }; @@ -472,14 +503,14 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { fn check_ecdsa_signature(&mut self, hash_type: SigHashType, key: &[u8], sig: &[u8]) -> Result { match self.script_source { - ScriptSource::TxInput { tx, id, .. } => { + ScriptSource::TxInput { tx, idx, .. 
} => { if sig.len() != 64 { return Err(TxScriptError::SigLength(sig.len())); } Self::check_pub_key_encoding_ecdsa(key)?; let pk = secp256k1::PublicKey::from_slice(key).map_err(TxScriptError::InvalidSignature)?; let sig = secp256k1::ecdsa::Signature::from_compact(sig).map_err(TxScriptError::InvalidSignature)?; - let sig_hash = calc_ecdsa_signature_hash(tx, id, hash_type, self.reused_values); + let sig_hash = calc_ecdsa_signature_hash(tx, idx, hash_type, self.reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig_cache_key = SigCacheKey { signature: Signature::Ecdsa(sig), pub_key: PublicKey::Ecdsa(pk), message: msg }; @@ -505,6 +536,16 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { } } +trait SpkEncoding { + fn to_bytes(&self) -> Vec; +} + +impl SpkEncoding for ScriptPublicKey { + fn to_bytes(&self) -> Vec { + self.version.to_be_bytes().into_iter().chain(self.script().iter().copied()).collect() + } +} + #[cfg(test)] mod tests { use std::iter::once; @@ -512,6 +553,7 @@ mod tests { use crate::opcodes::codes::{OpBlake2b, OpCheckSig, OpData1, OpData2, OpData32, OpDup, OpEqual, OpPushData1, OpTrue}; use super::*; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -538,11 +580,15 @@ mod tests { fn populated_input(&self, _index: usize) -> (&TransactionInput, &UtxoEntry) { unimplemented!() } + + fn utxo(&self, _index: usize) -> Option<&UtxoEntry> { + unimplemented!() + } } fn run_test_script_cases(test_cases: Vec) { let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for test in test_cases { // Ensure encapsulation of variables (no leaking between tests) @@ -564,10 +610,18 @@ mod tests { let utxo_entry = UtxoEntry::new(output.value, output.script_public_key.clone(), 0, tx.is_coinbase()); let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - - let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) - .expect("Script creation failed"); - assert_eq!(vm.execute(), test.expected_result); + [false, true].into_iter().for_each(|kip10_enabled| { + let mut vm = TxScriptEngine::from_transaction_input( + &populated_tx, + &input, + 0, + &utxo_entry, + &reused_values, + &sig_cache, + kip10_enabled, + ); + assert_eq!(vm.execute(), test.expected_result); + }); } } @@ -783,7 +837,7 @@ mod tests { ]; for test in test_cases { - let check = TxScriptEngine::::check_pub_key_encoding(test.key); + let check = TxScriptEngine::::check_pub_key_encoding(test.key); if test.is_valid { assert_eq!( check, @@ -880,7 +934,10 @@ mod tests { for test in tests { assert_eq!( - get_sig_op_count::(test.signature_script, &test.prev_script_public_key), + get_sig_op_count::( + test.signature_script, + &test.prev_script_public_key + ), test.expected_sig_ops, "failed for '{}'", test.name @@ -909,7 +966,7 @@ mod tests { for test in tests { assert_eq!( - is_unspendable::(test.script_public_key), + is_unspendable::(test.script_public_key), test.expected, "failed for '{}'", test.name @@ -929,6 +986,7 @@ mod bitcoind_tests { use super::*; use crate::script_builder::ScriptBuilderError; use kaspa_consensus_core::constants::MAX_TX_IN_SEQUENCE_NUM; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use 
kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -987,7 +1045,7 @@ mod bitcoind_tests { } impl JsonTestRow { - fn test_row(&self) -> Result<(), TestError> { + fn test_row(&self, kip10_enabled: bool) -> Result<(), TestError> { // Parse test to objects let (sig_script, script_pub_key, expected_result) = match self.clone() { JsonTestRow::Test(sig_script, sig_pub_key, _, expected_result) => (sig_script, sig_pub_key, expected_result), @@ -999,7 +1057,7 @@ mod bitcoind_tests { } }; - let result = Self::run_test(sig_script, script_pub_key); + let result = Self::run_test(sig_script, script_pub_key, kip10_enabled); match Self::result_name(result.clone()).contains(&expected_result.as_str()) { true => Ok(()), @@ -1007,7 +1065,7 @@ mod bitcoind_tests { } } - fn run_test(sig_script: String, script_pub_key: String) -> Result<(), UnifiedError> { + fn run_test(sig_script: String, script_pub_key: String, kip10_enabled: bool) -> Result<(), UnifiedError> { let script_sig = opcodes::parse_short_form(sig_script).map_err(UnifiedError::ScriptBuilderError)?; let script_pub_key = ScriptPublicKey::from_vec(0, opcodes::parse_short_form(script_pub_key).map_err(UnifiedError::ScriptBuilderError)?); @@ -1019,16 +1077,16 @@ mod bitcoind_tests { // Run transaction let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut vm = TxScriptEngine::from_transaction_input( &populated_tx, &populated_tx.tx().inputs[0], 0, &populated_tx.entries[0], - &mut reused_values, + &reused_values, &sig_cache, - ) - .map_err(UnifiedError::TxScriptError)?; + kip10_enabled, + ); vm.execute().map_err(UnifiedError::TxScriptError) } @@ -1061,6 +1119,7 @@ mod bitcoind_tests { Err(ue) => match ue { UnifiedError::TxScriptError(e) => match e { TxScriptError::NumberTooBig(_) => vec!["UNKNOWN_ERROR"], + TxScriptError::Serialization(_) => vec!["UNKNOWN_ERROR"], TxScriptError::PubKeyFormat => vec!["PUBKEYFORMAT"], TxScriptError::EvalFalse => vec!["EVAL_FALSE"], TxScriptError::EmptyStack => { @@ -1107,22 +1166,41 @@ mod bitcoind_tests { #[test] fn test_bitcoind_tests() { - let file = File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join("script_tests.json")) - .expect("Could not find test file"); - let reader = BufReader::new(file); - - // Read the JSON contents of the file as an instance of `User`. - let tests: Vec = serde_json::from_reader(reader).expect("Failed Parsing {:?}"); - let mut had_errors = 0; - let total_tests = tests.len(); - for row in tests { - if let Err(error) = row.test_row() { - println!("Test: {:?} failed: {:?}", row.clone(), error); - had_errors += 1; + // Script test files are split into two versions to test behavior before and after KIP-10: + // + // - script_tests.json: Tests basic script functionality with KIP-10 disabled (kip10_enabled=false) + // - script_tests-kip10.json: Tests expanded functionality with KIP-10 enabled (kip10_enabled=true) + // + // KIP-10 introduces two major changes: + // + // 1. Support for 8-byte integer arithmetic (previously limited to 4 bytes) + // This enables working with larger numbers in scripts and reduces artificial constraints + // + // 2. 
Transaction introspection opcodes: + // - OpTxInputCount (0xb3): Get number of inputs + // - OpTxOutputCount (0xb4): Get number of outputs + // - OpTxInputIndex (0xb9): Get current input index + // - OpTxInputAmount (0xbe): Get input amount + // - OpTxInputSpk (0xbf): Get input script public key + // - OpTxOutputAmount (0xc2): Get output amount + // - OpTxOutputSpk (0xc3): Get output script public key + // + // These changes were added to support mutual transactions and auto-compounding addresses. + // When KIP-10 is disabled (pre-activation), the new opcodes will return an InvalidOpcode error + // and arithmetic is limited to 4 bytes. When enabled, scripts gain full access to transaction + // data and 8-byte arithmetic capabilities. + for (file_name, kip10_enabled) in [("script_tests.json", false), ("script_tests-kip10.json", true)] { + let file = + File::open(Path::new(env!("CARGO_MANIFEST_DIR")).join("test-data").join(file_name)).expect("Could not find test file"); + let reader = BufReader::new(file); + + // Read the JSON contents of the file as an instance of `User`. + let tests: Vec = serde_json::from_reader(reader).expect("Failed Parsing {:?}"); + for row in tests { + if let Err(error) = row.test_row(kip10_enabled) { + panic!("Test: {:?} failed for {}: {:?}", row.clone(), file_name, error); + } } } - if had_errors > 0 { - panic!("{}/{} json tests failed", had_errors, total_tests) - } } } diff --git a/crypto/txscript/src/opcodes/macros.rs b/crypto/txscript/src/opcodes/macros.rs index b3db98829..a4b3bfbbf 100644 --- a/crypto/txscript/src/opcodes/macros.rs +++ b/crypto/txscript/src/opcodes/macros.rs @@ -6,9 +6,9 @@ macro_rules! opcode_serde { [[self.value()].as_slice(), length.to_le_bytes().as_slice(), self.data.as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { match it.take(size_of::<$type>()).copied().collect::>().try_into() { Ok(bytes) => { let length = <$type>::from_le_bytes(bytes) as usize; @@ -32,9 +32,9 @@ macro_rules! opcode_serde { [[self.value()].as_slice(), self.data.clone().as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { // Static length includes the opcode itself let data: Vec = it.take($length - 1).copied().collect(); Self::new(data) @@ -44,7 +44,7 @@ macro_rules! opcode_serde { macro_rules! opcode_init { ($type:ty) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() > <$type>::MAX as usize { return Err(TxScriptError::MalformedPush(<$type>::MAX as usize, data.len())); } @@ -52,7 +52,7 @@ macro_rules! opcode_init { } }; ($length: literal) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() != $length - 1 { return Err(TxScriptError::MalformedPush($length - 1, data.len())); } @@ -69,20 +69,20 @@ macro_rules! 
opcode_impl { opcode_serde!($length); } - impl OpCodeExecution for $name { - fn empty() -> Result>, TxScriptError> { + impl OpCodeExecution for $name { + fn empty() -> Result>, TxScriptError> { Self::new(vec![]) } opcode_init!($length); #[allow(unused_variables)] - fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { + fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { $code } } - impl OpCodeImplementation for $name {} + impl OpCodeImplementation for $name {} } } @@ -111,7 +111,7 @@ macro_rules! opcode_list { )? )* - pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction>(it: &mut I) -> Option>, TxScriptError>> { + pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>(it: &mut I) -> Option>, TxScriptError>> { match it.next() { Some(opcode_num) => match opcode_num { $( @@ -132,7 +132,11 @@ macro_rules! opcode_list { let mut builder = ScriptBuilder::new(); for token in script.split_whitespace() { if let Ok(value) = token.parse::() { - builder.add_i64(value)?; + if value == i64::MIN { + builder.add_i64_min()?; + } else { + builder.add_i64(value)?; + } } else if let Some(Ok(value)) = token.strip_prefix("0x").and_then(|trimmed| Some(hex::decode(trimmed))) { builder.extend(&value); diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index ad800d248..5ee6fbaae 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -1,17 +1,20 @@ #[macro_use] mod macros; -use crate::data_stack::{DataStack, OpcodeData}; use crate::{ - ScriptSource, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD, MAX_TX_IN_SEQUENCE_NUM, NO_COST_OPCODE, + data_stack::{DataStack, Kip10I64, OpcodeData}, + ScriptSource, SpkEncoding, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD, MAX_TX_IN_SEQUENCE_NUM, NO_COST_OPCODE, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK, }; use blake2b_simd::Params; -use core::cmp::{max, min}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; use kaspa_consensus_core::hashing::sighash_type::SigHashType; use kaspa_consensus_core::tx::VerifiableTransaction; use sha2::{Digest, Sha256}; -use std::fmt::{Debug, Formatter}; +use std::{ + fmt::{Debug, Formatter}, + num::TryFromIntError, +}; /// First value in the range formed by the "small integer" Op# opcodes pub const OP_SMALL_INT_MIN_VAL: u8 = 1; @@ -73,28 +76,31 @@ pub trait OpCodeMetadata: Debug { } } -pub trait OpCodeExecution { - fn empty() -> Result>, TxScriptError> +pub trait OpCodeExecution { + fn empty() -> Result>, TxScriptError> where Self: Sized; #[allow(clippy::new_ret_no_self)] - fn new(data: Vec) -> Result>, TxScriptError> + fn new(data: Vec) -> Result>, TxScriptError> where Self: Sized; - fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; + fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; } pub trait OpcodeSerialization { fn serialize(&self) -> Vec; - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> + ) -> Result>, TxScriptError> where Self: Sized; } -pub trait OpCodeImplementation: OpCodeExecution + OpCodeMetadata + OpcodeSerialization {} +pub trait OpCodeImplementation: + OpCodeExecution + OpCodeMetadata + OpcodeSerialization +{ +} impl OpCodeMetadata for OpCode { fn value(&self) -> u8 { @@ -193,17 +199,44 @@ impl OpCodeMetadata for OpCode { // Helpers for some opcodes with shared data #[inline] -fn 
push_data(data: Vec, vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_data( + data: Vec, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push(data); Ok(()) } #[inline] -fn push_number(number: i64, vm: &mut TxScriptEngine) -> OpCodeResult { - vm.dstack.push_item(number); +fn push_number( + number: i64, + vm: &mut TxScriptEngine, +) -> OpCodeResult { + vm.dstack.push_item(number)?; Ok(()) } +/// This macro helps to avoid code duplication in numeric opcodes where the only difference +/// between KIP10_ENABLED and disabled states is the numeric type used (Kip10I64 vs i64). +/// KIP10I64 deserializator supports 8-byte integers +// TODO: Remove this macro after KIP-10 activation. +macro_rules! numeric_op { + ($vm: expr, $pattern: pat, $count: expr, $block: expr) => { + if $vm.kip10_enabled { + let $pattern: [Kip10I64; $count] = $vm.dstack.pop_items()?; + let r = $block; + $vm.dstack.push_item(r)?; + Ok(()) + } else { + let $pattern: [i64; $count] = $vm.dstack.pop_items()?; + #[allow(clippy::useless_conversion)] + let r = $block; + $vm.dstack.push_item(r)?; + Ok(()) + } + }; +} + /* The following is the implementation and metadata of all opcodes. Each opcode has unique number (and template system makes it impossible to use two opcodes), length specification, @@ -511,7 +544,7 @@ opcode_list! { opcode OpSize<0x82, 1>(self, vm) { match vm.dstack.last() { Some(last) => { - vm.dstack.push_item(i64::try_from(last.len()).map_err(|e| TxScriptError::NumberTooBig(e.to_string()))?); + vm.dstack.push_item(i64::try_from(last.len()).map_err(|e| TxScriptError::NumberTooBig(e.to_string()))?)?; Ok(()) }, None => Err(TxScriptError::InvalidStackOperation(1, 0)) @@ -556,54 +589,38 @@ opcode_list! { // Numeric related opcodes. opcode Op1Add<0x8b, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(value + 1); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_add(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of addition exceeds 64-bit signed integer range".to_string()))?) } opcode Op1Sub<0x8c, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(value - 1); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_sub(1).ok_or_else(|| TxScriptError::NumberTooBig("Result of subtraction exceeds 64-bit signed integer range".to_string()))?) } opcode Op2Mul<0x8d, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode Op2Div<0x8e, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpNegate<0x8f, 1>(self, vm) { - let [value]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(-value); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_neg().ok_or_else(|| TxScriptError::NumberTooBig("Negation result exceeds 64-bit signed integer range".to_string()))?) } opcode OpAbs<0x90, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item(m.abs()); - Ok(()) + numeric_op!(vm, [value], 1, value.checked_abs().ok_or_else(|| TxScriptError::NumberTooBig("Absolute value exceeds 64-bit signed integer range".to_string()))?) 
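
The move from bare operators to `checked_*` in these arms is not cosmetic: once KIP-10 admits 8-byte operands, expressions such as `-i64::MIN` or `i64::MAX + 1` become reachable from scripts and must surface as `NumberTooBig` rather than wrap (or panic in debug builds). A standalone sketch of the guarded pattern the arms use:

```rust
// Illustrative only: the guarded negation used by the OpNegate arm above.
fn checked_negate(value: i64) -> Result<i64, String> {
    value
        .checked_neg()
        .ok_or_else(|| "Negation result exceeds 64-bit signed integer range".to_string())
}

fn main() {
    assert_eq!(checked_negate(5), Ok(-5));
    // i64::MIN has no positive counterpart in two's complement, so a bare
    // `-value` would overflow; the checked form reports an error instead.
    assert!(checked_negate(i64::MIN).is_err());
}
```
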
} opcode OpNot<0x91, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item((m == 0) as i64); - Ok(()) + numeric_op!(vm, [m], 1, (m == 0) as i64) } opcode Op0NotEqual<0x92, 1>(self, vm) { - let [m]: [i64; 1] = vm.dstack.pop_items()?; - vm.dstack.push_item((m != 0) as i64 ); - Ok(()) + numeric_op!(vm, [m], 1, (m != 0) as i64) } opcode OpAdd<0x93, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(a+b); - Ok(()) + numeric_op!(vm, [a,b], 2, a.checked_add(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Sum exceeds 64-bit signed integer range".to_string()))?) } opcode OpSub<0x94, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(a-b); - Ok(()) + numeric_op!(vm, [a,b], 2, a.checked_sub(b.into()).ok_or_else(|| TxScriptError::NumberTooBig("Difference exceeds 64-bit signed integer range".to_string()))?) } opcode OpMul<0x95, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) @@ -613,77 +630,63 @@ opcode_list! { opcode OpRShift<0x99, 1>(self, vm) Err(TxScriptError::OpcodeDisabled(format!("{self:?}"))) opcode OpBoolAnd<0x9a, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(((a != 0) && (b != 0)) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, ((a != 0) && (b != 0)) as i64) } opcode OpBoolOr<0x9b, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(((a != 0) || (b != 0)) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, ((a != 0) || (b != 0)) as i64) } opcode OpNumEqual<0x9c, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a == b) as i64); - Ok(()) + numeric_op!(vm, [a,b], 2, (a == b) as i64) } opcode OpNumEqualVerify<0x9d, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - match a == b { - true => Ok(()), - false => Err(TxScriptError::VerifyError) + if vm.kip10_enabled { + let [a,b]: [Kip10I64; 2] = vm.dstack.pop_items()?; + match a == b { + true => Ok(()), + false => Err(TxScriptError::VerifyError) + } + } else { + let [a,b]: [i64; 2] = vm.dstack.pop_items()?; + match a == b { + true => Ok(()), + false => Err(TxScriptError::VerifyError) + } } } opcode OpNumNotEqual<0x9e, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a != b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a != b) as i64) } opcode OpLessThan<0x9f, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a < b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a < b) as i64) } opcode OpGreaterThan<0xa0, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a > b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a > b) as i64) } opcode OpLessThanOrEqual<0xa1, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a <= b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a <= b) as i64) } opcode OpGreaterThanOrEqual<0xa2, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item((a >= b) as i64); - Ok(()) + numeric_op!(vm, [a, b], 2, (a >= b) as i64) } opcode OpMin<0xa3, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(min(a,b)); - Ok(()) + numeric_op!(vm, [a, b], 2, a.min(b)) } opcode OpMax<0xa4, 1>(self, vm) { - let [a,b]: [i64; 2] = vm.dstack.pop_items()?; - vm.dstack.push_item(max(a,b)); - Ok(()) + numeric_op!(vm, [a, b], 2, a.max(b)) } opcode OpWithin<0xa5, 1>(self, vm) { - let [x,l,u]: [i64; 3] = 
vm.dstack.pop_items()?; - vm.dstack.push_item((x >= l && x < u) as i64); - Ok(()) + numeric_op!(vm, [x,l,u], 3, (x >= l && x < u) as i64) } // Undefined opcodes. @@ -719,7 +722,7 @@ opcode_list! { let hash_type = SigHashType::from_u8(typ).map_err(|e| TxScriptError::InvalidSigHashType(typ))?; match vm.check_ecdsa_signature(hash_type, key.as_slice(), sig.as_slice()) { Ok(valid) => { - vm.dstack.push_item(valid); + vm.dstack.push_item(valid)?; Ok(()) }, Err(e) => { @@ -728,7 +731,7 @@ opcode_list! { } } None => { - vm.dstack.push_item(false); + vm.dstack.push_item(false)?; Ok(()) } } @@ -742,7 +745,7 @@ opcode_list! { let hash_type = SigHashType::from_u8(typ).map_err(|e| TxScriptError::InvalidSigHashType(typ))?; match vm.check_schnorr_signature(hash_type, key.as_slice(), sig.as_slice()) { Ok(valid) => { - vm.dstack.push_item(valid); + vm.dstack.push_item(valid)?; Ok(()) }, Err(e) => { @@ -751,7 +754,7 @@ opcode_list! { } } None => { - vm.dstack.push_item(false); + vm.dstack.push_item(false)?; Ok(()) } } @@ -874,25 +877,125 @@ opcode_list! { } } - // Undefined opcodes. - opcode OpUnknown178<0xb2, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown179<0xb3, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown180<0xb4, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown181<0xb5, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown182<0xb6, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown183<0xb7, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown184<0xb8, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown185<0xb9, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown186<0xba, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown187<0xbb, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown188<0xbc, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown189<0xbd, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown190<0xbe, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown191<0xbf, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown192<0xc0, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown193<0xc1, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown194<0xc2, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) - opcode OpUnknown195<0xc3, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + // Introspection opcodes + // Transaction level opcodes (following Transaction struct field order) + opcode OpTxVersion<0xb2, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputCount<0xb3, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.inputs().len() as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputCount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxOutputCount<0xb4, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + push_number(tx.outputs().len() as i64, vm) + 
}, + _ => Err(TxScriptError::InvalidSource("OpOutputCount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxLockTime<0xb5, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxSubnetId<0xb6, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxGas<0xb7, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxPayload<0xb8, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // Input related opcodes (following TransactionInput struct field order) + opcode OpTxInputIndex<0xb9, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{idx, ..} => { + push_number(idx as i64, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputIndex only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpOutpointTxId<0xba, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpOutpointIndex<0xbb, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputScriptSig<0xbc, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputSeq<0xbd, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // UTXO related opcodes (following UtxoEntry struct field order) + opcode OpTxInputAmount<0xbe, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + push_number(utxo.amount.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpInputAmount only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxInputSpk<0xbf, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let utxo = usize::try_from(idx).ok() + .and_then(|idx| tx.utxo(idx)) + .ok_or_else(|| TxScriptError::InvalidInputIndex(idx, tx.inputs().len()))?; + vm.dstack.push(utxo.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpInputSpk only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxInputBlockDaaScore<0xc0, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + opcode OpTxInputIsCoinbase<0xc1, 1>(self, vm) Err(TxScriptError::OpcodeReserved(format!("{self:?}"))) + // Output related opcodes (following TransactionOutput struct field order) + opcode OpTxOutputAmount<0xc2, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; + push_number(output.value.try_into().map_err(|e: TryFromIntError| TxScriptError::NumberTooBig(e.to_string()))?, vm) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputAmount only applies to transaction inputs".to_string())) + } + } else { + 
Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + opcode OpTxOutputSpk<0xc3, 1>(self, vm) { + if vm.kip10_enabled { + match vm.script_source { + ScriptSource::TxInput{tx, ..} => { + let [idx]: [i32; 1] = vm.dstack.pop_items()?; + let output = usize::try_from(idx).ok() + .and_then(|idx| tx.outputs().get(idx)) + .ok_or_else(|| TxScriptError::InvalidOutputIndex(idx, tx.inputs().len()))?; + vm.dstack.push(output.script_public_key.to_bytes()); + Ok(()) + }, + _ => Err(TxScriptError::InvalidSource("OpOutputSpk only applies to transaction inputs".to_string())) + } + } else { + Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) + } + } + // Undefined opcodes opcode OpUnknown196<0xc4, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) opcode OpUnknown197<0xc5, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) opcode OpUnknown198<0xc6, 1>(self, vm) Err(TxScriptError::InvalidOpcode(format!("{self:?}"))) @@ -958,7 +1061,7 @@ opcode_list! { // converts an opcode from the list of Op0 to Op16 to its associated value #[allow(clippy::borrowed_box)] -pub fn to_small_int(opcode: &Box>) -> u8 { +pub fn to_small_int(opcode: &Box>) -> u8 { let value = opcode.value(); if value == codes::OpFalse { return 0; @@ -976,7 +1079,7 @@ mod test { use crate::{opcodes, pay_to_address_script, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD}; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::constants::{SOMPI_PER_KASPA, TX_VERSION}; - use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, @@ -985,47 +1088,51 @@ mod test { struct TestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, dstack: Stack, } struct ErrorTestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, error: TxScriptError, } fn run_success_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); - vm.dstack = init; - code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); - assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); + [false, true].into_iter().for_each(|kip10_enabled| { + let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); + vm.dstack = init.clone(); + code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); + assert_eq!(*vm.dstack, dstack, "OpCode {} Pushed wrong value", code.value()); + }); } } fn run_error_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); - vm.dstack.clone_from(&init); - assert_eq!( - code.execute(&mut vm) - .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), - error, - "Opcode {} returned wrong error {:?}", - code.value(), - init - ); + [false, true].into_iter().for_each(|kip10_enabled| { + 
let mut vm = TxScriptEngine::new(&reused_values, &cache, kip10_enabled); + vm.dstack.clone_from(&init); + assert_eq!( + code.execute(&mut vm) + .expect_err(format!("Opcode {} should have errored (init: {:?})", code.value(), init.clone()).as_str()), + error, + "Opcode {} returned wrong error {:?}", + code.value(), + init + ); + }); } } #[test] fn test_opcode_disabled() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpCat::empty().expect("Should accept empty"), opcodes::OpSubStr::empty().expect("Should accept empty"), opcodes::OpLeft::empty().expect("Should accept empty"), @@ -1044,8 +1151,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -1057,18 +1164,29 @@ mod test { #[test] fn test_opcode_reserved() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpReserved::empty().expect("Should accept empty"), opcodes::OpVer::empty().expect("Should accept empty"), opcodes::OpVerIf::empty().expect("Should accept empty"), opcodes::OpVerNotIf::empty().expect("Should accept empty"), opcodes::OpReserved1::empty().expect("Should accept empty"), opcodes::OpReserved2::empty().expect("Should accept empty"), + opcodes::OpTxVersion::empty().expect("Should accept empty"), + opcodes::OpTxLockTime::empty().expect("Should accept empty"), + opcodes::OpTxSubnetId::empty().expect("Should accept empty"), + opcodes::OpTxGas::empty().expect("Should accept empty"), + opcodes::OpTxPayload::empty().expect("Should accept empty"), + opcodes::OpOutpointTxId::empty().expect("Should accept empty"), + opcodes::OpOutpointIndex::empty().expect("Should accept empty"), + opcodes::OpTxInputScriptSig::empty().expect("Should accept empty"), + opcodes::OpTxInputSeq::empty().expect("Should accept empty"), + opcodes::OpTxInputBlockDaaScore::empty().expect("Should accept empty"), + opcodes::OpTxInputIsCoinbase::empty().expect("Should accept empty"), ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -1080,27 +1198,9 @@ mod test { #[test] fn test_opcode_invalid() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpUnknown166::empty().expect("Should accept empty"), opcodes::OpUnknown167::empty().expect("Should accept empty"), - opcodes::OpUnknown178::empty().expect("Should accept empty"), - opcodes::OpUnknown179::empty().expect("Should accept empty"), - opcodes::OpUnknown180::empty().expect("Should accept empty"), - opcodes::OpUnknown181::empty().expect("Should accept empty"), - opcodes::OpUnknown182::empty().expect("Should accept empty"), - opcodes::OpUnknown183::empty().expect("Should accept empty"), - opcodes::OpUnknown184::empty().expect("Should accept empty"), - opcodes::OpUnknown185::empty().expect("Should accept empty"), - opcodes::OpUnknown186::empty().expect("Should accept empty"), - opcodes::OpUnknown187::empty().expect("Should accept empty"), - opcodes::OpUnknown188::empty().expect("Should accept empty"), - opcodes::OpUnknown189::empty().expect("Should accept empty"), - opcodes::OpUnknown190::empty().expect("Should accept 
empty"), - opcodes::OpUnknown191::empty().expect("Should accept empty"), - opcodes::OpUnknown192::empty().expect("Should accept empty"), - opcodes::OpUnknown193::empty().expect("Should accept empty"), - opcodes::OpUnknown194::empty().expect("Should accept empty"), - opcodes::OpUnknown195::empty().expect("Should accept empty"), opcodes::OpUnknown196::empty().expect("Should accept empty"), opcodes::OpUnknown197::empty().expect("Should accept empty"), opcodes::OpUnknown198::empty().expect("Should accept empty"), @@ -1158,8 +1258,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache, false); for pop in tests { match pop.execute(&mut vm) { @@ -2708,6 +2808,9 @@ mod test { fn populated_input(&self, _index: usize) -> (&TransactionInput, &UtxoEntry) { unimplemented!() } + fn utxo(&self, _index: usize) -> Option<&UtxoEntry> { + unimplemented!() + } } fn make_mock_transaction(lock_time: u64) -> (VerifiableTransactionMock, TransactionInput, UtxoEntry) { @@ -2739,7 +2842,7 @@ mod test { let (base_tx, input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckLockTimeVerify::empty().expect("Should accept empty"); @@ -2751,8 +2854,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) - .expect("Shouldn't fail"); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -2781,7 +2883,7 @@ mod test { let (tx, base_input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckSequenceVerify::empty().expect("Should accept empty"); @@ -2794,8 +2896,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) - .expect("Shouldn't fail"); + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache, false); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { // Message is based on the should_fail values @@ -2897,4 +2998,849 @@ mod test { TestCase { code: opcodes::OpIfDup::empty().expect("Should accept empty"), init: vec![vec![]], dstack: vec![vec![]] }, ]) } + + mod kip10 { + use super::*; + use crate::{ + data_stack::{DataStack, OpcodeData}, + opcodes::{codes::*, push_number}, + pay_to_script_hash_script, + script_builder::ScriptBuilder, + SpkEncoding, + }; + use kaspa_consensus_core::tx::MutableTransaction; + + #[derive(Clone, Debug)] + struct Kip10Mock { + spk: ScriptPublicKey, + amount: u64, + } + + fn create_mock_spk(value: u8) -> ScriptPublicKey { + let pub_key = vec![value; 32]; + let addr = Address::new(Prefix::Testnet, Version::PubKey, &pub_key); + pay_to_address_script(&addr) + } + + fn kip_10_tx_mock(inputs: Vec, outputs: Vec) -> (Transaction, Vec) { 
+ let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let dummy_sig_script = vec![0u8; 65]; + let (utxos, tx_inputs) = inputs + .into_iter() + .map(|Kip10Mock { spk, amount }| { + (UtxoEntry::new(amount, spk, 0, false), TransactionInput::new(dummy_prev_out, dummy_sig_script.clone(), 10, 0)) + }) + .unzip(); + + let tx_out = outputs.into_iter().map(|Kip10Mock { spk, amount }| TransactionOutput::new(amount, spk)); + + let tx = Transaction::new(TX_VERSION + 1, tx_inputs, tx_out.collect(), 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + (tx, utxos) + } + + #[derive(Debug)] + struct TestGroup { + name: &'static str, + kip10_enabled: bool, + test_cases: Vec, + } + + #[derive(Debug)] + enum Operation { + InputSpk, + OutputSpk, + InputAmount, + OutputAmount, + } + + #[derive(Debug)] + enum TestCase { + Successful { operation: Operation, index: i64, expected_result: ExpectedResult }, + Incorrect { operation: Operation, index: Option, expected_error: TxScriptError }, + } + + #[derive(Debug)] + struct ExpectedResult { + expected_spk: Option>, + expected_amount: Option>, + } + + fn execute_test_group(group: &TestGroup) { + let input_spk1 = create_mock_spk(1); + let input_spk2 = create_mock_spk(2); + let output_spk1 = create_mock_spk(3); + let output_spk2 = create_mock_spk(4); + + let inputs = + vec![Kip10Mock { spk: input_spk1.clone(), amount: 1111 }, Kip10Mock { spk: input_spk2.clone(), amount: 2222 }]; + let outputs = + vec![Kip10Mock { spk: output_spk1.clone(), amount: 3333 }, Kip10Mock { spk: output_spk2.clone(), amount: 4444 }]; + + let (tx, utxo_entries) = kip_10_tx_mock(inputs, outputs); + let tx = PopulatedTransaction::new(&tx, utxo_entries); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + for current_idx in 0..tx.inputs().len() { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[current_idx], + current_idx, + tx.utxo(current_idx).unwrap(), + &reused_values, + &sig_cache, + group.kip10_enabled, + ); + + // Check input index opcode first + let op_input_idx = opcodes::OpTxInputIndex::empty().expect("Should accept empty"); + if !group.kip10_enabled { + assert!(matches!(op_input_idx.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_)))); + } else { + let mut expected = vm.dstack.clone(); + expected.push_item(current_idx as i64).unwrap(); + op_input_idx.execute(&mut vm).unwrap(); + assert_eq!(vm.dstack, expected); + vm.dstack.clear(); + } + + // Prepare opcodes + let op_input_spk = opcodes::OpTxInputSpk::empty().expect("Should accept empty"); + let op_output_spk = opcodes::OpTxOutputSpk::empty().expect("Should accept empty"); + let op_input_amount = opcodes::OpTxInputAmount::empty().expect("Should accept empty"); + let op_output_amount = opcodes::OpTxOutputAmount::empty().expect("Should accept empty"); + + // Execute each test case + for test_case in &group.test_cases { + match test_case { + TestCase::Successful { operation, index, expected_result } => { + push_number(*index, &mut vm).unwrap(); + let result = match operation { + Operation::InputSpk => op_input_spk.execute(&mut vm), + Operation::OutputSpk => op_output_spk.execute(&mut vm), + Operation::InputAmount => op_input_amount.execute(&mut vm), + Operation::OutputAmount => op_output_amount.execute(&mut vm), + }; + assert!(result.is_ok()); + + // Check the result matches expectations + if let Some(ref expected_spk) = expected_result.expected_spk { + assert_eq!(vm.dstack, vec![expected_spk.clone()]); + } + if let Some(ref expected_amount) 
= expected_result.expected_amount { + assert_eq!(vm.dstack, vec![expected_amount.clone()]); + } + vm.dstack.clear(); + } + TestCase::Incorrect { operation, index, expected_error } => { + if let Some(idx) = index { + push_number(*idx, &mut vm).unwrap(); + } + + let result = match operation { + Operation::InputSpk => op_input_spk.execute(&mut vm), + Operation::OutputSpk => op_output_spk.execute(&mut vm), + Operation::InputAmount => op_input_amount.execute(&mut vm), + Operation::OutputAmount => op_output_amount.execute(&mut vm), + }; + + assert!( + matches!(result, Err(ref e) if std::mem::discriminant(e) == std::mem::discriminant(expected_error)) + ); + vm.dstack.clear(); + } + } + } + } + } + + #[test] + fn test_unary_introspection_ops() { + let test_groups = vec![ + TestGroup { + name: "KIP-10 disabled", + kip10_enabled: false, + test_cases: vec![ + TestCase::Incorrect { + operation: Operation::InputSpk, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::OutputSpk, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(0), + expected_error: TxScriptError::InvalidOpcode("Invalid opcode".to_string()), + }, + ], + }, + TestGroup { + name: "Valid input indices", + kip10_enabled: true, + test_cases: vec![ + TestCase::Successful { + operation: Operation::InputSpk, + index: 0, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(1).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::InputSpk, + index: 1, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(2).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::InputAmount, + index: 0, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::::serialize(&1111).unwrap()), + }, + }, + TestCase::Successful { + operation: Operation::InputAmount, + index: 1, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::::serialize(&2222).unwrap()), + }, + }, + ], + }, + TestGroup { + name: "Valid output indices", + kip10_enabled: true, + test_cases: vec![ + TestCase::Successful { + operation: Operation::OutputSpk, + index: 0, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(3).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::OutputSpk, + index: 1, + expected_result: ExpectedResult { + expected_spk: Some(create_mock_spk(4).to_bytes()), + expected_amount: None, + }, + }, + TestCase::Successful { + operation: Operation::OutputAmount, + index: 0, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::::serialize(&3333).unwrap()), + }, + }, + TestCase::Successful { + operation: Operation::OutputAmount, + index: 1, + expected_result: ExpectedResult { + expected_spk: None, + expected_amount: Some(OpcodeData::::serialize(&4444).unwrap()), + }, + }, + ], + }, + TestGroup { + name: "Error cases", + kip10_enabled: true, + test_cases: vec![ + TestCase::Incorrect { + operation: Operation::InputAmount, + index: None, + expected_error: TxScriptError::InvalidStackOperation(1, 0), + }, + 
TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(-1), + expected_error: TxScriptError::InvalidInputIndex(-1, 2), + }, + TestCase::Incorrect { + operation: Operation::InputAmount, + index: Some(2), + expected_error: TxScriptError::InvalidInputIndex(2, 2), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: None, + expected_error: TxScriptError::InvalidStackOperation(1, 0), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(-1), + expected_error: TxScriptError::InvalidOutputIndex(-1, 2), + }, + TestCase::Incorrect { + operation: Operation::OutputAmount, + index: Some(2), + expected_error: TxScriptError::InvalidOutputIndex(2, 2), + }, + ], + }, + ]; + + for group in test_groups { + println!("Running test group: {}", group.name); + execute_test_group(&group); + } + } + fn create_mock_tx(input_count: usize, output_count: usize) -> (Transaction, Vec) { + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let dummy_sig_script = vec![0u8; 65]; + + // Create inputs with different SPKs and amounts + let inputs: Vec = + (0..input_count).map(|i| Kip10Mock { spk: create_mock_spk(i as u8), amount: 1000 + i as u64 }).collect(); + + // Create outputs with different SPKs and amounts + let outputs: Vec = + (0..output_count).map(|i| Kip10Mock { spk: create_mock_spk((100 + i) as u8), amount: 2000 + i as u64 }).collect(); + + let (utxos, tx_inputs): (Vec<_>, Vec<_>) = inputs + .into_iter() + .map(|Kip10Mock { spk, amount }| { + (UtxoEntry::new(amount, spk, 0, false), TransactionInput::new(dummy_prev_out, dummy_sig_script.clone(), 10, 0)) + }) + .unzip(); + + let tx_outputs: Vec<_> = + outputs.into_iter().map(|Kip10Mock { spk, amount }| TransactionOutput::new(amount, spk)).collect(); + + let tx = Transaction::new(TX_VERSION + 1, tx_inputs, tx_outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + + (tx, utxos) + } + + #[test] + fn test_op_input_output_count() { + // Test cases with different input/output combinations + let test_cases = vec![ + (1, 0), // Minimum inputs, no outputs + (1, 1), // Minimum inputs, one output + (1, 2), // Minimum inputs, multiple outputs + (2, 1), // Multiple inputs, one output + (3, 2), // Multiple inputs, multiple outputs + (5, 3), // More inputs than outputs + (2, 4), // More outputs than inputs + ]; + + for (input_count, output_count) in test_cases { + let (tx, utxo_entries) = create_mock_tx(input_count, output_count); + let tx = PopulatedTransaction::new(&tx, utxo_entries); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Test with KIP-10 enabled and disabled + for kip10_enabled in [true, false] { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], // Use first input + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + kip10_enabled, + ); + + let op_input_count = opcodes::OpTxInputCount::empty().expect("Should accept empty"); + let op_output_count = opcodes::OpTxOutputCount::empty().expect("Should accept empty"); + + if kip10_enabled { + // Test input count + op_input_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![ as OpcodeData>::serialize(&(input_count as i64)).unwrap()], + "Input count mismatch for {} inputs", + input_count + ); + vm.dstack.clear(); + + // Test output count + op_output_count.execute(&mut vm).unwrap(); + assert_eq!( + vm.dstack, + vec![ as OpcodeData>::serialize(&(output_count as i64)).unwrap()], + "Output count mismatch for {} outputs", + 
output_count + ); + vm.dstack.clear(); + } else { + // Test that operations fail when KIP-10 is disabled + assert!( + matches!(op_input_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), + "OpInputCount should fail when KIP-10 is disabled" + ); + assert!( + matches!(op_output_count.execute(&mut vm), Err(TxScriptError::InvalidOpcode(_))), + "OpOutputCount should fail when KIP-10 is disabled" + ); + } + } + } + } + + #[test] + fn test_output_amount() { + // Create script: 0 OP_OUTPUTAMOUNT 100 EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxOutputAmount) + .unwrap() + .add_i64(100) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + + // Create transaction with output amount 100 + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock.clone()], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + + // Set signature script to push redeem script + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Test success case + { + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case with wrong amount + { + let output_mock = Kip10Mock { + spk: create_mock_spk(1), + amount: 99, // Wrong amount + }; + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock.clone()], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_amount() { + // Create script: 0 OP_INPUTAMOUNT 200 EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxInputAmount) + .unwrap() + .add_i64(200) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + let spk = pay_to_script_hash_script(&redeem_script); + + // Test success case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case + { + let input_mock = Kip10Mock { + spk: spk.clone(), + amount: 199, // Wrong amount + }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = 
kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_spk_basic() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK OpNop + // Just verify that OpInputSpk pushes something onto stack + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, OpNop]).unwrap().drain(); + let spk = pay_to_script_hash_script(&redeem_script); + + let (tx, utxo_entries) = kip_10_tx_mock(vec![Kip10Mock { spk, amount: 100 }], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // OpInputSpk should push input's SPK onto stack, making it non-empty + assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_input_spk_different() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK 1 OP_INPUTSPK OP_EQUAL OP_NOT + // Verifies that two different inputs have different SPKs + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, Op1, OpTxInputSpk, OpEqual, OpNot]).unwrap().drain(); + let spk = pay_to_script_hash_script(&redeem_script); + let input_mock1 = Kip10Mock { spk, amount: 100 }; + let input_mock2 = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; // Different SPK + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // Should succeed because the SPKs are different + assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_input_spk_same() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + + // Create script: 0 OP_INPUTSPK 1 OP_INPUTSPK OP_EQUAL + // Verifies that two inputs with same SPK are equal + let redeem_script = ScriptBuilder::new().add_ops(&[Op0, OpTxInputSpk, Op1, OpTxInputSpk, OpEqual]).unwrap().drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + let input_mock1 = Kip10Mock { spk: spk.clone(), amount: 100 }; + let input_mock2 = Kip10Mock { spk, amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = + TxScriptEngine::from_transaction_input(&tx, &tx.inputs()[0], 0, tx.utxo(0).unwrap(), &reused_values, &sig_cache, true); + + // Should succeed because both SPKs are identical + 
assert_eq!(vm.execute(), Ok(())); + } + + #[test] + fn test_output_spk() { + // Create unique SPK to check + let expected_spk = create_mock_spk(42); + let expected_spk_bytes = expected_spk.to_bytes(); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Create script: 0 OP_OUTPUTSPK <expected_spk> EQUAL + let redeem_script = ScriptBuilder::new() + .add_op(Op0) + .unwrap() + .add_op(OpTxOutputSpk) + .unwrap() + .add_data(&expected_spk_bytes) + .unwrap() + .add_op(OpEqual) + .unwrap() + .drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + + // Test success case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: expected_spk.clone(), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure case + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { + spk: create_mock_spk(43), // Different SPK + amount: 100, + }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_input_index() { + // Create script: OP_INPUTINDEX 0 EQUAL + let redeem_script = + ScriptBuilder::new().add_op(OpTxInputIndex).unwrap().add_i64(0).unwrap().add_op(OpEqual).unwrap().drain(); + + let spk = pay_to_script_hash_script(&redeem_script); + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Test first input (success case) + { + let input_mock = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test second input (failure case) + { + let input_mock1 = Kip10Mock { spk: create_mock_spk(1), amount: 100 }; + let input_mock2 = Kip10Mock { spk: spk.clone(), amount: 200 }; + let output_mock = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; + + let (tx, utxo_entries) = kip_10_tx_mock(vec![input_mock1, input_mock2], vec![output_mock]); + let mut tx = MutableTransaction::with_entries(tx, utxo_entries); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( +
&tx, + &tx.inputs()[1], + 1, + tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + // Should fail because script expects index 0 but we're at index 1 + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + + #[test] + fn test_counts() { + let sig_cache = Cache::new(10_000); + let reused_values = SigHashReusedValuesUnsync::new(); + // Test OpInputCount: "OP_INPUTCOUNT 2 EQUAL" + let input_count_script = + ScriptBuilder::new().add_op(OpTxInputCount).unwrap().add_i64(2).unwrap().add_op(OpEqual).unwrap().drain(); + + // Test OpOutputCount: "OP_OUTPUTCOUNT 3 EQUAL" + let output_count_script = + ScriptBuilder::new().add_op(OpTxOutputCount).unwrap().add_i64(3).unwrap().add_op(OpEqual).unwrap().drain(); + + let input_spk = pay_to_script_hash_script(&input_count_script); + let output_spk = pay_to_script_hash_script(&output_count_script); + + // Create transaction with 2 inputs and 3 outputs + let input_mock1 = Kip10Mock { spk: input_spk.clone(), amount: 100 }; + let input_mock2 = Kip10Mock { spk: output_spk.clone(), amount: 200 }; + let output_mock1 = Kip10Mock { spk: create_mock_spk(1), amount: 50 }; + let output_mock2 = Kip10Mock { spk: create_mock_spk(2), amount: 100 }; + let output_mock3 = Kip10Mock { spk: create_mock_spk(3), amount: 150 }; + + let (tx, utxo_entries) = + kip_10_tx_mock(vec![input_mock1.clone(), input_mock2.clone()], vec![output_mock1, output_mock2, output_mock3]); + + // Test InputCount + { + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&input_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test OutputCount + { + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&output_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[1], + 1, + tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Ok(())); + } + + // Test failure cases with wrong counts + { + // Wrong input count script: "OP_INPUTCOUNT 3 EQUAL" + let wrong_input_count_script = + ScriptBuilder::new().add_op(OpTxInputCount).unwrap().add_i64(3).unwrap().add_op(OpEqual).unwrap().drain(); + + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[0].signature_script = ScriptBuilder::new().add_data(&wrong_input_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[0], + 0, + tx.utxo(0).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + + { + // Wrong output count script: "OP_OUTPUTCOUNT 2 EQUAL" + let wrong_output_count_script = + ScriptBuilder::new().add_op(OpTxOutputCount).unwrap().add_i64(2).unwrap().add_op(OpEqual).unwrap().drain(); + + let mut tx = MutableTransaction::with_entries(tx.clone(), utxo_entries.clone()); + tx.tx.inputs[1].signature_script = ScriptBuilder::new().add_data(&wrong_output_count_script).unwrap().drain(); + + let tx = tx.as_verifiable(); + let mut vm = TxScriptEngine::from_transaction_input( + &tx, + &tx.inputs()[1], + 1, + 
tx.utxo(1).unwrap(), + &reused_values, + &sig_cache, + true, + ); + + assert_eq!(vm.execute(), Err(TxScriptError::EvalFalse)); + } + } + } } diff --git a/crypto/txscript/src/script_builder.rs b/crypto/txscript/src/script_builder.rs index 731c47680..7a5b28ca5 100644 --- a/crypto/txscript/src/script_builder.rs +++ b/crypto/txscript/src/script_builder.rs @@ -1,11 +1,12 @@ use std::iter::once; use crate::{ - data_stack::OpcodeData, + data_stack::{Kip10I64, OpcodeData}, opcodes::{codes::*, OP_1_NEGATE_VAL, OP_DATA_MAX_VAL, OP_DATA_MIN_VAL, OP_SMALL_INT_MAX_VAL}, MAX_SCRIPTS_SIZE, MAX_SCRIPT_ELEMENT_SIZE, }; use hexplay::{HexView, HexViewBuilder}; +use kaspa_txscript_errors::SerializationError; use thiserror::Error; /// DEFAULT_SCRIPT_ALLOC is the default size used for the backing array @@ -31,6 +32,9 @@ pub enum ScriptBuilderError { #[error("adding integer {0} would exceed the maximum allowed canonical script length of {MAX_SCRIPTS_SIZE}")] IntegerRejected(i64), + + #[error(transparent)] + Serialization(#[from] SerializationError), } pub type ScriptBuilderResult<T> = std::result::Result<T, ScriptBuilderError>; @@ -228,7 +232,14 @@ impl ScriptBuilder { return Ok(self); } - let bytes: Vec<_> = OpcodeData::serialize(&val); + let bytes: Vec<_> = OpcodeData::<Kip10I64>::serialize(&val.into())?; + self.add_data(&bytes) + } + + // Bitcoind tests utilize this function + #[cfg(test)] + pub fn add_i64_min(&mut self) -> ScriptBuilderResult<&mut Self> { + let bytes: Vec<_> = OpcodeData::serialize(&crate::data_stack::SizedEncodeInt::<9>(i64::MIN)).expect("infallible"); + self.add_data(&bytes) } @@ -355,6 +366,11 @@ mod tests { Test { name: "push -256", val: -256, expected: vec![OpData2, 0x00, 0x81] }, Test { name: "push -32767", val: -32767, expected: vec![OpData2, 0xff, 0xff] }, Test { name: "push -32768", val: -32768, expected: vec![OpData3, 0x00, 0x80, 0x80] }, + Test { + name: "push 9223372036854775807", + val: 9223372036854775807, + expected: vec![OpData8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F], + }, ]; for test in tests { let mut builder = ScriptBuilder::new(); let result = builder.add_i64(test.val).expect("the script is canonical").script(); assert_eq!(result, test.expected, "{} wrong result", test.name); } + + // special case used in the bitcoind tests + let mut builder = ScriptBuilder::new(); + let result = builder.add_i64_min().expect("the script is canonical").script(); + assert_eq!( + result, + vec![OpData9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80], + "push -9223372036854775808 wrong result" + ); } /// Tests that pushing data to a script via the ScriptBuilder API works as expected and conforms to BIP0062.
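// Editor's sketch (not part of this diff): the add_i64 vectors in the hunk above all
// follow the same sign-magnitude little-endian "script number" encoding. Bytes are
// emitted little-endian, the high bit of the last byte carries the sign, and an
// explicit 0x00/0x80 byte is appended when the magnitude already occupies that bit.
// The helper name `encode_script_num` is hypothetical and standalone; in the actual
// crate this logic lives behind data_stack's OpcodeData serialization.

fn encode_script_num(val: i64) -> Vec<u8> {
    if val == 0 {
        return vec![]; // zero encodes as the empty byte string
    }
    let negative = val < 0;
    let mut magnitude = val.unsigned_abs();
    let mut out = Vec::new();
    while magnitude > 0 {
        out.push((magnitude & 0xff) as u8);
        magnitude >>= 8;
    }
    if out.last().unwrap() & 0x80 != 0 {
        // Top bit is already used by the magnitude: append an explicit sign byte.
        out.push(if negative { 0x80 } else { 0x00 });
    } else if negative {
        // Otherwise fold the sign into the top bit of the last byte.
        *out.last_mut().unwrap() |= 0x80;
    }
    out
}

fn main() {
    // Payloads match the expected vectors in the tests above (push opcode excluded).
    assert_eq!(encode_script_num(-256), vec![0x00, 0x81]); // OpData2 0x00 0x81
    assert_eq!(encode_script_num(-32768), vec![0x00, 0x80, 0x80]); // OpData3 0x00 0x80 0x80
    assert_eq!(encode_script_num(i64::MAX), vec![0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F]); // OpData8
    println!("all encodings match the test vectors");
}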
diff --git a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 79c74c7b3..6c51fd361 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -74,7 +74,7 @@ mod tests { use core::str::FromStr; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, subnets::SubnetworkId, @@ -154,11 +154,11 @@ mod tests { }]; let mut tx = MutableTransaction::with_entries(tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let sig_hash = if !is_ecdsa { - calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) } else { - calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) }; let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let signatures: Vec<_> = inputs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &mut reused_values, &cache).unwrap(); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache, false); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/crypto/txscript/test-data/script_tests-kip10.json b/crypto/txscript/test-data/script_tests-kip10.json new file mode 100644 index 000000000..947c8810d --- /dev/null +++ b/crypto/txscript/test-data/script_tests-kip10.json @@ -0,0 +1,5397 @@ +[ + [ + "Format is: [[wit..., amount]?, scriptSig, scriptPubKey, flags, expected_scripterror, ... comments]" + ], + [ + "It is evaluated as if there was a crediting coinbase transaction with two 0" + ], + [ + "pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey," + ], + [ + "followed by a spending transaction which spends this output as only input (and" + ], + [ + "correct prevout hash), using the given scriptSig. All nLockTimes are 0, all" + ], + [ + "nSequences are max." + ], + [ + "", + "DEPTH 0 EQUAL", + "", + "OK", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK", + "and multiple spaces should not change that." 
+ ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK", + "Similarly whitespace around and between symbols" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1", + "", + "", + "OK" + ], + [ + "0x02 0x01 0x00", + "", + "", + "OK", + "all bytes are significant, not only the last one" + ], + [ + "0x09 0x00000000 0x00000000 0x10", + "", + "", + "OK", + "equals zero when cast to Int64" + ], + [ + "0x01 0x11", + "17 EQUAL", + "", + "OK", + "push 1 byte" + ], + [ + "0x02 0x417a", + "'Az' EQUAL", + "", + "OK" + ], + [ + "0x4b 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "push 75 bytes" + ], + [ + "0x4c 0x4c 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4c is OP_PUSHDATA1 (push 76 bytes)" + ], + [ + "0x4d 0x0001 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4d is OP_PUSHDATA2" + ], + [ + "0x4f 1000", + "ADD 999 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0x50 ENDIF 1", + "", + "OK", + "0x50 is reserved (ok if not executed)" + ], + [ + "0x51", + "0x5f ADD 0x60 EQUAL", + "", + "OK", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "1", + "NOP", + "", + "OK" + ], + [ + "0", + "IF VER ELSE 1 ENDIF", + "", + "OK", + "VER non-functional (ok if not executed)" + ], + [ + "0", + "IF RESERVED RESERVED1 RESERVED2 ELSE 1 ENDIF", + "", + "OK", + "RESERVED ok in un-executed IF" + ], + [ + "1", + "DUP IF ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK" + ], + [ + "1", + "DUP IF ELSE ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "IF ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + 
"0 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and executed inverts on each ELSE encountered" + ], + [ + "1", + "IF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "1", + "IF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 1", + "IF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "1", + "NOTIF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and execution inverts on each ELSE encountered" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 0", + "NOTIF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK", + "Nested ELSE ELSE" + ], + [ + "1", + "NOTIF 0 NOTIF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 0 NOTIF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "0", + "IF RETURN ENDIF 1", + "", + "OK", + "RETURN only works if executed" + ], + [ + "1 1", + "VERIFY", + "", + "OK" + ], + [ + "1 0x05 0x01 0x00 0x00 0x00 0x00", + "VERIFY", + "", + "OK", + "values >4 bytes can be cast to boolean" + ], + [ + "0x01 0x80", + "VERIFY TRUE", + "", + "VERIFY", + "negative 0 is false" + ], + [ + "10 0 11", + "TOALTSTACK DROP FROMALTSTACK ADD 21 EQUAL", + "", + "OK" + ], + [ + "'gavin_was_here'", + "TOALTSTACK 11 FROMALTSTACK 'gavin_was_here' EQUALVERIFY 11 EQUAL", + "", + "OK" + ], + [ + "0", + "IFDUP DEPTH 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "1", + "IFDUP DEPTH 2 EQUALVERIFY 1 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "0x05 0x0100000000", + "IFDUP DEPTH 2 EQUALVERIFY 0x05 0x0100000000 EQUALVERIFY DROP TRUE", + "", + "OK", + "IFDUP dups non ints" + ], + [ + "0", + "DROP DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "0", + "DUP 1 ADD 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", + "OK" + ], + [ + "1 0", + "OVER DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 
ROLL 22 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT 22 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP 20 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP DROP 21 EQUAL", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT 21 EQUAL 2DROP", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT ROT 20 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 24 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT DROP 25 EQUALVERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 20 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP DROP 21 EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP 22 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP DROP 23 EQUALVERIFY TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 22 EQUALVERIFY 2DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 2ROT 20 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "OK" + ], + [ + "13 14", + "2DUP ROT EQUALVERIFY EQUAL", + "", + "OK" + ], + [ + "-1 0 1 2", + "3DUP DEPTH 7 EQUALVERIFY ADD ADD 3 EQUALVERIFY 2DROP 0 EQUALVERIFY", + "", + "OK" + ], + [ + "1 2 3 5", + "2OVER ADD ADD 8 EQUALVERIFY ADD ADD 6 EQUAL", + "", + "OK" + ], + [ + "1 3 5 7", + "2SWAP ADD 4 EQUALVERIFY ADD 12 EQUAL", + "", + "OK" + ], + [ + "0", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483648", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483648", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SIZE 26 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "42", + "SIZE 1 
EQUALVERIFY 42 EQUAL", + "", + "OK", + "SIZE does not consume argument" + ], + [ + "2 -2", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "2147483647 -2147483647", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "-1 -1", + "ADD -2 EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "1 1", + "ADD 2 EQUAL", + "", + "OK" + ], + [ + "1", + "1ADD 2 EQUAL", + "", + "OK" + ], + [ + "111", + "1SUB 110 EQUAL", + "", + "OK" + ], + [ + "111 1", + "ADD 12 SUB 100 EQUAL", + "", + "OK" + ], + [ + "0", + "ABS 0 EQUAL", + "", + "OK" + ], + [ + "16", + "ABS 16 EQUAL", + "", + "OK" + ], + [ + "-16", + "ABS -16 NEGATE EQUAL", + "", + "OK" + ], + [ + "0", + "NOT NOP", + "", + "OK" + ], + [ + "1", + "NOT 0 EQUAL", + "", + "OK" + ], + [ + "11", + "NOT 0 EQUAL", + "", + "OK" + ], + [ + "0", + "0NOTEQUAL 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "-111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "1 1", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 1", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 0", + "BOOLOR NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLOR NOP", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUAL", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMNOTEQUAL NOT", + "", + "OK" + ], + [ + "111 10 1", + "ADD NUMNOTEQUAL", + "", + "OK" + ], + [ + "11 10", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHAN", + "", + "OK" + ], + [ + "11 10", + "GREATERTHAN", + "", + "OK" + ], + [ + "4 4", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "11 10", + "LESSTHANOREQUAL NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "11 10", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "4 4", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "1 0", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 1", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN -1 NUMEQUAL", + "", + "OK" + ], + [ + "0 -2147483647", + "MIN -2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "2147483647 0", + "MAX 2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "0 100", + "MAX 100 NUMEQUAL", + "", + "OK" + ], + [ + "-100 0", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 -2147483647", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 0 1", + "WITHIN", + "", + "OK" + ], + [ + "1 0 1", + "WITHIN NOT", + "", + "OK" + ], + [ + "0 -2147483647 2147483647", + "WITHIN", + "", + "OK" + ], + [ + "-1 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "11 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "-2147483647 -100 100", + "WITHIN NOT", 
+ "", + "OK" + ], + [ + "2147483647 -100 100", + "WITHIN NOT", + "", + "OK" + ], + [ + "2147483647 2147483647", + "SUB 0 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "DUP ADD 4294967294 EQUAL", + "", + "OK", + ">32 bit EQUAL is valid" + ], + [ + "2147483647", + "NEGATE DUP ADD -4294967294 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "NOP BLAKE2B 0x20 0x0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8 EQUAL", + "", + "OK" + ], + [ + "'a'", + "BLAKE2B NOP 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "NOP BLAKE2B 0x20 0x117ad6b940f5e8292c007d9c7e7350cd33cf85b5887e8da71c7957830f536e7c EQUAL", + "", + "OK", + "The NOP is added so the script won't be interpreted as P2SH" + ], + [ + "'a'", + "NOP BLAKE2B 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0xb2 ELSE 1 ENDIF", + "", + "OK", + "opcodes above OP_CHECKSEQUENCEVERIFY invalid if executed" + ], + [ + "0", + "IF 0xbd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xca ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xce ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd5 ELSE 1 ENDIF", + "", + 
"OK" + ], + [ + "0", + "IF 0xd6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xda ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xde ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xea ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xeb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xec ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xed ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xee ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xef ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfa ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xff ELSE 1 ENDIF", + "", + "OK" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "OK", + "520 byte push" + ], + [ + "1", + "0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OK", + "201 opcodes executed. 
0x61 is NOP" + ], + [ + "1 2 3 4 5", + "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d75", + "", + "OK", + "244 stack size (0x6f is 3DUP, 0x6d is 2DROP, and 0x75 is DROP)" + ], + [ + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP DROP 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d 0x61616161", + "", + "OK", + "Max-size (10,000-byte), max-push(520 bytes), max-opcodes(201), max stack size(244 items). 0x6f is 3DUP, 0x61 is NOP, 0x6d is 2DROP" + ], + [ + "0", + "IF 0x5050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050 ENDIF 1", + "", + "OK", + ">201 opcodes, but RESERVED (0x50) doesn't count towards opcode limit." 
+ ], + [ + "", + "1", + "", + "OK" + ], + [ + "127", + "0x01 0x7F EQUAL", + "", + "OK" + ], + [ + "128", + "0x02 0x8000 EQUAL", + "", + "OK", + "Leave room for the sign bit" + ], + [ + "32767", + "0x02 0xFF7F EQUAL", + "", + "OK" + ], + [ + "32768", + "0x03 0x008000 EQUAL", + "", + "OK" + ], + [ + "8388607", + "0x03 0xFFFF7F EQUAL", + "", + "OK" + ], + [ + "8388608", + "0x04 0x00008000 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "0x04 0xFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "2147483648", + "0x05 0x0000008000 EQUAL", + "", + "OK" + ], + [ + "549755813887", + "0x05 0xFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "549755813888", + "0x06 0xFFFFFFFF7F EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "0x08 0xFFFFFFFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "-2", + "0x01 0x82 EQUAL", + "", + "OK", + "Numbers are little-endian with the MSB being a sign bit" + ], + [ + "-127", + "0x01 0xFF EQUAL", + "", + "OK" + ], + [ + "-128", + "0x02 0x8080 EQUAL", + "", + "OK" + ], + [ + "-32767", + "0x02 0xFFFF EQUAL", + "", + "OK" + ], + [ + "-32768", + "0x03 0x008080 EQUAL", + "", + "OK" + ], + [ + "-8388607", + "0x03 0xFFFFFF EQUAL", + "", + "OK" + ], + [ + "-8388608", + "0x04 0x00008080 EQUAL", + "", + "OK" + ], + [ + "-2147483647", + "0x04 0xFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-2147483648", + "0x05 0x0000008080 EQUAL", + "", + "OK" + ], + [ + "-4294967295", + "0x05 0xFFFFFFFF80 EQUAL", + "", + "OK" + ], + [ + "-549755813887", + "0x05 0xFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-549755813888", + "0x06 0x000000008080 EQUAL", + "", + "OK" + ], + [ + "-9223372036854775807", + "0x08 0xFFFFFFFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "2147483647", + "1ADD 2147483648 EQUAL", + "", + "OK", + "We can do math on 4-byte integers, and compare 5-byte ones" + ], + [ + "2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "-2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "1", + "0x02 0x0100 EQUAL NOT", + "", + "OK", + "Not the same byte array..." + ], + [ + "0", + "0x01 0x80 EQUAL NOT", + "", + "OK" + ], + [ + "", + "NOP 1", + "", + "OK", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK", + "They are here to catch copy-and-paste errors" + ], + [ + "0", + "NOTIF 1 ENDIF", + "", + "OK", + "Most of them are duplicated elsewhere," + ], + [ + "1", + "VERIFY 1", + "", + "OK", + "but, hey, more is always better, right?" 
+ ], + [ + "0", + "TOALTSTACK 1", + "", + "OK" + ], + [ + "1", + "TOALTSTACK FROMALTSTACK", + "", + "OK" + ], + [ + "0 0", + "2DROP 1", + "", + "OK" + ], + [ + "0 1", + "2DUP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 0 1", + "3DUP VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2OVER VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0 0 0", + "2ROT VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2SWAP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "IFDUP VERIFY", + "", + "OK" + ], + [ + "", + "DEPTH 1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0", + "DROP 1", + "", + "OK" + ], + [ + "1", + "DUP VERIFY", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", + "OK" + ], + [ + "1 0", + "OVER VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "PICK VERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "PICK VERIFY DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "ROLL VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "ROLL", + "", + "OK" + ], + [ + "1 0 0", + "ROT VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 1", + "TUCK VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUALVERIFY 1", + "", + "OK" + ], + [ + "0 0 1", + "EQUAL EQUAL", + "", + "OK", + "OP_0 and bools must have identical byte representations" + ], + [ + "0", + "1ADD", + "", + "OK" + ], + [ + "2", + "1SUB", + "", + "OK" + ], + [ + "-1", + "NEGATE", + "", + "OK" + ], + [ + "-1", + "ABS", + "", + "OK" + ], + [ + "0", + "NOT", + "", + "OK" + ], + [ + "-1", + "0NOTEQUAL", + "", + "OK" + ], + [ + "1 0", + "ADD", + "", + "OK" + ], + [ + "1 0", + "SUB", + "", + "OK" + ], + [ + "-1 -1", + "BOOLAND", + "", + "OK" + ], + [ + "-1 0", + "BOOLOR", + "", + "OK" + ], + [ + "0 0", + "NUMEQUAL", + "", + "OK" + ], + [ + "5 4", + "NUMEQUAL FALSE EQUAL", + "", + "OK" + ], + [ + "0 0", + "NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "-1 0", + "NUMNOTEQUAL", + "", + "OK" + ], + [ + "-1 0", + "LESSTHAN", + "", + "OK" + ], + [ + "1 0", + "GREATERTHAN", + "", + "OK" + ], + [ + "0 0", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "0 0", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN", + "", + "OK" + ], + [ + "1 0", + "MAX", + "", + "OK" + ], + [ + "-1 -1 0", + "WITHIN", + "", + "OK" + ], + [ + "0", + "SHA256", + "", + "OK" + ], + [ + "0", + "BLAKE2B", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Test from up to 20 pubkeys, all not checked" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + 
], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 
CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", + "", + "OK", + "nOpCount is incremented by the number of keys evaluated in addition to the usual one op per op. In this case we have zero keys, so we can execute 201 CHECKMULTISIGS" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DROP DROP DROP DROP DROP DROP DROP TRUE", + "", + "OK", + "Even though there are no signatures being checked nOpCount is incremented by the number of keys." 
+ ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OK" + ], + [ + "0x01 1", + "BLAKE2B 0x20 0xce57216285125006ec18197bd8184221cefa559bb0798410d99a5bba5b07cd1d EQUAL", + "", + "OK", + "Very basic P2SH" + ], + [ + "0x00", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK", + "Basic OP_0 execution" + ], + [ + "Numeric pushes" + ], + [ + "-1", + "0x4f EQUAL", + "", + "OK", + "OP1_NEGATE pushes 0x81" + ], + [ + "1", + "0x51 EQUAL", + "", + "OK", + "OP_1 pushes 0x01" + ], + [ + "2", + "0x52 EQUAL", + "", + "OK", + "OP_2 pushes 0x02" + ], + [ + "3", + "0x53 EQUAL", + "", + "OK", + "OP_3 pushes 0x03" + ], + [ + "4", + "0x54 EQUAL", + "", + "OK", + "OP_4 pushes 0x04" + ], + [ + "5", + "0x55 EQUAL", + "", + "OK", + "OP_5 pushes 0x05" + ], + [ + "6", + "0x56 EQUAL", + "", + "OK", + "OP_6 pushes 0x06" + ], + [ + "7", + "0x57 EQUAL", + "", + "OK", + "OP_7 pushes 0x07" + ], + [ + "8", + "0x58 EQUAL", + "", + "OK", + "OP_8 pushes 0x08" + ], + [ + "9", + "0x59 EQUAL", + "", + "OK", + "OP_9 pushes 0x09" + ], + [ + "10", + "0x5a EQUAL", + "", + "OK", + "OP_10 pushes 0x0a" + ], + [ + "11", + "0x5b EQUAL", + "", + "OK", + "OP_11 pushes 0x0b" + ], + [ + "12", + "0x5c EQUAL", + "", + "OK", + "OP_12 pushes 0x0c" + ], + [ + "13", + "0x5d EQUAL", + "", + "OK", + "OP_13 pushes 0x0d" + ], + [ + "14", + "0x5e EQUAL", + "", + "OK", + "OP_14 pushes 0x0e" + ], + [ + "15", + "0x5f EQUAL", + "", + "OK", + "OP_15 pushes 0x0f" + ], + [ + "16", + "0x60 EQUAL", + "", + "OK", + "OP_16 pushes 0x10" + ], + [ + "Unevaluated non-minimal pushes are ignored" + ], + [ + "0", + "IF 0x4c 0x00 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA1 ignored" + ], + [ + "0", + "IF 0x4d 0x0000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA2 ignored" + ], + [ + "0", + "IF 0x4c 0x00000000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA4 ignored" + ], + [ + "0", + "IF 0x01 0x81 ENDIF 1 ", + "", + "OK", + "1NEGATE equiv" + ], + [ + "0", + "IF 0x01 0x01 ENDIF 1 ", + "", + "OK", + "OP_1 equiv" + ], + [ + "0", + "IF 0x01 0x02 ENDIF 1 ", + "", + "OK", + "OP_2 equiv" + ], + [ + "0", + "IF 0x01 0x03 ENDIF 1 ", + "", + "OK", + "OP_3 equiv" + ], + [ + "0", + "IF 0x01 0x04 ENDIF 1 ", + "", + "OK", + "OP_4 equiv" + ], + [ + "0", + "IF 0x01 0x05 ENDIF 1 ", + "", + "OK", + "OP_5 equiv" + ], + [ + "0", + "IF 0x01 0x06 ENDIF 1 ", + "", + "OK", + "OP_6 equiv" + ], + [ + "0", + "IF 0x01 0x07 ENDIF 1 ", + "", + "OK", + "OP_7 equiv" + ], + [ + "0", + "IF 0x01 0x08 ENDIF 1 ", + "", + "OK", + "OP_8 equiv" + ], + [ + "0", + "IF 0x01 0x09 ENDIF 1 ", + "", + "OK", + "OP_9 equiv" + ], + [ + "0", + "IF 0x01 0x0a ENDIF 1 
", + "", + "OK", + "OP_10 equiv" + ], + [ + "0", + "IF 0x01 0x0b ENDIF 1 ", + "", + "OK", + "OP_11 equiv" + ], + [ + "0", + "IF 0x01 0x0c ENDIF 1 ", + "", + "OK", + "OP_12 equiv" + ], + [ + "0", + "IF 0x01 0x0d ENDIF 1 ", + "", + "OK", + "OP_13 equiv" + ], + [ + "0", + "IF 0x01 0x0e ENDIF 1 ", + "", + "OK", + "OP_14 equiv" + ], + [ + "0", + "IF 0x01 0x0f ENDIF 1 ", + "", + "OK", + "OP_15 equiv" + ], + [ + "0", + "IF 0x01 0x10 ENDIF 1 ", + "", + "OK", + "OP_16 equiv" + ], + [ + "Numeric minimaldata rules are only applied when a stack item is numerically evaluated; the push itself is allowed" + ], + [ + "0x01 0x00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x01 0x80", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0180", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0100", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0200", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0300", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0400", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0500", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0600", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0700", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0800", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0900", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0a00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0b00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0c00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0d00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0e00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0f00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x1000", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "While not really correctly DER encoded, the empty signature is allowed" + ], + [ + "to provide a compact way to provide a delibrately invalid signature." + ], + [ + "0", + "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", + "", + "OK" + ], + [ + "0", + "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", + "", + "OK" + ], + [ + "TRUE DATA_8 0x0000000000000080", + "CHECKSEQUENCEVERIFY", + "", + "OK", + "CSV passes if stack top bit 1 << 63 is set" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE", + "and multiple spaces should not change that." 
+ ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "0x4c01", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA1 with not enough bytes" + ], + [ + "0x4d0200ff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA2 with not enough bytes" + ], + [ + "0x4e03000000ffff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA4 with not enough bytes" + ], + [ + "1", + "IF 0x50 ENDIF 1", + "", + "BAD_OPCODE", + "0x50 is reserved" + ], + [ + "0x52", + "0x5f ADD 0x60 EQUAL", + "", + "EVAL_FALSE", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "0", + "NOP", + "", + "EVAL_FALSE", + "" + ], + [ + "1", + "IF VER ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VER non-functional" + ], + [ + "0", + "IF VERIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERIF ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF VERNOTIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERNOTIF ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal everywhere" + ], + [ + "0", + "DUP IF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "DUP IF ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "NOTIF ELSE 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1", + "IF RETURN ELSE ELSE 1 ENDIF", + "", + "OP_RETURN", + "Multiple ELSEs" + ], + [ + "1", + "IF 1 ELSE ELSE RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "Malformed IF/ELSE/ENDIF sequence" + ], + [ + "1", + "ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE IF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ELSE ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "RETURN", + "", + "OP_RETURN" + ], + [ + "1", + "DUP IF RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "RETURN 'data'", + "", + "OP_RETURN", + "canonical prunable txout format" + ], + [ + "0", + "VERIFY 1", + "", + "VERIFY" + ], + [ + "1", + "VERIFY", + "", + "EVAL_FALSE" + ], + [ + "1", + "VERIFY 0", + "", + "EVAL_FALSE" + ], + [ + "", + "IFDUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + 
"DROP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "DUP 1 ADD 2 EQUALVERIFY 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 0 NIP", + "", + "EVAL_FALSE" + ], + [ + "", + "OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "PICK 19 EQUALVERIFY DEPTH 2 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "0 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 2 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0 1", + "SWAP 1 EQUALVERIFY", + "", + "EQUALVERIFY" + ], + [ + "", + "TUCK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 0", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "EVAL_FALSE" + ], + [ + "", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 2", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2OVER 1 VERIFY DROP DROP DROP DROP TRUE", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "'a' 'b'", + "CAT", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'a' 'b' 0", + "IF CAT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'abc' 1 1", + "SUBSTR", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 1 1 0", + "IF SUBSTR ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 2 0", + "IF LEFT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "LEFT disabled" + ], + [ + "'abc' 2 0", + "IF RIGHT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "RIGHT disabled" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOP", + "", + "EMPTY_STACK", + "Checks EMPTY_STACK error" + ], + [ + "'abc'", + "INVERT VERIFY TRUE", + "", + "DISABLED_OPCODE", + "INVERT disabled" + ], + [ + "1 2 0", + "IF AND ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "AND disabled" + ], + [ + "1 2 0", + "IF OR ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "OR disabled" + ], + [ + "1 2 0", + "IF XOR ELSE 1 ENDIF NOP", + "", + 
"DISABLED_OPCODE", + "XOR disabled" + ], + [ + "2 0", + "IF 2MUL ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2MUL disabled" + ], + [ + "2 0", + "IF 2DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2DIV disabled" + ], + [ + "2 2 0", + "IF MUL ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "MUL disabled" + ], + [ + "2 2 0", + "IF DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "DIV disabled" + ], + [ + "2 2 0", + "IF MOD ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "MOD disabled" + ], + [ + "2 2 0", + "IF LSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "LSHIFT disabled" + ], + [ + "2 2 0", + "IF RSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "RSHIFT disabled" + ], + [ + "", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are no stack items" + ], + [ + "0", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are not 2 stack items" + ], + [ + "0 1", + "EQUAL", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "ADD 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "11 1", + "ADD 12 SUB 11 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "2147483648 0", + "ADD NOP", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "-2147483648 0", + "ADD NOP", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "-9223372036854775808 0", + "ADD NOP", + "", + "UNKNOWN_ERROR", + "" + ], + [ + "2147483647", + "DUP ADD 4294967294 NUMEQUAL", + "", + "OK", + "NUMEQUAL is in numeric range since kip10" + ], + [ + "'abcdef'", + "NOT 0 EQUAL", + "", + "OK", + "numbers up to 8 bytes are supported since kip10" + ], + [ + "'abcdefghi'", + "NOT 0 EQUAL", + "", + "UNKNOWN_ERROR", + "NOT is an arithmetic operand" + ], + [ + "2", + "DUP MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "DUP DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "7 3", + "MOD 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 2", + "LSHIFT 8 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 1", + "RSHIFT 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "0x50", + "1", + "", + "BAD_OPCODE", + "opcode 0x50 is reserved" + ], + [ + "1", + "IF 0xb2 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxVersion is reserved" + ], + [ + "1", + "IF 0xb3 ELSE 1 ENDIF", + "", + "OK", + "OpTxInputCount is enabled since kip10" + ], + [ + "1", + "IF 0xb4 ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputCount is enabled since kip10" + ], + [ + "1", + "IF 0xb5 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxLockTime is reserved" + ], + [ + "1", + "IF 0xb6 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxSubnetId is reserved" + ], + [ + "1", + "IF 0xb7 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxGas is reserved" + ], + [ + "1", + "IF 0xb8 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxPayload is reserved" + ], + [ + "1", + "IF 0xb9 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxInputIndex is enabled since kip10" + ], + [ + "1", + "IF 0xba ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpOutpointTxId is reserved" + ], + [ + "1", + "IF 0xbb ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpOutpointOutputIdx is reserved" + ], + [ + "1", + "IF 0xbc ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputScriptSig is reserved" + ], + [ + "1", + "IF 0xbd ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputSeq is reserved" + ], + [ + "0 1", + "IF 0xbe 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + 
"OpTxInputAmount is enabled since kip10" + ], + [ + "0 1", + "IF 0xbf ELSE 1 ENDIF", + "", + "OK", + "OpTxInputSpk is enabled since kip10" + ], + [ + "1", + "IF 0xc0 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputBlockDaaScore is reserved" + ], + [ + "1", + "IF 0xc1 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "OpTxInputIsCoinbase is reserved" + ], + [ + "0 1", + "IF 0xc2 0 NUMEQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputAmount is enabled since kip10" + ], + [ + "0 1", + "IF 0xc3 0x02 0x0000 EQUAL ELSE 1 ENDIF", + "", + "OK", + "OpTxOutputSpk is enabled since kip10" + ], + [ + "1", + "IF 0xc4 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "opcodes above OpTxOutputSpk invalid if executed" + ], + [ + "1", + "IF 0xc5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xca ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xce ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xda ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xde ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xea ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xeb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xec ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xed ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xee ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xef ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf2 ELSE 
1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfa ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfe ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xff ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "PUSH_SIZE", + ">520 byte push" + ], + [ + "0", + "IF 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", + "", + "PUSH_SIZE", + ">520 byte push in non-executed IF branch" + ], + [ + "1", + "0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OP_COUNT", + ">201 opcodes executed. 0x61 is NOP" + ], + [ + "0", + "IF 0x6161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", + "", + "OP_COUNT", + ">201 opcodes including non-executed IF branch. 
0x61 is NOP" + ], + [ + "", + "1 2 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack size (0x6f is 3DUP)" + ], + [ + "", + "1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 7 8 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack+altstack size" + ], + [ + "", + "0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "SCRIPT_SIZE", + "10,001-byte scriptPubKey" + ], + [ + "1", + "VER", + "", + "BAD_OPCODE", + "OP_VER is reserved" + ], + [ + "1", + "VERIF", + "", + "BAD_OPCODE", + "OP_VERIF is reserved" + ], + [ + "1", + "VERNOTIF", + "", + "BAD_OPCODE", + "OP_VERNOTIF is reserved" + ], + [ + "1", + "RESERVED", + "", + "BAD_OPCODE", + "OP_RESERVED is reserved" + ], + [ + "1", + "RESERVED1", + "", + "BAD_OPCODE", + "OP_RESERVED1 is reserved" + ], + [ + "1", + "RESERVED2", + "", + "BAD_OPCODE", + "OP_RESERVED2 is reserved" + ], + [ + "1", + "0xb2", + "", + "BAD_OPCODE", + "0xb2 == OP_CHECKSEQUENCEVERIFY + 1" + ], + [ + "2147483648", + "1ADD 2147483649 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "NEGATE -2147483648 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-2147483648", + "1ADD -2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483647", + "DUP 1ADD 1SUB NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648", + "1SUB 2147483647 NUMEQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLOR 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "2147483648 1", + "BOOLAND 1 EQUAL", + "", + "OK", + "We can do math on 5-byte integers since kip10" + ], + [ + "-9223372036854775808", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers" + ], + [ + "-9223372036854775808", + "NEGATE 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers" + ], + [ + "-9223372036854775808", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "Because we use a sign bit, -9223372036854775808 is also 9 bytes" + ], + [ + "-9223372036854775808", + "1ADD 1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers, even if the result is 8-bytes" + ], + [ + "-9223372036854775808", + "1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 9-byte integers, even if the result is 8-bytes" + ], + 
[ + "-9223372036854775808 1", + "BOOLOR 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLOR on 9-byte integers (but we can still do IF etc)" + ], + [ + "-9223372036854775808 1", + "BOOLAND 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLAND on 9-byte integers" + ], + [ + "-9223372036854775807", + "1SUB", + "", + "UNKNOWN_ERROR", + "result of math operation can't exceed 8 bytes" + ], + [ + "1", + "1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "ENDIF without IF" + ], + [ + "1", + "IF 1", + "", + "UNBALANCED_CONDITIONAL", + "IF without ENDIF" + ], + [ + "", + "IF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "", + "NOTIF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "They are here to catch copy-and-paste errors" + ], + [ + "", + "VERIFY 1", + "", + "INVALID_STACK_OPERATION", + "Most of them are duplicated elsewhere," + ], + [ + "", + "TOALTSTACK 1", + "", + "INVALID_STACK_OPERATION", + "but, hey, more is always better, right?" + ], + [ + "1", + "FROMALTSTACK", + "", + "INVALID_ALTSTACK_OPERATION" + ], + [ + "1", + "2DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "3DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 1 1", + "2ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "IFDUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "PICK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "ROLL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1ADD 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1SUB 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NEGATE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "ABS 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0NOTEQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "ADD", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SUB", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLAND", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLOR", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMEQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMNOTEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "MIN", + "", + 
"INVALID_STACK_OPERATION" + ], + [ + "1", + "MAX", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "WITHIN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "Increase CHECKSIG and CHECKMULTISIG negative test coverage" + ], + [ + "", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are no stack items" + ], + [ + "0", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are not 2 stack items" + ], + [ + "", + "CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are no stack items" + ], + [ + "", + "-1 CHECKMULTISIG NOT", + "", + "PUBKEY_COUNT", + "CHECKMULTISIG must error when the specified number of pubkeys is negative" + ], + [ + "", + "1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough pubkeys on the stack" + ], + [ + "", + "-1 0 CHECKMULTISIG NOT", + "", + "SIG_COUNT", + "CHECKMULTISIG must error when the specified number of signatures is negative" + ], + [ + "", + "1 'pk1' 1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough signatures on the stack" + ], + [ + "", + "0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 
0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", + "", + "OP_COUNT", + "202 CHECKMULTISIGS, fails due to 201 op limit" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", + "", + "OP_COUNT", + "Fails due to 201 script operation limit" + ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 
'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OP_COUNT", + "" + ], + [ + "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21", + "21 CHECKMULTISIG 1", + "", + "PUBKEY_COUNT", + "nPubKeys > 20" + ], + [ + "0 'sig' 1 0", + "CHECKMULTISIG 1", + "", + "SIG_COUNT", + "nSigs > nPubKeys" + ], + [ + "NOP 0x01 1", + "BLAKE2B 0x20 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", + "", + "SIG_PUSHONLY", + "Tests for Script.IsPushOnly()" + ], + [ + "0 0x01 0x50", + "BLAKE2B 0x20 0xece424a6bb6ddf4db592c0faed60685047a361b1 EQUAL", + "", + "BAD_OPCODE", + "OP_RESERVED in P2SH should fail" + ], + [ + "0 0x01", + "VER BLAKE2B 0x20 0x0f4d7845db968f2a81b530b6f3c1d6246d4c7e01 EQUAL", + "", + "BAD_OPCODE", + "OP_VER in P2SH should fail" + ], + [ + "0x00", + "'00' EQUAL", + "", + "EVAL_FALSE", + "Basic OP_0 execution" + ], + [ + "MINIMALDATA enforcement for PUSHDATAs" + ], + [ + "0x4c 0x00", + "DROP 1", + "", + "MINIMALDATA", + "Empty vector minimally represented by OP_0" + ], + [ + "0x01 0x81", + "DROP 1", + "", + "MINIMALDATA", + "-1 minimally represented by OP_1NEGATE" + ], + [ + "0x01 0x01", + "DROP 1", + "", + "MINIMALDATA", + "1 to 16 minimally represented by OP_1 to OP_16" + ], + [ + "0x01 0x02", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x03", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x04", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x05", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x06", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x07", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x08", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x09", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0a", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0b", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0c", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0d", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0e", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0f", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x10", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x4c 0x48 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA1 of 72 bytes minimally represented by direct push" + ], + [ + "0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1" + ], + [ + "0x4e 0x00010000 
0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2" + ], + [ + "MINIMALDATA enforcement for numeric arguments" + ], + [ + "0x01 0x00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x01 0x80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "0x80 (negative zero) numequals 0" + ], + [ + "0x02 0x0080", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0500", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x03 0x050000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x02 0x0580", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0x050080", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0xff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff" + ], + [ + "0x03 0xff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xff7f" + ], + [ + "0x04 0xffff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffffff" + ], + [ + "0x04 0xffff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff7f" + ], + [ + "Test every numeric-accepting opcode for correct handling of the numeric minimal encoding rule" + ], + [ + "1 0x02 0x0000", + "PICK DROP", + "", + "UNKNOWN_ERROR" + ], + [ + "1 0x02 0x0000", + "ROLL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NEGATE DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "ABS DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "0NOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 1", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMNOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMNOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHAN DROP 1", + "", + 
"UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0 0x02 0x0000", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 1", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "Check MINIMALIF" + ], + [ + "2", + "IF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "2", + "NOTIF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "Order of CHECKMULTISIG evaluation tests, inverted by swapping the order of" + ], + [ + "pubkeys/signatures so they fail due to the STRICTENC rules on validly encoded" + ], + [ + "signatures and pubkeys." + ], + [ + "0x41 0x833682d4f60cc916a22a2c263e658fa662c49badb1e2a8c6208987bf99b1abd740498371480069e7a7a6e7471bf78c27bd9a1fd04fb212a92017346250ac187b01 0x41 0xea4a8d20562a950f4695dc24804565482e9fa111704886179d0c348f2b8a15fe691a305cd599c59c131677146661d5b98cb935330989a85f33afc70d0a21add101", + "2 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 0 2 CHECKMULTISIG NOT", + "", + "PUBKEYFORMAT", + "2-of-2 CHECKMULTISIG NOT with the first pubkey invalid, and both signatures validly encoded." 
+ ], + [ + "CHECKSEQUENCEVERIFY tests" + ], + [ + "", + "CHECKSEQUENCEVERIFY", + "", + "INVALID_STACK_OPERATION", + "CSV automatically fails on a empty stack" + ], + [ + "0", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is set and the tx version < 2" + ], + [ + "4294967296", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is not set, and tx version < 2" + ], + [ + "NULLFAIL should cover all signatures and signatures only" + ], + [ + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "OK", + "BIP66 and NULLFAIL-compliant" + ], + [ + "0x09 0x300602010102010101 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "NULLFAIL", + "BIP66-compliant but not NULLFAIL-compliant 4" + ], + [ + "The End" + ] +] diff --git a/database/src/access.rs b/database/src/access.rs index ad82197db..fad8ee130 100644 --- a/database/src/access.rs +++ b/database/src/access.rs @@ -22,6 +22,8 @@ where prefix: Vec, } +pub type KeyDataResult = Result<(Box<[u8]>, TData), Box>; + impl CachedDbAccess where TKey: Clone + std::hash::Hash + Eq + Send + Sync, @@ -65,7 +67,7 @@ where } } - pub fn iterator(&self) -> impl Iterator, TData), Box>> + '_ + pub fn iterator(&self) -> impl Iterator> + '_ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned` has short lifetime @@ -173,7 +175,7 @@ where seek_from: Option, // iter whole range if None limit: usize, // amount to take. skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). 
- ) -> impl Iterator<Item = Result<(Box<[u8]>, TData), Box<dyn Error>>> + '_ + ) -> impl Iterator<Item = KeyDataResult<TData>> + '_ where TKey: Clone + AsRef<[u8]>, TData: DeserializeOwned, diff --git a/database/src/key.rs b/database/src/key.rs index e8aeff091..83fa8ebb2 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -73,6 +73,8 @@ impl Display for DbKey { match prefix { Ghostdag | GhostdagCompact + | TempGhostdag + | TempGhostdagCompact | RelationsParents | RelationsChildren | Reachability diff --git a/database/src/registry.rs b/database/src/registry.rs index 752efb97b..36a728ebe 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -41,6 +41,10 @@ pub enum DatabaseStorePrefixes { ReachabilityTreeChildren = 30, ReachabilityFutureCoveringSet = 31, + // ---- Ghostdag Proof + TempGhostdag = 40, + TempGhostdagCompact = 41, + // ---- Metadata ---- MultiConsensusMetadata = 124, ConsensusEntries = 125, diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 15a408dad..3507339f2 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -46,10 +46,12 @@ clap.workspace = true dhat = { workspace = true, optional = true } dirs.workspace = true futures-util.workspace = true +itertools.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +rocksdb.workspace = true serde.workspace = true tempfile.workspace = true thiserror.workspace = true diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 4175206eb..db9f32c16 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -6,9 +6,12 @@ use kaspa_consensus_core::{ errors::config::{ConfigError, ConfigResult}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, debug, info}; +use kaspa_core::{core::Core, debug, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::prelude::CachePolicy; +use kaspa_database::{ + prelude::{CachePolicy, DbWriter, DirectDbWriter}, + registry::DatabaseStorePrefixes, +}; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; @@ -33,6 +36,7 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -316,13 +320,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some()) || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap()) { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); + let mut mcms = MultiConsensusManagementStore::new(meta_db.clone()); + let version = mcms.version().unwrap(); + + // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies.
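The kaspad daemon changes that follow implement a soft version-3 to version-4 upgrade: deprecated GHOSTDAG records for block levels 1 and above are removed by deleting a key range bounded by store prefixes, and the range is then compacted so the tombstones take effect immediately. A minimal standalone sketch of that RocksDB pattern with the `rocksdb` crate (the path and prefix bytes are illustrative stand-ins, not the real `DatabaseStorePrefixes` values):

```rust
use rocksdb::{Options, WriteBatch, DB};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    // Hypothetical path, used only for this demo.
    let db = DB::open(&opts, "/tmp/prefix-cleanup-demo")?;

    // Keys are laid out as [store_prefix, block_level, ...], so a
    // half-open range over the level byte selects all levels >= 1.
    // The prefix byte 9 is an illustrative stand-in.
    let start = vec![9u8, 1u8];
    let end = vec![9u8, 255u8];

    // Delete the whole [start, end) range in one batched write...
    let mut batch = WriteBatch::default();
    batch.delete_range(&start, &end);
    db.write(batch)?;

    // ...then compact it so the deletion is applied eagerly rather
    // than lazily during future compactions.
    db.compact_range(Some(&start), Some(&end));
    Ok(())
}
```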
+ // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate + steps without having to delete the DB + if version == 3 { + let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); + + match active_consensus_dir_name { + Some(current_consensus_db) => { + // Apply soft upgrade logic: delete GD data from higher levels + // and then update DB version to 4 + let consensus_db = kaspa_database::prelude::ConnBuilder::default() + .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) + .with_files_limit(1) + .build() + .unwrap(); + info!("Scanning for deprecated records to clean up"); + + let mut gd_record_count: u32 = 0; + let mut compact_record_count: u32 = 0; + + let start_level: u8 = 1; + let start_level_bytes = start_level.to_le_bytes(); + let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); + + // This section is used to count the records to be deleted. It's not used for the actual delete. + for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { + break; + } + + gd_record_count += 1; + } + + let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let compact_prefix = compact_prefix_vec.as_slice(); + + for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { + break; + } + + compact_record_count += 1; + } + + trace!("Number of Ghostdag records to clean up: {}", gd_record_count); + trace!("Number of GhostdagCompact records to clean up: {}", compact_record_count); + info!("Number of deprecated records to clean up: {}", gd_record_count + compact_record_count); + + let msg = + "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue?
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted rangeto apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db diff --git a/math/src/uint.rs b/math/src/uint.rs index 4ecc1fe12..095d59559 100644 --- a/math/src/uint.rs +++ b/math/src/uint.rs @@ -158,6 +158,18 @@ macro_rules! 
construct_uint { (self, carry) } + #[inline] + pub fn saturating_sub(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_sub(other); + if carry { Self::ZERO } else { sum } + } + + #[inline] + pub fn saturating_add(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_add(other); + if carry { Self::MAX } else { sum } + } + /// Multiplication by u64 #[inline] pub fn overflowing_mul_u64(self, other: u64) -> (Self, bool) { @@ -1150,6 +1162,19 @@ mod tests { } } + #[test] + fn test_saturating_ops() { + let u1 = Uint128::from_u128(u128::MAX); + let u2 = Uint128::from_u64(u64::MAX); + // Sub + assert_eq!(u1.saturating_sub(u2), Uint128::from_u128(u128::MAX - u64::MAX as u128)); + assert_eq!(u1.saturating_sub(u2).as_u128(), u128::MAX - u64::MAX as u128); + assert_eq!(u2.saturating_sub(u1), Uint128::ZERO); + // Add + assert_eq!(u1.saturating_add(Uint128::from_u64(1)), Uint128::MAX); + assert_eq!(u2.saturating_add(Uint128::from_u64(1)), Uint128::from_u128(u64::MAX as u128 + 1)); + } + #[test] fn test_mod_inv() { use core::cmp::Ordering; diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 319aaa484..12416be67 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -131,7 +131,7 @@ pub enum NonStandardError { RejectInsufficientFee(TransactionId, u64, u64), #[error("transaction input #{1} has {2} signature operations which is more than the allowed max amount of {3}")] - RejectSignatureCount(TransactionId, usize, u8, u8), + RejectSignatureCount(TransactionId, usize, u64, u8), } impl NonStandardError { diff --git a/mining/src/feerate/fee_estimation.ipynb b/mining/src/feerate/fee_estimation.ipynb index a8b8fbfc8..51b905fa2 100644 --- a/mining/src/feerate/fee_estimation.ipynb +++ b/mining/src/feerate/fee_estimation.ipynb @@ -14,7 +14,7 @@ }, { "cell_type": "code", - "execution_count": 97, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -464,6 +464,155 @@ "pred" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Avg transaction mass\n", + "\n", + "We suggest a decaying weight formula for calculating the average mass throughout history, as opposed to using the average mass of the currently existing transactions. The following code compares the two approaches." 
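The decaying-weight average proposed in the markdown cell above can be read as an exponentially discounted mean over all historical masses; the notebook cells below then compare it against a windowed "current average". A minimal sketch of the discounted mean (Rust is used here for consistency with the surrounding codebase; `alpha` is an assumed decay constant, not a value taken from the notebook):

```rust
/// Exponentially decaying running average: each new mass contributes
/// fully, while history is discounted by `alpha` per step.
struct DecayingAverage {
    alpha: f64,
    weighted_sum: f64,
    total_weight: f64,
}

impl DecayingAverage {
    fn new(alpha: f64) -> Self {
        Self { alpha, weighted_sum: 0.0, total_weight: 0.0 }
    }

    fn update(&mut self, mass: f64) -> f64 {
        // Discount all previous observations, then add the new one.
        self.weighted_sum = self.weighted_sum * self.alpha + mass;
        self.total_weight = self.total_weight * self.alpha + 1.0;
        self.weighted_sum / self.total_weight
    }
}

fn main() {
    // alpha close to 1.0 gives a long memory; 0.999 is illustrative.
    let mut avg = DecayingAverage::new(0.999);
    for mass in [2036.0, 2100.0, 50_000.0, 2050.0] {
        println!("avg = {:.1}", avg.update(mass));
    }
}
```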
+ ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Helper function for creating a long sequence of transaction masses with periods with highly unusual mass clusters\n", + "N = sequence length\n", + "M = length of each unusual cluster\n", + "X = number of unusual clusters\n", + "\"\"\"\n", + "def generate_seq(N, M, X, mean=2036, var=100, mean_cluster=50000, var_cluster=10000):\n", + " seq = np.random.normal(loc=mean, scale=var, size=N)\n", + " clusters = np.random.normal(loc=mean_cluster, scale=var_cluster, size=X * M)\n", + " cluster_indices = np.random.choice(N - M, size=X, replace=False)\n", + " for i, idx in enumerate(cluster_indices):\n", + " seq[idx:idx+M] = clusters[i*M:(i+1)*M]\n", + " return seq" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgo... [base64 PNG plot data omitted]", + "text/plain": [ + "<Figure size 640x480 with 1 Axes>
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Create a long sequence of transaction masses with periods with highly unusual mass clusters\n", + "N = 500_000\n", + "seq = generate_seq(N, 50, 40)\n", + "\n", + "# Approach 1 - calculate the current average\n", + "\n", + "# Requires a removal strategy for having a meaningful \"current\"\n", + "R = 0.8\n", + "# At each time step, remove the first element with probability 1 - R\n", + "removals = np.random.choice([0, 1], size=N, p=[R, 1 - R])\n", + "# After const steps, remove with probability 1, so that we simulate a mempool with nearly const size\n", + "removals[256:] = 1\n", + "j = 0\n", + "y = []\n", + "for i in range(1, N+1):\n", + " y.append(np.sum(seq[j:i])/(i-j))\n", + " if removals[i-1] == 1:\n", + " j += 1\n", + "\n", + "x = np.arange(0, N)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjAAAAGvCAYAAABFKe9kAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAABriElEQVR4nO3deXwU5f0H8M/m5shBgCRgwi2XyI2QIhQUQQQrirUetR5Ya5uoiK1K7c+rVagHautVK0IpIoiCFlAgcgSVIBgJEITIaYCQcGYDAXL//khm99llZ3dmdmZndvfzfr14vZbNZPJkk+x853m+z/dra2hoaAARERFREIkwewBEREREajGAISIioqDDAIaIiIiCDgMYIiIiCjoMYIiIiCjoMIAhIiKioMMAhoiIiIIOAxgiIiIKOlFmD8Ao9fX1KCkpQXx8PGw2m9nDISIiIgUaGhpw5swZtG/fHhER8vMsIRvAlJSUICMjw+xhEBERkQaHDh1Cenq67MdDNoCJj48H0PgCJCQkmDwaIiIiUqKiogIZGRmO67ickA1gpGWjhIQEBjBERERBxlf6B5N4iYiIKOgwgCEiIqKgwwCGiIiIgg4DGCIiIgo6DGCIiIgo6DCAISIioqDDAIaIiIiCDgMYIiIiCjoMYIiIiCjoMIAhIiKioMMAhoiIiIIOAxgiIiIKOgxgiMgwr3+5B+99td/sYRBRCArZbtREZK6yigt49csfAQC/yeyEmCjeLxGRfviOQkSGqG9ocDw+X1Nn4kiIKBQxgCEiQ8REOt9ezlczgCEifTGAISJD2Gw2x+Nz1bUmjoSIQhEDGCIy3DnOwBCRzhjAEJHhmANDRHpjAENEhjtwotLsIRBRiGEAQ0SG23643OwhEFGIYQBDRIbrl55k9hCIKMQwgCEiw1XV1ps9BCIKMQxgiMhwrANDRHpjAENEhuM2aiLSGwMYIjLcuRoWsiMifTGAISLDcQmJiPTGAIaIDMclJCLSGwMYIjLcx/mHzR4CEYUYVQHMjBkzMGTIEMTHxyMlJQWTJk1CUVGRyzHvvvsuRo0ahYSEBNhsNpSXl190nlOnTuGOO+5AQkICkpKSMGXKFJw9e9blmO3bt2PEiBGIi4tDRkYGXnzxRfXfHREREYUkVQFMbm4usrKysGnTJuTk5KCmpgZjx45FZaWzTPi5c+dw7bXX4s9//rPsee644w7s3LkTOTk5WL58OTZs2ID777/f8fGKigqMHTsWHTt2RH5+Pl566SU888wzePfddzV8i0Rktis6JZs9BCIKMVFqDl65cqXL/+fOnYuUlBTk5+dj5MiRAICpU6cCANavX+/xHLt27cLKlSuxZcsWDB48GADwz3/+E9dddx1efvlltG/fHh988AGqq6vx/vvvIyYmBpdddhkKCgowa9Ysl0CHiIIDdyERkd78yoGx2+0AgORk5XdXeXl5SEpKcgQvADBmzBhERETg22+/dRwzcuRIxMTEOI4ZN24cioqKcPr0aY/nraqqQkVFhcs/IrKGc1VM4iUifWkOYOrr6zF16lQMHz4cffr0Ufx5paWlSElJcXkuKioKycnJKC0tdRyTmprqcoz0f+kYdzNmzEBiYqLjX0ZGhppvh4gMVFnNGRgi0pfmACYrKwuFhYVYuHChnuPRbPr06bDb7Y5/hw4dMntIRNSE26iJSG+qcmAk2dnZjuTb9PR0VZ+blpaGY8eOuTxXW1uLU6dOIS0tzXFMWVmZyzHS/6Vj3MXGxiI2NlbVWIgoMM5V16GhoQE2m83soRBRiFA1A9PQ0IDs7GwsXboUa9euRefOnVV/wczMTJSXlyM/P9/x3Nq1a1FfX4+hQ4c6jtmwYQNqamocx+Tk5KBHjx5o1aqV6q9J4am6th5bi0+jrr7B7KGEvbr6BlTXsSM1EelHVQCTlZWF+fPnY8GCBYiPj0dpaSlKS0tx/vx5xzGlpaUoKCjA3r17AQA7duxAQUEBTp06BQDo1asXrr32Wvz2t7/F5s2b8c033yA7Oxu33nor2rdvDwC4/fbbERMTgylTpmDnzp1YtGgRXn/9dUybNk2v75vCwKOLt+HGtzbijbV7zR4Kge0EiEhfqgKYt99+G3a7HaNGjUK7du0c/xYtWuQ45p133sGAAQPw29/+FgAwcuRIDBgwAP/73/8cx3zwwQfo2bMnrr76alx33XW48sorXWq8JCYmYvXq1Thw4AAGDRqERx99FE899RS3UJMqy7aVAADeXM8AxgoqGcAQkY5sDQ0NITm/XlFRgcTERNjtdiQkJJg9HDJBpydWOB4fnDnBxJGEp1OV1Rj41xzH/794eAR6tePfIhF5p/T6zV5IRBQQP52s9H0QEZFCDGCIiIgo6DCAIaKAiI7k2w0R6YfvKEQUEKcqq80eAhGFEAYwRBQQe4+dNXsIRBRCGMBQyLokqZnZQyBB23hWyiYi/TCAoZDVIjbS7CGEDD2qLbCQHRHpiQEMhazmMZpafZGbyqpaXPVKLp5cusOv85xkDgwR6Y
gBDIUszsDoY9XOUhw4UYkPvi326zwL/Px8IiIRAxgKWc2inTMwNWwkqFlslD6BYNeUlrqch4gIYABDIaylMANTWVVr4kiCW3PhdfSns/c+7kIiIh0xgKGQJRZOO8sARrOWsc6ZrMpq7a9jNWfBiEhHDGAoLJSUXzB7CEErRggEOZNFRFbBAIbCQlSkzewhBC2b8NIxgCEiq2AAQ2Hh7AVeePVwtoq1XIjIGhjAUFhgDow+OANDRFbBAIbCwpkLNWYPIST4GwjqUdGXiAhgAENh4gyXkHTh7wzM+RouQRGRPhjAUFhgAKOPSj/7GeX/dFqnkRBRuGMAQ2GBAYw+/J2B2X+8UqeREFG4YwBDYeFsFXNg9OBvALO7tEKnkRBRuGMAQ2GBMzD68DeJt57FeIlIJwxgKCxwG7U+/J2B+Vm31jqNhIjCHQMYCgsVnIHRRaXGQnaZXRi4EJG+GMBQWDjLOjC60DqTFRvd+FbDQJKI9MIAhsJCuOXA7D12Fou/O6R74TitS0jri44DAP5XcETP4RBRGIsyewBEgRBuOTBjZuUCACJsNkwelK7bebW+jpe1T8DOkgqkJTbTbSxEFN44A0Nh4Vx1HWrrwm8LTN7+k7qer7JaWwDz8+5tAQCtW8ToORwiCmMMYChsaE1ADWZ6d+HW+hq2jGuc7A23mTAiMg4DGAobFWGYyKt1xkSO1gAkPrYpgAmzXCQiMg4DGAob4Xj3r3fycnVtPWo0LMVJMzBnWBE54I6fqcLWYvagotDDAIbCRrjtRAKAgkPlup/znIZlpPjYaACcgTHDz2auwY1vbWQjTQo5DGAobLAfkj6On72g+nOcMzChHcDU1Tdg19EK1Nfru33dHzV1jWNZX3TM5JEQ6YsBDIWNcJyBMYKWzVwtm3JgQv1n8OTSHRj/+ld4Z8M+s4dykVB/7Sn8MIChsME3cH1omcmKb5qB8beXktUt3HIIAPDiyiKTR3Ix/v5TqGEAQ2GDb+D68PQ6+qr4K83AnKuuQ52FllfCCZdQKdQwgKGwwTdwfRw7U+Xy/zfX7cWwGWtw+PQ52c9pEess+s1EXnOs2llm9hCIdMUAhkJedKQNAGdg9LJut2sy6EurilBWUYUZn+92eb7ivDNgjI1yvtWcrHQNgIiItGAAQyEvPi78tvDGxxrX5qxnWoLH58vPV7v8f9TL6z0eV1Ubfi0drKBT6+ZmD4FIVwxgKORJ+RcVYRTASNuWAeiecyJXyM7XDJd0AQ31RF6rCsdCjhTaGMBQyIt39OEJnxyYeCGA0fvCdUamJYO4ZORJedPHQ6Wlw0ffHcJXe46bPQzFwimAp/DAAIZCXrjUIBHFCDkn9nP6Bgxyr6Ov17e8aRwb9+rbIdsMe4+dxWMfb8edszebPRTFqrl0RyGGAQyFPEcOTJhOoR87o75yrjdyd/K+ZlZS4mMBABnJwZ+LYRdmm2qFJbWk5tGOx1aqxksUihjAUMhLiAu/GRiR3jkwcktIUsl6OZldWzcdF/wzAQnCEp34e9UjNd7xuMR+PqBjIgo3DGAo5JyqrMbrX+7B4dONFxApoTWcdiGJ9A7cSiu0zehIS3mhMBMWFel86xRf3w7C7JLNZtP1ax61n8es1UU4pvL1F5cTiUKJcXstidBYoXXuxoPonhqP4d3aBORr/uXTHfh8R6nj/1JCa3VdPS7U1CEuOjIg47CKMzonL/90Ur5gnTehmoskLp2JMcs5nQO13/03H9sP27Gu6DiWPXil4s9LiIvCibONW9zD8fefQhdDczLUdz+dxrPLfsAd730bsK+Z/9Npl/83j4lyXFhC4e5fLaNnnlrEKLsgJjRrzA+RW4IKVnK5P+eq6zSdb93uY/h6z4mLnt9+2A4A2HHErup8YhXkUAseKbwxgCFDnTwb+KqrCXHRLv+PsNnQMiY07/6V0Gv7bFSEc3pBzKuRAhNf4kM0F6lMZklHbaABNAZD98zdgl/P/hYXalwDoAiNK1IRwrRQqAWPFN4YwJChmsc47/4CtY3T0wW1pePiGX5v4Mu2lehyHnGHjTirIwaM7hddUagGMF/v8bwt/OTZao/Pe1MrJELb3erqtNChunKovfYU3hjAkKHEi16gCpgleghg4sM4kXd36RldzhMb5VwqEtsGtIh1Pu/tAtmsKffi670XL48EM7mgOCpS/ZRJpDBbcqzCdfZSj+Dj++LTvg8iChIMYMhQrtPXgQkePAUw4dhOQNJcYY6KEtJrW1nlnGkRf8begtTzXmZngpncLh9/k2Wral1fLz1+jqH6M6DwxACGAiZQyzdijQ6JNDN/+pz6af1gN7hTsm7natMyBoB8oOKtnUCXNi11G4eVHDxZ6fF5f2c7yt0qKPdNT/TrfAAQHcG3fAodqn6bZ8yYgSFDhiA+Ph4pKSmYNGkSioqKXI65cOECsrKy0Lp1a7Rs2RKTJ09GWVmZyzFbtmzB1VdfjaSkJLRq1Qrjxo3Dtm3bXI7Zvn07RowYgbi4OGRkZODFF1/U+C2SVew7fjYgX8fTDMy2Q+UAgB9KKgIyBivZ8KN+/XoONm2hLrV7TlwVZ7jaJcYBACb0bQebzYbUhMb/R0bY0NAQOlVqW8rkpqzYftSv87rnwLgnp2tRF0KvO5GqACY3NxdZWVnYtGkTcnJyUFNTg7Fjx6Ky0nkH8sgjj2DZsmVYvHgxcnNzUVJSgptuusnx8bNnz+Laa69Fhw4d8O233+Lrr79GfHw8xo0bh5qapmZvFRUYO3YsOnbsiPz8fLz00kt45pln8O677+r0bZMZzlcHJok31svUvfsWa1JH2n20cZ/nPBZPMzAPjOwKwBlY1tU3hNR29k37TxlyXvcARgyU3JeXlNrKHBgKIarS2leuXOny/7lz5yIlJQX5+fkYOXIk7HY7Zs+ejQULFuCqq64CAMyZMwe9evXCpk2bMGzYMOzevRunTp3Cc889h4yMDADA008/jb59++Knn35Ct27d8MEHH6C6uhrvv/8+YmJicNlll6GgoACzZs3C/fffr9O3ToFWrfFNVw/dUlpi77GzCMf7T38Kwp6rrnVpETC0czK+PXAKl6bEezzeW55TXHQEoiNtqKlrQMWFWkePqmAX72HJEnD2ftLqohkYYWbxzIVaxLZUnxOzameZ74OIgoRfC6J2e2Odg+TkxjX2/Px81NTUYMyYMY5jevbsiQ4dOiAvLw8A0KNHD7Ru3RqzZ89GdXU1zp8/j9mzZ6NXr17o1KkTACAvLw8jR45ETEyM4zzjxo1DUVERTp/2fAdRVVWFiooKl39kLUu2HjHta9844BIAQO92CaaNwSzRkdr+zC/U1GHQX7/E5Lc3Op7rmtKYxyJXpM1bEq/NZnPMwujdIdtMckHbsTP+1UByD2DEZGn3jxGFI80BTH19PaZOnYrhw4ejT58+AIDS0lLExMQgKSnJ5djU1FSUljaWdo+Pj8f69esxf/58NGvWDC1btsTKlSvxxRdfICoqynGe1NTUi84hfcyTGTNmIDEx0fFPmt0h6xAb3QXa8aaLySffHzZtDGaprq33Wp9Fzv7jl
RftWlnwbTEA4NUvf/T4Od6SeAHnLEKgttQHM2+vpa/XWU6XNi20DofIcjQHMFlZWSgsLMTChQtVfd758+cxZcoUDB8+HJs2bcI333yDPn36YMKECTh/Xnv31unTp8Nutzv+HTp0SPO5yBjRJjaVC4UOyP44rmE2IKGZ+sJpvgITKRGVMwi+lXsLYDSWA9h/wvOOKaJgpOmKkp2djeXLl2PdunVIT093PJ+Wlobq6mqUl5e7HF9WVoa0tDQAwIIFC3Dw4EHMmTMHQ4YMwbBhw7BgwQIcOHAAn332meM87juXpP9L53EXGxuLhIQEl39kLdLduxnG9E71fVAI09KXJ0JD8kzFee8XVscSUogFMGJrBb14e420zsAQhRJVAUxDQwOys7OxdOlSrF27Fp07d3b5+KBBgxAdHY01a9Y4nisqKkJxcTEyMzMBAOfOnUNERIRLq3np//X1jXfJmZmZ2LBhg2NXEgDk5OSgR48eaNWqlfrvksKeVAX2kqRmJo/EHIGqweNrBkYKYELtAlwuU1/In/YZ3gIYb7Mz3oiVsYmCnaoAJisrC/Pnz8eCBQsQHx+P0tJSlJaWOpZ+EhMTMWXKFEybNg3r1q1Dfn4+7rnnHmRmZmLYsGEAgGuuuQanT59GVlYWdu3ahZ07d+Kee+5BVFQURo8eDQC4/fbbERMTgylTpmDnzp1YtGgRXn/9dUybNk3nb58CqV9GkmlfW9oRcqRc+zJlMNNrxuOy9t5nNn3nwEQpOi7YfHvA81bq4lPnNJ/T28/sg00/aTpnxfka1BswW0RkBlUBzNtvvw273Y5Ro0ahXbt2jn+LFi1yHPPqq69i4sSJmDx5MkaOHIm0tDQsWbLE8fGePXti2bJl2L59OzIzMzFixAiUlJRg5cqVaNeuHYDGQGj16tU4cOAABg0ahEcffRRPPfUUt1AHuRN+7srwh1jcLhzzYfQKYHr52MXlq12EYwYmxFo6LN/uuWHmykLtxey8/cy09reqb2BDRwodqrL0lFTPjIuLw5tvvok333xT9phrrrkG11xzjdfz9O3bF1999ZWa4ZHFmTn7kdTcuSX/2JmqsFlKahkbhbNVtV4vhvZzNaipr0eblr7rlvxhVFd8nC+/k8tXoBSqOTByQUHBIbvmc1bX1qO6tl6215JW5eerkcilJAoBbIxBAWVWCfnICGfOlVwZfKNdqKnDrqMVAX0NpIvfpv0nZY/p99xqDP7bl4qq47YSAkFPW7OV5sCEWgAjduoWbTtc7td5jXid3HssEQUrBjAUUFp2w+jNrCDqztnfYvzrX2FloedaRkY4VdmYXLput+9+SD+W+V6WEJfiTlZenLh6ocZ7zRlpG3Wo5cB8uctzhVst29dFRgQw4djQlEITAxgKqBITl5H6NXXzNesOdMvBxirS8/K0JWD6o1pB3o+S6rgRwkzWaQ8BDOD9AhmsMzCnK6vx1GeF2HFY+5KQFnq+TtJsXLC99kRyGMBQQC3aEvgCgw1NHZCkN/CTleYlEwPWvYAcPq1sx0x6q8b8IbmGgt5qwSQEaQDzwue7MC/vJ1z/xtcB/bp6zlS1asp7kQs8iYINAxgKqF2lge9RdaGmcfZBmgFZamJPJgDYc0zbDhKj5XnJkxEdPt04i1Zq9xwIegtOpBkYf/sEBdrmg8Z0nPbF22sptx26vr4BnxUcwf7jZ12eT2rWmL+ktYYMkdUwgKGAyv/JczNOI0l3sYM6NhZBbJdo7g4ksbuzlVTVqNte/shHBR6f3+LlYt8y1rnxUW4Gx4oSTOqc7S2A2esWoEhW/1CKhxcW4KpXcl2el4rYeVtC3Xf8LHJ/9J0vRWQFDGAooC6ovEjqQQpghndr0zSG4Llw+qu/iuKB4lZzJeSqzEZHyrcgEJOAg6kjdfukOFO+rrdgo6C43OPzWw95ft4ZwMgvIV39Si7uen8zvi8O/I0GkVoMYCiguqe2DPjXlO5i9x1rvGP9IoC7gMwmzToBvmc8lCT6ehMX3fh2YoN8ABMRYXPmYgRRANNJ6OIcyF1s3mZg5IIMudkiaQlJyeueW8RZmFBwoaYOnxUcwYmzwbVkqxQDGNJVqf0CXvh8Fw7JlFC/NDU+wCNyXgSGdUkG4OyLFA7EGZidJd7zj1b5COyqfPT1kT5eIDMDIJEuoCX24GnrIAYFnkoBJMSp79ythLfZErnXOTXBOVskVp1OatE0A6MgB+bzHdorCJN1vLF2Lx5eWIBfvpNn9lAMwQCGdPXQh1vx7ob9uOVfnv9gVmwP/BujVFxNuovukNw84GMAnLtAAileuLB+7yP/yNcMjK/6IdLExAqFF78ffARUViK+jp5mRSou1Lp0pG4eo0+QvMRLwrlcOwHx90zccSTNwNgV1IFJSzRnyYz0tbBp1+eBE5Umj8QYDGBIVwVNlUePmlTt1pPKqsY7ZqmK7CmTCnmJVWzNKKbnK6/BV4BVp1MTwMFNy1rJLdTl3JhJXBTbJyTPirN54pbnK5vyrQD53UJGEWv1iL/r0s/34Enf2+VDrdBguIrxko8WChjAkK6SfSSCZiQHdgdQ85hIvPzLfgCcCaTHz1SZEkAkCQHCGQVl+/XmaweY0pyULm1beHx+eLfWAFx3GnnSsXWLpq8XnPVIxEAuSrhAiEszvYWu3WY2Tzx4whmsJAgJ1LU+Ztu2BbhgHxmjxEI3kkZgAEO6qq33/sYYyPyTW4dkYMcz45DZtfHC2rqlM7hS0vdHb3HC915eGfg73LIKfRL5BnZo5fH5zk1LdL527LRSsJ3Xyopklm6KhBpH0ZERjmWk8vPmBWpiDo044xVshQRJm9ZBNMupBQMY0lV3H0m6P5Z5rl1hFLGJY/OYKMQ2VeM1++Jp1dkHX3fmgHwAI82++ZrJadX0phqsFWE/LSjx+Lz79+1YsvTyfR47c8Fnew1/lu7qhJnGKLENhEV//0hfYhK/GTdtRmMAQ7pqGx/r8xizmikCzouK2W/gZn99OUrygwZ2THI8PnbGOUUtBibefsZSfsXi/MMaR2murjJLaJVuF4gjTYHJNpndQg0NDbji+TX42cy1Xrt4+/O7cvKs58/dziWisDC4U7Lj8eYD3itt19c3BDxfy18MYEhXYqEyMeIXd3GcN7GQnJJqpIHg7evX1tXjvv9swRtr9wRwRI2OKVhmujTFOcu2VSimJgWHtfUNXnN8WgX5tLZ7oPKLfu0BOHdhuVuooP9X4RH5gMKfjtazcn70+HxpRWjnRlAjqf8bAGzcKx/ANDQ04Ma3vsHEf36tW7J+IDCAIV2JCZzrdh9zPBZzX0rKzXvzlLaemtHSQOTtAvL13hP4ctcxvLza88XHSN5mAiTispy4sykuOsLxc/aW4zOgaVq7cxvPMxlW18otUV3KrTops1Qkt91ZNMvLz/qNtXtd/t/nkgSZI33r29SRXdpSTeFj4z75AKayug7bDtvxw9EKn0uaVsIAhnRlE3btbT5wyuPzRyzwB+KtX08gbPDSb0YMEJTkpOgpz8ubnCf5B10DQSlR1NtSlOOCH6TVQd2XdNq0bFw23XVUe12b
77wE1N+6Tf13TG4M/LTUmundrjH4UVKZtSbAv3tkrB+8/H5GCm/Q3nK2rIYBDBlmcCfPyZ5W2AHh7W4kECo9VHOViHf4enQOVnPHrrY2i/uborRE5y1BN7lF4wW/4kJtUF4kT7kt/0nJse7LQFLCuBiQanHirHvA1PgzOlddh3PV6hIzfQWP4kxpMBUaJP2YVSdLCwYwZBibzfWNW2ryF07NFOXIJXYCrhc8b6XklerTPlHxsWorJd83oovL/x0zMF4CmKRm0ZC+xWDcieQ+Zql0v/sS0p/G9QAAjLy0DfTUPNa5m04uSVeONFvkHhRJxFpF3maFKHSJM+dWxwCGDPP+1wccj22wOUr47z8emmWt1VA6/V9U6v+28/tGdHY89rXLQO6i1atdAu670nme267oAABwn1xQssursaGj97wRK3MPYJrJ/Cyl5Nt1OjdGtEEMRNQtw6n5vPyfgudCRvqpN3GXqFoMYMgw7s3mpLvyZds819EIhJhIa/zKK32TOHDC/wCmhZBYLZbBVyomMgJfPDwCf5nY2/Fca5mZFmkGxtfWXyUzNVZ1pqrWpbO3XDKy3luVG+D8nZFmM5XMwJwXliulJSRlAQxnYMJRTa3396bjZ6rwzd4TppbDkFjj3ZzCwuSB6QCAHmmB70gtkcrdm+1CjbLcj2gdAq52ic72DVsO6nNROniycRZtXt5PLs9LW4x9LUVJAUwwzsAAjV3XJWK101phhqt7aku/vsZTTQHjwA5JF31M6mf0sYJaOicrncFK26YZGCWvu16Vmym4fJzvfdv/+Nc34I73vsWqnd671wcCAxgKmDVN26rXCturA214N33zEfyhJIH1w83Fun5NPWZ0APn+PodON15YfTUMlGYCTgXpTiTx+08Skq7Fmi0jLm3reCzO2CglBR7fC7V23EUoeAevqXMGVa2bApjyczVBmUBNxqvw0btLyp/6+8qiQAzHKwYwFDBWaOkuBjBlJhfzUlJMT0nnYDX6XKI8odebGwdc4vF58aLtTTAvIQGu4xaTrqWZKQAY2sVZBXWrlyBEjre2HN1SGmd31OaTJTWLdoxXbQIwBT8lyz7S75YvVng/ZwBDAXPL4HSzh4AewkXhm70nTBwJYDehyd/DCwt8HqPkTU6uZYTYhdkbaSt1sC4hyW1DPnTKWeMoPs65o0fJtn33WRpvAUy7xMadT2qTeCMibI4lL7WfS8Fv7zHfM7BKjgGcZQLMZP4IKGzcP9K55dasBLAI4W75a5MDGD12GBmh4rzv2iJdmvoBRdhcE5KTmyurIyOXBBws1AZeGxX8rrnP0og1eY65zRZed3k7AEC/9CRV4wCcy0gMYMKPnvWvqmrNX4JkAEOGqhbW2cVk0nNeCrkFirfeIIFgZk8ob/YpyJNp3TSDUt/gWmxP2ioPeM/xCfYk3nUq87iU1FRxr4IsFjTM2+/6MX9evzaOYnbB+dqTdmbPOuuNAQwZqkzYrSHWPik+pSy3w8iZGrMb2u04XB6wr5XWVGxNCbuC3JyYqAhH0bMTQuJqophj4bUab+NFNJiKZolK7K6/O3cM7eD3OTe5BSlijR33C48jCKlUP4uitYYMBT+lAcx5C9xgKsEAhgwlBipiZV73jr6ebNx3AkOeX4OVhcq26+X/dAo/lvlunGcVBxQk6LbQ0O/GkxsGtFd8rFwHY3fSDIG4ayEiwuYITrzFnmLTz1DgrZu0Ume9/E24V86Vdj6JOTdKtVFYCyYqwmaJWh+kH28tTETBEtwygCFDidtKRe53m57c/u9vceJsFR6Yn6/o60x+Ow9jX91g+TfdFJkEWE8qNfS78eTKpt1XStry7FB4Ma6TqerbWkE/pfg4ZwDjnt9hZVIM7v49+ihwrEh6q2ayH3P/nZFmUQBo6IfUlEDtYwmptr4BpxXMxlHo8ZabJv7umf1eywCGDCVX88VXrQG1xAaRcjVKrELK/1F6137ijP+5CtKbjh4XWolcTyvxDU5Op9bO6rVf7QmedfnWMrknNw/yf4edt+KG64pc/44ShACwRGV3d+nnc1zBXbbZpQbIHF/tkW9/IQbvSkpBGIkBDBnqEre7ylE9GuuE6L2EEBft/FU+arf2m640GyI3g+HuhIY8B3diQmi1zO6BMb1SATjH58vlMjVlpCJ13oi7wX48FjzLfgnC1mjRpTK1M8SZJl9SEy4O/AY0VeGNi3ZdShSXY4+rDHDVJPEGY7PNcFRbV4+jdvXLiXJeX7NH9mPie23ZGXPfaxnAkKHc7673lDXucFGaZ6GFew8mdz1NbGUAAFFNfWzEWSNv3HenaCEuQcjNin3X1LxP6fZyucBD2qGkVBuVx5tJrrVDuyTPyz8/66q8dYWn5VYp8PzJS77UfpldY3JBlZIkXmmGx+wLFCnzwPx8ZM5Yi28VLM0rIVZv9sbs2W4GMGQo95yLIyqmu6+7PM3xWM1aq3iH4MlzN/RRfC4jpLdq7vsgwXcH/d+pI854bD3keUuv2ungK7t5rrrbqrnnWQo5x4LoItmpjeefnafZE0D5bBbgeUmnfZLv3WMFMlV+3WdtJG2Efkhy3clTmnatsR9ScPhyV+NNybSPtgX065q9aYIBDBnK/f3xz9f1VPy5GcKFXk0y4Y9l3uuYiBcFuTdwI2UkO+/WlezG0tulKZ5noPpcoqyKrmRMrxSPz6vdnv7vrw6oOt5MkTJZ0M1jPC8V/UwIYA6f9r7rzFOuk1SwzhMpUFR7EZF2idXVN8jOAkoBGXNggoteNwNDOyf7PgjAt/vNLYPAAIYMIVdqvpXCSq2Aa6lqpVuplRDHZkYhNTH/R0kewroi+YQ6T2rr6lF4pOKi56WLklzQNvXq7o7HSma8UmVqy1yh8M0vHHRp40xW9lU48cTZKpeqxoD7jg/X46Wgftth+WTwQR1bXfRcTFQEEptFO76mJ6nxjT/bY5yBCSpyCfRS6wkAOHPB981ghE3BdkUAHVurm03WGwMYMkRnYZeJqK+G0ucAdG3dHhvlnFpftq1Et/NqkV+s/x3M/2S+J6mRpdwMSS+hj5GS5SS57eCdZH724UhMtv1mn/fcotr6BpeqxoBrZePT59QH253beP5ZOGvBeD6ncwmJMzDB5JhM2Yr+GUmOx0py6twrP8v559q9io4zCgMYMkSiTB6E2N/FvXmdN3JvxP6Su9gHyort+gVmErlZHSm5+d0N+z1+PC5K3U6u5BYx8HSjlqygDgwA3Du8s6LjQoWSWUT3GRExj+WzAtff1e6pvrsGF5XKJFr7SOR1LCEFUX4Sye9sjBH+toO1fYcnDGDIEHJJjW2ELbZqpqfnbjzo75A88rVjyWgVCqZz1UoVpovFkuDDujTuiGmuoLqvtzoQkqjICI9F61opDGD6pnvehh2q7hjaUfZjUtAnV/jRk3GXpfk8Rq4oYVtHMTu5AMa5hGR2sTLSl9wNjDslS01mYwBDhpDbbipOqW8LYC8gq1LaC6jWS2NEd2LtEXEJoHvTtlq5aWbRjC92K/pa4nKcRGmNnw4mr5/Lqaqtw9bi07oleEvLQDtL5HN
[... base64-encoded PNG image data elided ...]",
+     "text/plain": [
+      "<Figure size 640x480 with 1 Axes>"
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "N = 5_000_000\n", + "seq = generate_seq(N, 50, 40)\n", + "# Approach 2 - calculate a decaying average\n", + "D = 0.99999\n", + "y = []\n", + "avg = 2000\n", + "for i in range(N):\n", + " avg = avg * D + seq[i] * (1 - D) \n", + " y.append(avg)\n", + "\n", + "x = np.arange(0, N)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Approach 1: \n", + "Clearly fails in this case and is highly influenced by the unusual clusters. \n", + "\n", + "#### Approach 2:\n", + "Learns the long term average and clusters only cause minor spikes." + ] + }, + { + "cell_type": "code", + "execution_count": 95, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAj4AAAGvCAYAAABb4N/XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA7CElEQVR4nO3de1yUdd7/8fcAAiIgKoqiIp4PqVgi5pprItWS6y/XrW3XuiPb6m5vbE23Wm0r213TujusbXFrmWm56+rWpm15SvFAtZqKYpqpqeRZ8AQCKoeZ+f0BMzDODCcdxrl4PR8PHzIX18x8R796vfl8D5fJarVaBQAA0Aj4ebsBAAAADYXgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0CD4AAKDRCPB2A643FotFJ06cUFhYmEwmk7ebAwAAasFqtaqgoEDR0dHy83Nf1yH4XOHEiRPq2LGjt5sBAADq4ejRo+rQoYPb7xN8rhAWFiap/A8uPDzcy60BAAC1ceHCBXXs2NF+HXeH4HMF2/BWeHg4wQcAAB9T0zQVJjcDAIBGg+ADAAAaDYIPAABoNAg+AACg0SD4AACARoPgAwAAGg2CDwAAaDQIPgAAoNEwbPDJy8tTfHy8BgwYoL59+2ru3LnebhIAAPAyw+7cHBYWpoyMDIWEhKioqEh9+/bV2LFj1apVK283DQAAeIlhKz7+/v4KCQmRJBUXF8tqtcpqtXq5VQAAwJuuKvi89NJLMplMeuKJJ65Rc8plZGRo9OjRio6Olslk0rJly1yel5aWptjYWAUHB2vw4MHasmWLw/fz8vIUFxenDh066KmnnlJkZOQ1bScAAPAt9R7q2rp1q95++23179+/2vO++uorJSQkqEmTJg7H9+zZo1atWikqKsrpOUVFRYqLi9NDDz2ksWPHunzdJUuWaPLkyZozZ44GDx6sWbNm6Y477tC+ffvUpk0bSVJERIR27typnJwcjR07VnfffbfL9wMAXL9Kyiw6W1SsMwUlOl14WRculamopEyXSswqKjbrYmmZikstMlusKrNYZbZYKn6veGwu/71c+e+2AQD7Uau1ytfO36uKwYOr996DgxQY4J1Bp3oFn8LCQt13332aO3eupk+f7vY8i8Wi1NRUde/eXYsXL5a/v78kad++fUpMTNTkyZP19NNPOz0vOTlZycnJ1bbh9ddf1yOPPKLx48dLkubMmaPly5frvffe05QpUxzOjYqKUlxcnL744gvdfffdLl8vLS1NaWlpMpvN1b4vAMBzci5c1tfZ55R1JE/f5xbo0OkiHc+75O1m4RqrjJkNr17BJzU1VaNGjVJSUlK1wcfPz08rVqzQj3/8Yz3wwANauHChsrOzlZiYqDFjxrgMPbVRUlKizMxMTZ061eG9kpKStGnTJklSTk6OQkJCFBYWpvz8fGVkZOg3v/lNtZ8pNTVVFy5cUPPmzevVLgBA3Z3Iu6SlO47r050ntPdUgctzAvxMahUaqMjQIEWENFFIYICaBfqracXvgQF+CvD3UxM/k/z9TQrwM8nfz6/i9/JfporXMlV8YT/i+JtMthMcjrl+bpVTUQcBft6bYlzn4LN48WJt375dW7durdX50dHRWrdunYYNG6Zx48Zp06ZNSkpK0uzZs+vcWJszZ87IbDY7DVtFRUVp7969kqTDhw/r0UcftU9qfvzxx9WvX796vycA4No6eu6iZq39Xp9kHbcPRZlMUp924RoU21K92oapW5tQxUY2U8uQQPn5kTJw9eoUfI4ePaqJEydqzZo1Cg4OrvXzYmJitHDhQg0fPlxdunTRvHnzHBK1JyQkJCgrK8uj7wEAqDuzxap3Mg5p1tr9Ki6zSJISOrfUz29qrztuaKuIkEAvtxBGVqfgk5mZqdzcXN100032Y2azWRkZGXrrrbdUXFxsn8dTVU5Ojh599FGNHj1aW7du1aRJk/Tmm2/Wu9GRkZHy9/dXTk6O0/u0bdu23q8LAPCs/EulemLxDq3fd1qSNKRLK/0+uZcGdIzwbsPQaNQp+IwcOVK7du1yODZ+/Hj16tVLv//9712GnjNnzmjkyJHq3bu3PvzwQ+3fv1+33nqrgoKC9Oqrr9ar0YGBgRo4cKDS09M1ZswYSeUTqdPT0zVhwoR6vSYAwLPyL5Zq3Lub9e2JCwoK8NOf7+qre+I7eHwEAKiqTsEnLCxMffv2dTjWrFkztWrVyum4VB5GkpOT1alTJy1ZskQBAQHq06eP1qxZo8TERLVv316TJk1yel5hYaEOHDhgf5ydna2srCy1bNlSMTExkqTJkycrJSVF8fHxSkhI0KxZs1RUVGRf5QUAuH5cLjXrgflb9O2JC2rVLFALxieoXwcWkqDhefSWFX5+fpoxY4aGDRumwMDKMdu4uDitXbtWrVu3dvm8bdu2acSIEfbHkydPliSlpKRowYIFkqR7771Xp0+f1vPPP69Tp05pwIABWrVqFfv0AMB1xmq16rllu7XzaJ4iQpro748MVq+24d5uFhopk5X7ODiwLWfPz89XeDj/MAHgan2SdVwTF2fJzyQt/PVgDe3GLvq49mp7/TbsvboAAN6Xd7FEf/p0jyTptyO7E3rgdQQfAIDHvPr5Pp0tKlGPqFD9z63dvN0cgOADAPCMo+cuasnWo5KkP/6/vl67NxNQFb0QAOARaesPqNRs1dBurTSkaytvNweQRP
ABAHjA2cJifbz9uCTpiaQeXm4NUIngAwC45j7MPKYSs0X92jfXoNiW3m4OYEfwAQBcUxaLVYu+PiJJ+q+bO3m5NYAjgg8A4JrKPHJeR85dVFhQgEbHRXu7OYADgg8A4Jpa/s1JSdJtN0SpaaDzPRwBbyL4AACuGYvFqhW7yoPPqH7tvNwawBnBBwBwzWw/cl65BcUKCw7QLd3ZpRnXH4IPAOCa2bj/tCTp1p5tFBTAMBeuPwQfAMA1k/H9GUnSMKo9uE4RfAAA10TexRLtOpYnieCD6xfBBwBwTfzn4FlZrFL3NqFq17ypt5sDuETwAQBcE1uyz0mShnaj2oPrF8EHAHBNbD9yXpJ0U6cWXm4J4B7BBwBw1S6XmrXnxAVJ0k0xEd5tDFANgg8A4KrtOp6vMotVrcOC1D6C+T24fhF8AABXbfvhimGumAiZTCYvtwZwj+ADALhqOyuWsd8Yw/weXN8IPgCAq7b3ZIEk6YbocC+3BKgewQcAcFUulZiVfbZIktSrLcEH1zeCDwDgquzPKZDVKkWGBqp1WJC3mwNUi+ADALgqe0+VL2On2gNfQPABAFyV7yrm9/RqG+bllgA1I/gAAK6KveLTjooPrn8EHwDAVTl4unxic/c2oV5uCVAzgg8AoN4Ki8t0uqBYktS5dTMvtwaoGcEHAFBvP5wpr/ZEhgYqPLiJl1sD1IzgAwCot+yK4BPbimoPfAPBBwBQb7bg0zmS4APfQPABANSbbaiL+T3wFQQfAEC9HbIFH4a64CMIPgCAevuh4h5dsQx1wUcQfAAA9VJYXKa8i6WSpJiWIV5uDVA7BB8AQL2cyLskSYoIaaJmQQFebg1QOwQfAEC9HD9fHnyimzf1ckuA2iP4AADq5XhFxSc6guAD30HwAQDUiy34dGhB8IHvIPgAAOrlhL3iE+zllgC1R/ABANSLbY5P+whWdMF3EHwAAPVCxQe+iOADAKizUrNFpy5cliS1Z3IzfAjBBwBQZ7kFxbJYpSb+JkWGBnm7OUCtEXwAAHWWW1HtaR0aJD8/k5dbA9QewQcAUGenC4olSa3Dmd8D30LwAQDUWW5F8GkTxjAXfAvBBwBQZ7bg05rgAx9D8AEA1NlpKj7wUQQfAECdnS6omNxM8IGPIfgAAOqsco4Pk5vhWwg+AIA6O80cH/gogg8AoE4sFitzfOCzCD4AgDo5f7FEZRarJLFrM3wOwQcAUCenC8urPS1CmigwgMsIfAs9FgBQJ2cLSyRR7YFvIvgAAOrkXFF58GnZLNDLLQHqjuADAKiT8xfLg0+LEIIPfA/BBwBQJ7aKTwsqPvBBBB8AQJ2ctw91NfFyS4C6I/gAAOrk3MVSSQx1wTcRfAAAdXKeyc3wYQQfAECd2Cc3E3zggwg+AIA6sVd8GOqCDyL4AADq5NxFhrrguwg+AIBau1Ri1uVSiySGuuCbCD4AgFqzVXsC/f3ULNDfy60B6o7gAwCotfP2zQubyGQyebk1QN0RfAAAtWbftZmJzfBRBB8AQK3ZlrJHhLBrM3wTwQcAUGsXLpdJkpo3JfjANxF8AAC1duFS+e0qCD7wVQQfAECtXbhcHnzCgwk+8E0EHwBArV24VD7UFU7FBz6K4AMAqLXKik+Al1sC1A/BBwBQa7Y5PlR84KsIPgCAWrOt6mKOD3wVwQcAUGsFVHzg4wg+AIBas8/xacocH/gmgg8AoFasVmvlqi6GuuCjCD4AgFopLrOoxGyRxFAXfBfBBwBQK/kV83v8/UxqFujv5dYA9UPwAQDUin0pe3CATCaTl1sD1A/BBwBQK5UTmxnmgu8i+AAAaoWJzTACgg8AoFZYyg4jIPgAAGrFNscnLIiKD3wXwQcAUCsFxeVDXaHcoBQ+jOADAKiVIlvwCSL4wHcRfAAAtVJUbJYkNQtiDx/4LoIPAKBWbBWfZlR84MMIPgCAWikqYagLvo/gAwColULbUFcgwQe+i+ADAKgVhrpgBAQfAECtsKoLRkDwAQDUSqG94sOqLvgugg8AoFao+MAICD4AgFqx7eMTQvCBDyP4AABqVFJmUYnZIkkKZVUXfBjBBwBQo4sVe/hIzPGBbyP4AABqZJvYHBTgpwB/Lh3wXfReAECNbPN7mNgMX0fwAQDUqJDNC2EQBB8AQI3YtRlGQfABANSocg8fJjbDtxF8AAA1YqgLRkHwAQDUyD7UxR4+8HEEHwBAjYpKyld1sYcPfB3BBwBQI9sGhiFUfODjCD4AgBpdKim/XUVIIBUf+DaCDwCgRpdKy4e6mjYh+MC3EXwAADW6VDHU1ZSKD3wcwQcAUCN7xYfgAx9H8AEA1OhSafkcH4a64OsIPgCAGtmHugg+8HEEHwBAjWxDXcEMdcHHEXwAADW6VLGBYQgVH/g4gg8AoEa24MPkZvg6gg8AoEbs4wOjIPgAAGrEcnYYBcEHAFAti8Wqyyxnh0EQfAAA1bpcZrZ/TcUHvo7gAwColm1isyQFBxB84NsIPgCAatn38GniJz8/k5dbA1wdwwafvLw8xcfHa8CAAerbt6/mzp3r7SYBgE+yL2Vnfg8MIMDbDfCUsLAwZWRkKCQkREVFRerbt6/Gjh2rVq1aebtpAOBTWMoOIzFsxcff318hISGSpOLiYlmtVlmtVi+3CgB8D5sXwkjqHHxmz56t/v37Kzw8XOHh4RoyZIhWrlx5TRuVkZGh0aNHKzo6WiaTScuWLXN5XlpammJjYxUcHKzBgwdry5YtDt/Py8tTXFycOnTooKeeekqRkZHXtJ0A0BhcZA8fGEidg0+HDh300ksvKTMzU9u2bVNiYqLuuusuffvtty7P/+qrr1RaWup0fM+ePcrJyXH5nKKiIsXFxSktLc1tO5YsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfXfk5ERIR27typ7OxsLVq0yO37AQDcu8wcHxhInYPP6NGjdeedd6p79+7q0aOHXnzxRYWGhmrz5s1O51osFqWmpmrcuHEymyuXQ+7bt0+JiYl6//33Xb5HcnKypk+frp/97Gdu2/H666/rkUce0fjx49WnTx/NmTNHISEheu+995zOjYqKUlxcnL744ou6flwAaPQqV3URfOD7rmqOj9ls1uLFi1VUVKQhQ4Y4v7ifn1asWKEdO3bogQcekMVi0cGDB5WYmKgxY8bo6aefrtf7lpSUKDMzU0lJSQ7vlZSUpE2bNkmScnJyVFBQIEnKz89XRkaGevbs6fY109LS1KdPHw0aNKhebQIAo7pouzM7Q10wgHqt6tq1a5eGDBmiy5cvKzQ0VEuXLlWfPn1cnhsdHa1169Zp2LBhGjdunDZt2qSkpCTNnj273o0+c+aMzGazoqKiHI5HRUVp7969kqTDhw/r0UcftU9qfvzxx9WvXz+3r5mamqrU1FRduHBBzZs3r3fbAMBoLrOqCwZSr+DTs2dPZWVlKT8/Xx999JFSUlK0ceNGt+EnJiZGCxcu1PDhw9WlSxfNmzdPJpNnN8FKSEhQVlaWR98DABoDVnXBSOo11BUYGKhu3bpp4MCBmjlzpuLi4vTGG2+4PT8nJ
0ePPvqoRo8erYsXL2rSpEn1brAkRUZGyt/f32myck5Ojtq2bXtVrw0AcGRf1dXEsFu/oRG5Jvv4WCwWFRcXu/zemTNnNHLkSPXu3Vsff/yx0tPTtWTJEj355JP1fr/AwEANHDhQ6enpDm1IT093OdcIAFB/tqGuoCaG3foNjUid4/vUqVOVnJysmJgYFRQUaNGiRdqwYYNWr17tdK7FYlFycrI6deqkJUuWKCAgQH369NGaNWuUmJio9u3bu6z+FBYW6sCBA/bH2dnZysrKUsuWLRUTEyNJmjx5slJSUhQfH6+EhATNmjVLRUVFGj9+fF0/EgCgGsVlFkncoBTGUOfgk5ubqwceeEAnT55U8+bN1b9/f61evVq33Xab07l+fn6aMWOGhg0bpsDAQPvxuLg4rV27Vq1bt3b5Htu2bdOIESPsjydPnixJSklJ0YIFCyRJ9957r06fPq3nn39ep06d0oABA7Rq1SqnCc8AgKtTXFoefKj4wAhMVu7j4MC2qis/P1/h4eHebg4AeN2ERdv12TcnNW10H40f2tnbzQFcqu31m/gOAKjW5YqKDxsYwggIPgCAahWXVUxuDuCSAd9HLwYAVMs2uTmIyc0wAIIPAKBalcGHSwZ8H70YAFCtYvbxgYHQiwEA1SphqAsGQvABAFTLtnNzMBUfGAC9GABQLSY3w0gIPgCAajG5GUZCLwYAVMu+jw9DXTAAejEAwC2zxapSc/mdjRjqghEQfAAAbtmqPRJDXTAGejEAwC3bndklgg+MgV4MAHDLNrE5wM+kAH8uGfB99GIAgFvcoBRGQ08GALhlX8rehInNMAaCDwDALdscHyo+MAp6MgDArcsMdcFg6MkAALcqKz4MdcEYCD4AALdsk5u5QSmMgp4MAHCLG5TCaAg+AAC3uE8XjIaeDABw6zKrumAw9GQAgFvFpbZVXQx1wRgIPgAAtyrn+HC5gDHQkwEAbrFzM4yG4AMAcIt7dcFo6MkAALe4ZQWMhp4MAHCLOT4wGnoyAMCtUnN58Gniz+UCxkBPBgC4VVIRfAKp+MAg6MkAALdKyqj4wFjoyQAAt0qp+MBg6MkAALdsFZ9AKj4wCHoyAMCtUrNVktQkwOTllgDXBsEHAOBWZcWHnZthDAQfAIBbJfbl7FR8YAwEHwCAW0xuhtHQkwEAbjG5GUZDTwYAuEXFB0ZDTwYAuMUGhjAaejIAwK2SiuXsVHxgFPRkAIBbJWVmSVR8YBz0ZACAW7YNDIOo+MAg6MkAALdKzczxgbHQkwEALlksVpVZKm5ZwQaGMAiCDwDAJduuzRKTm2Ec9GQAgEtVgw9DXTAKejIAwKXSsioVH4IPDIKeDABwyVbxCfAzyc+POT4wBoIPAMCl0jI2L4Tx0JsBAC6VsJQdBkRvBgC4ZL8zOxUfGAi9GQDgkv3O7FR8YCD0ZgCAS7ahLio+MBJ6MwDAJdtydnZthpEQfAAALhVT8YEB0ZsBAC5VVny4VMA46M0AAJdKzbYblHKpgHHQmwEALpWYzZKkIIa6YCD0ZgCAS7adm6n4wEjozQAAl4rZxwcGRG8GALhkn9zMUBcMhN4MAHCphIoPDIjeDABwqdR+ry42MIRxEHwAAC5xry4YEb0ZAOCSbXJzAMEHBkJvBgC4VMYGhjAgejMAwKUyMzcphfEQfAAALpVayis+AX5cKmAc9GYAgEtl9jk+VHxgHAQfAIBLlXN8CD4wDoIPAMAl2waGDHXBSOjNAACXqPjAiAg+AACXyizs4wPjoTcDAFwqNdtWdVHxgXEQfAAALtkqPmxgCCOhNwMAXLJXfJjjAwMh+AAAXCpjVRcMiN4MAHCpzMKqLhgPwQcA4FLlUBeXChgHvRkA4JL9JqWs6oKBEHwAAC7Zhrqo+MBI6M0AAJdKuUkpDIjgAwBwyX7LClZ1wUDozQAAlypvWUHFB8ZB8AEAuFTKTUphQAQfAIBLbGAII6I3AwBcKrVwywoYD8EHAOCSfR8flrPDQOjNAAAnFotVFQUfBbCBIQyE4AMAcFJasaJLYgNDGAu9GQDgxLaHj8SqLhgLwQcA4KRq8GFVF4yE3gwAcFJ1qIuKD4yE4AMAcGKr+Pj7mWQyEXxgHAQfAICTUvtSdkIPjIXgAwBwUmbhBqUwJno0AMCJ/XYVVHxgMAQfAIAT2w1K2cMHRkOPBgA4KatY1dWEXZthMAQfAIATKj4wKno0AMBJKXN8YFAEHwCAE9s+PqzqgtHQowEATmw7N1PxgdEQfAAATsqY4wODokcDAJzY9vFhVReMhuADAHBSarFVfAg+MBaCDwDAib3iw1AXDIYeDQBwYp/jw1AXDIbgAwBwUrmqi8sEjIUeDQBwYt/Hhzk+MBiCDwDAiX3nZjYwhMHQowEATspY1QWDIvgAAJyYLUxuhjERfAAATti5GUZFjwYAODHbVnVR8YHBEHwAAE5sc3z8CT4wGIIPAMCJbY6Pv4ngA2Mh+AAAnNgrPqzqgsEQfAAATljVBaMi+AAAnNiHutjAEAZDjwYAOCmj4gODIvgAAJzYlrOzqgtGQ/ABADih4gOjIvgAAJyY2ccHBkXwAQA4YQNDGBXBBwDgxGxmqAvGRPABADgxW1nODmOiRwMAnLCBIYyK4AMAcMIcHxgVwQcA4MS2j08A9+qCwRB8AABOysxUfGBMBB8AgBPm+MCoCD4AACe2OT5+JoIPjIXgAwBwYq/4MMcHBkPwAQA4qbxlBZcJGAs9GgDghDk+MCqCDwDASVnFcnZWdcFoCD4AACdUfGBUBB8AgBN2boZREXwAAE4qKz5cJmAs9GgAgBP7Pj5cJWAwdGkAgBMqPjAqejQAwImZOT4wKIIPAMAJq7pgVAQfAIAT9vGBURF8AABOuFcXjIrgAwBwwj4+MCqCDwDAgcVilbU897CqC4ZDjwYAOLBVeyTJ30TFB8ZC8AEAODBXDT7M8YHBEHwAAA7M1srgw3J2GA3BBwDgwGyuUvEh+MBgCD4AAAe2PXwk5vjAeAg+AAAHtjk+fibJj4oPDIbgAwBwUMYNSmFg9GoAgANuUAojI/gAABywazOMjOADAHBAxQdGRvABADiw36CU4AMDIvgAABzYlrNT8YEREXwALysps6jnsyv19Ec7vd0UQBIVHxgbwQfXBWuVLfIbmy++P63iMov+ue2Y8i+Vers5QOXkZu7TBQMi+MDrPt15QgOnr9V/DpzxdlOuueIysw7kFlR7TtMm/vavX/j3t55uElAjM/v4wMDo1fC6GSu+07miEo1792vDVX5eWbVPSa9nKOW9Le5PqvJD9dIdx2WxGOvPAL6nzFy5czNgNAQfeF2vtmH2r6/niseH245q5GsbdPB0Ya2f8+6X2ZKkjftP2wON1WpVwWX3Q1rLso5fXUM94EBuoTbuP+2x179UYta8L7OVc+Gyx94DtUfFB0ZGr0adfXXgjD7efuyavNbu4/n64vvKIa73Nx2+bqs+T330jQ6eLtLI1zbW+jlDu7Wyfz3/Pz9Ikn7x9ib1e+Fzvb3xoCRp1trvHZ7zx0/3eL3qs/ybk4qdslzp3+VI
kpJe36iU97Yobf2BGp976HSh1u/NtT8+U1isBV9lVxv2Ptp+TH/+bI8Gz0i/bv/+GxOzlX18YFwEH9TZfe9+rcn/3KnURduv+rV+8/dM+0RKm85TV1z1617pcqn5ql+jRUgT+9fr9+U6ff9sYbHD4ycW79BXB87aH//5sz26XGrW1h/OS5Jmrtyrgsul2pJ9zuF5+ZdK9ev3t151e6+G7e/21+9vcwhhr6zeV2MwSXxto8Yv2KqZK76TJM3NOKQXPt2jH//verfPPZF3yf714q1Hr7b5uErmiuXsAUxuhgERfFAr24+c1z+3HnW4CC7/5qS9JF5fR89dcnm8uupAXU3/bI96PbdK97/7tcvv17a60ic63P71+PlbVWa22B9/vP2YBk5fq1tfWW8/tizrhNNrTFqSpZtiIuyP+73wuVo2C5Qk3RAdrh/3aC1JWr/vtA7VYUjtWthx5LxunpGuf249qvDgAPvxhZsPO5w3oyLQVFVUXObw5yFJb2ccUkmZRbuO50uSzl8sdRrGW/3tKQ16ca1O5VcOcU39eJf25zhOCH/yw52avCSrXp+rsTpbWKzX1+zX0XMXXX6/un+7tjk+VHxgRAQf1MrkJVl6+l/f6M/L9zgc7/qM++pMbYYsOrRoav/6/ptj7F/3e+HzerTSNds8my8PnFH2mSKH7y3cfFhdnlmh/9tQ8xDOlbr9YaX96/SKoZ0fzl7U14fOunuKVu4+pe1H8hyOnSsqkST97vYeevWe/vbjiXUYUrsWPsk6oVMXLuvpf32j1mFB9uPTrph3NfeLbB07X3kxzbtYosEz0vXTN790Wo7f49mV6t4mtPK1PvlWuVXm8fzp0z06XVCspTscA9Htf8mwf30q/7I+yjymj3ccV+yU5Vf3IRuRxVuP6q/p32vY/65XYXGZw/dW7T6prs+s0B+W7nJ6Xubhc3p0YaYk9vGBMRF8UCs/nC2/0M3/6gen7/33wm1Ox4qKy9R56grFTlnusnqzclf5HJJj5ysrPk8k9dA7/zXQ/vhaXeQGdmph/3rEqxtUXFY57DW7Ys7K/67ap13H8qt9nc2HyoekekZVTsZ+M718fk7VAHfvO5u1+3i+QoMqqybP/bRPrdraJixYI3q2tj8e+tK6Wj2vvkrNFhVVXBQjQwPtxw+eLnL3FEnSIx9k2p935NxFFRaXae+pAj38/laHzy2Vz9uyuXC5TAkz0u3VhsAA9/8F/Whmur2NVdVmnlFJmaVB5kldT/OR1u/LVZ/nV9krhacLKode+05b7dDWjzLLg+bfvz5in2tm8/PZm+xfm0wEHxgPwccHbD50Vre8vE57T11o0Pe1Wq06dv6irFaropsHuz1v9bc5+uSKIYwDuZXDNP1e+Nzp4vXWFRevjx4bosjQIN1+Q1uH41cTftLWH1DslOVOF+Kez65SSVl5e/q2b24/PvqtLx2GXKoqLjPbL9b/PbyL/fhra/a7HEL76Ztf2n/KfuOXA/TQ0Fil/264wzmPDe/q8Nh2XZo/PkGRoeUVl+N5l7Rk65EaP6vNJ1nHNfb/vtI3x/Jqdf69b2/SkJnp2pJ9zuVFLq5jhMPjRQ8PVmRooL47eUE/ffNLp3Cx9YfzTtUFmztuiLJ/fcvL62SxWB0Co+Q4GfxE/mVNcjG89crq6kPqxZIy9Xh2pbo8s0I/nKk+wLmz61i+Br24Vv/c5n6+0Usr99rDfUNyF7beTP9eF0vMSnxto07kXVKLkECH71edO1f1z33myr1644oJ9jZXzj8DjIDg4wNmbzioY+cv6SezvtAHm35osPf9MPOYbnl5vTpPXaFSFz89r538Y/vXExdnqcezK53Osen+h5UOwyM9qyxhv9L+6ckOj5/6sH63cnhl9T5JcrkMu8ezK1VSZlFQlc0DJenmmelOIU6SPShJ0k0xLbTrhdvtj788cEZvbzwkSQpyUcGI6xAhk8mkrq1D9bdfD7Yf79e+ubpENrM/rjrJe8szI+1f//5fu2p9cZ294aC2H8nT/3vrK728am+N528/kqcLl8v0i7c3ac6G8p/8E2Jb2r8/ZkC0Q0Dr1S5cc+4vr8plnylSl2dWqNRc2e6qFZzPHr/F4b1iI5tpSnIvSdLJ/Mvq8swKhz9XSfpR10gdmnGn/fHSHcd195z/OLV79Ftfavk3J11+puNVqoi3vrpBX35f940xF205rNMFxXr6o2+00M2/uTlVKiWxU5Y3SPXnnjn/UeepK/TuF4ecvpdbpcLzo5fW6fM9p5zOiZ2yXGaLVVdm3L+s3W8PP8O6R9qPV+0LgFEQfBpQSZlF357Ir/N/kFUDw/OffNtgP2F+d7KywmQrm//5rhvsxzpHhirz2ST745Iyi2KnLHea5Gpzy8vrtXhLefXiyrkD3asMHwUG+OnrKhf+DzOPKXbKcqeLZF29NLafw+Mez67UpzvLJyD/qGtlpWHi4qxq/4zbRQQrLLiJ1j95q9P37hvcSb+7rYfDMUuVv+9bukdq5cRhmpLcSyN7t9G6J2/Vnf3aqne7cCX1rqyI+PmZtPfPP3F4ndgpy3XmipVjVwoJrAxyszccrPbv40oFFZWaTq1C9LdfD9aInq01ql87TUnupbkPxGteSrxaNgtUfGxL/e/PK+ci/Xx2eTBpH9FUb/3qRvvxiJAm2vXC7WofUV5diGkZoseGd3Wo/HxdUVH4y71x2vn87Uod0U1+fiaHv/+cC+Wf2c8kvf6LOPvx1EXbXQaOKy/qDy3YqsVbjtTp393l0so/s+cq/s1VHSKVpF/Ed3B43HnqCo9XSGwrAqcv/86+as6ma+tQh8ffnij/93vf4BjH855ZoYyKHwZuqDJh/y9r9zv0+z/ddYOW/PfN167xwHWC4NOAbvrzGo3665fqPHWFVu5y/dOqK9ERTZ2OxU5Z7vHqT9WLqE1cxwhlz7xT2TPvlL+fSa1Cg7T8t44/2Xf7w0r9q2Kfn/YRTRVWZahpysfl1YtNFROAn/5JT2XPvFPNmzZxeI2o8GDtm+544e/x7Er7T6y1UXX5uSSFN22iH14apd8mdnM697Y+UZo+pq/Dsdgpy6sdZuoc2UxZz9/mcOw/B8/o8ZHdteiRysrOlX9/vduF67HhXRVcUW36v/sGauXEYU4raIKb+Gvn87c7HIufvlaLvj7idnl+syuG9aTyvw9382KCmzj/F/Bh5jHd0j1S88cnqE14+RDnbX2iNLJKMPvFoI4OQ35S+bDc7Te0Vdq4mzRzbD91aBGisOAm2vDUrVr6Pz/SLweVX4Df/q94jezVxuG554pK1bzK31dUeLB2//EOh3MsVmnsTR2chgw7T12he9/eJHdKzBZN+XiXOk9doUVf127Y0FXf7/nsKv16wVZ7gPJzMTT4i7c3KXbKch0+W78htrp4O+OQYqcsdwpAgzs7VmksVqt+eGmUffWgVDmHa3iP1pr/4CCH8237aoUHN2GODwyJ4NOAqs59+M3fy39ajZ2yXN/nVH8vJxvbMIGNrfoTO2W59p2q3WvUR9XSd5fWoTKZTA7/Id4Q3VzZM+90eM4HFRNaL5e
ateuPdziFI9sy9rOFJW7/cw0K8Ff2zDud5pl0faZ8XsVb61zPS7jSm7+6Uf91cyfd1qf8wj359p768xUhJyjAX/ff3EnbqlSwpMphpoHT17p87YiQQIehmZkVVaUfdY1U9sw7deDFZHvAqY/mIU2UPfNOhzkZzyzdpV7PrdLgGWv1/Ce7XT6vamVEKh/2s/WVI2edlzd/8fQI+9epI7o6fd+Vqcm97cNeVY3q306/SqisMjTx99ONMS0cgt28Bwc5VI16uxj6DA0KcOpXUnll48rh0K+zz9k/33cny/8tRIQ00cEZdzpU4J5Zust+Xm3m/0xK6qGf31RZ2Unfm2uf12PbwfvJ23vol4M6Ojxv+CsbFDtludbuyXE756k+bEG1aoixBSDbkO498R31nymJ9u/f0q18svz2525Tx5aOIdzPZNKIXm20c5pjwJacK2eAUZis19OyhOvAhQsX1Lx5c+Xn5ys8PLzmJ9TB/e9+rS9ruBHn1OReio5oqpu7tLIvKf6veV/ri+/P6C/3xulnN3bQ0h3HNGlJ9fNeOkc205u/ulFdWjdTSKBzFaA2Xlm9V2nrD2r80FhNG31DzU9Q+Wqtx/+xwz5fpWvrZkr/3a2SyidlXrk5YadWIdr41IgrX8bJhn25enB+zZv6JcS21G9Hdld8bAsNmZmu8xdLtXbyj9WtjfOF1WKx6k+f7dHxvEt6+ef9HS4mjy3M1KpvnedISNK+6T9RUED9w0x95V8q1UMLtupE3iWddDMJ28bWV/adKtAdszKqPVeSvvz9CHVoEVKvdhWXmTXtk2/18LDOLv+cq2O1WnW51KKmLiosVV0uNSvAz6QAf8ef1dLWH7DP5XLlh5dGSZL+seWIpn7svHS7qnbNg/VuSrx6tw3Xc5/s1t+/PqJJST00Mam7Dp4udLtj95O399CExO4qLjOr57OrnL4f4Gdy2qQz/XfD1SWyWZ0rKr2eW6nLpRZ98fQInci7pHvf2ex0zrOjeuvhYeXVOKvV6vQepWaL/rb5sNbtzdUzd/ZW73aV/88dPXdR/7fhgP6VeVzrn7rVPkwJ+ILaXr8JPldoiODzxi8HaH9OgdLWH6z5SVXYLmY2Czcf1nPLXP/Efy3VJfjYZB4+pz999p3+59auuuOKlVpWq1Xj5n6tTYfOasdzt6lFs0A3r+LsbGGxpi//zmnfl+p8PunH6hFVtwuyzcJNP+i5Txz3sTk04075eXF/kzKzRct3ndTExVluz7myr6z+9pT+u2JvFlfWP3mrOleZaO1rVuw6qf/5u/NO4rbgY/P+f35w2peoOo8ndtPvbu9pf3zs/EXd8vJ6h3Pe+OUA3TWgvf2x2WLV/e9+rV3H89WyWaCOuNlA8GpUDaozV35nn1wvSf+eMFT9O0Rc8/cErncEn3pqqOBT9T/KDzb9oOc/qfk/448eG6J4N6ssavrJ92rUJ/g0hN3H8/XTN7+s8bytf0hy2JCvPiwWqzZnn1Xr0CCHidjedq6oRDf9eY3DsQ4tmuofj9ysji1dV3BchYTvX0xWE39jjHxnnynSiFc3KG3cTRrVv53b8z7KPKYna1gx+NQdPZU6wnlOmCTlXLisE3mX1L9DRLU7HB85e1H3zdvsdpfy+sh6/jZFhDj/wOCqwgM0FgSfevJk8On/wmpduFzmFHxcyT5TpM92ntBra/ZLkj58bIgG1XFpaVFxmXYcydNf07/XmcJiHarnniYbn7pVnVr5TjWgpMyi3SfyNe+LbP3m1q4Oe/UA1blUYtY/tx3V0h3HFd+phabe2dsjt23Iu1iiv20+rI93HNehKzaLjAhpohs7Rmj9PudtGKTyVY9XznECQPCpN08Fn51H83RX2leSpLkPxNsn2gIAgKtX2+u3MWrbPqDqrRkSOrMpGAAA3mDY4JOXl6f4+HgNGDBAffv21dy5c73aHtu+IP3aN3faswYAADSM+q1z9gFhYWHKyMhQSEiIioqK1LdvX40dO1atWrWq+ckeUFKxe24TfyYeAgDgLYat+Pj7+yskpHxVS3FxsaxWq1fvpFxqDz6G/SMHAOC6V+er8MyZMzVo0CCFhYWpTZs2GjNmjPbtu7bLqDMyMjR69GhFR0fLZDJp2bJlLs9LS0tTbGysgoODNXjwYG3ZssXh+3l5eYqLi1OHDh301FNPKTIy0uXrNISCy9du91YAAFA/dQ4+GzduVGpqqjZv3qw1a9aotLRUt99+u4qKXC+V/uqrr1RaWup0fM+ePcrJyXH5nKKiIsXFxSktLc1tO5YsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfXfk5ERIR27typ7OxsLVq0yO37NYRnKzYa7NSqfrvjAgCAq1fn4LNq1So9+OCDuuGGGxQXF6cFCxboyJEjysx03hXWYrEoNTVV48aNk9lceVPFffv2KTExUe+//77L90hOTtb06dP1s5/9zG07Xn/9dT3yyCMaP368+vTpozlz5igkJETvvfee07lRUVGKi4vTF1984fb10tLS1KdPHw0aNMjtOfV18HSh/caaY2rYvwcAAHjOVU84yc/PlyS1bOm8RNvPz08rVqzQjh079MADD8hisejgwYNKTEzUmDFj9PTTT9frPUtKSpSZmamkpMobSvr5+SkpKUmbNpXfpTknJ0cFBQX2NmZkZKhnz54uX0+SUlNTtWfPHm3dWvP9oOqqa+tQfZI6VM/c2Us/6ua94TYAABq7q1rVZbFY9MQTT2jo0KHq27evy3Oio6O1bt06DRs2TOPGjdOmTZuUlJSk2bNn1/t9z5w5I7PZrKgox00Ao6KitHfvXknS4cOH9eijj9onNT/++OPq169fvd/zasV1jHC6yzgAAGhYVxV8UlNTtXv3bn35ZfX3S4qJidHChQs1fPhwdenSRfPmzfP4/WQSEhKUlZXl0fcAAAC+pd5DXRMmTNBnn32m9evXq0OHDtWem5OTo0cffVSjR4/WxYsXNWnSpPq+rSQpMjJS/v7+TpOVc3Jy1LZtWzfPAgAAjV2dg4/VatWECRO0dOlSrVu3Tp07d672/DNnzmjkyJHq3bu3Pv74Y6Wnp2vJkiV68skn693owMBADRw4UOnp6fZjFotF6enpGjJkSL1fFwAAGFudh7pSU1O1aNEiffLJJwoLC9OpU6ckSc2bN1fTpk0dzrVYLEpOTlanTp20ZMkSBQQEqE+fPlqzZo0SExPVvn17l9WfwsJCHThwwP44OztbWVlZatmypWJiYiRJkydPVkpKiuLj45WQkKBZs2apqKhI48ePr+tHAgAAjUSd787ubm7O/Pnz9eCDDzodX7NmjYYNG6bg4GCH4zt27FDr1q1dDpNt2LBBI0aMcDqekpKiBQsW2B+/9dZbeuWVV3Tq1CkNGDBAf/3rXzV48OC6fBwnnro7OwAA8JzaXr/rHHyMjuADAIDvqe31mxtHAQCARoPgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0ruompUZk29bowoULXm4JAACoLdt1u6btCQk+VygoKJAkdezY0cstAQAAdVVQUK
DmzZu7/T47N1/BYrHoxIkTCgsLc3t7jvq4cOGCOnbsqKNHjzaKHaH5vMbW2D6v1Pg+M5/X2Iz4ea1WqwoKChQdHS0/P/czeaj4XMHPz8/l/cOulfDwcMN0strg8xpbY/u8UuP7zHxeYzPa562u0mPD5GYAANBoEHwAAECjQfBpIEFBQZo2bZqCgoK83ZQGwec1tsb2eaXG95n5vMbW2D5vVUxuBgAAjQYVHwAA0GgQfAAAQKNB8AEAAI0GwQcAADQaBJ8GkpaWptjYWAUHB2vw4MHasmWLt5vkERkZGRo9erSio6NlMpm0bNkybzfJo2bOnKlBgwYpLCxMbdq00ZgxY7Rv3z5vN8tjZs+erf79+9s3PRsyZIhWrlzp7WY1mJdeekkmk0lPPPGEt5viES+88IJMJpPDr169enm7WR51/Phx3X///WrVqpWaNm2qfv36adu2bd5ulsfExsY6/R2bTCalpqZ6u2kNhuDTAJYsWaLJkydr2rRp2r59u+Li4nTHHXcoNzfX20275oqKihQXF6e0tDRvN6VBbNy4Uampqdq8ebPWrFmj0tJS3X777SoqKvJ20zyiQ4cOeumll5SZmalt27YpMTFRd911l7799ltvN83jtm7dqrffflv9+/f3dlM86oYbbtDJkyftv7788ktvN8ljzp8/r6FDh6pJkyZauXKl9uzZo9dee00tWrTwdtM8ZuvWrQ5/v2vWrJEk3XPPPV5uWQOywuMSEhKsqamp9sdms9kaHR1tnTlzphdb5XmSrEuXLvV2MxpUbm6uVZJ148aN3m5Kg2nRooX13Xff9XYzPKqgoMDavXt365o1a6zDhw+3Tpw40dtN8ohp06ZZ4+LivN2MBvP73//eesstt3i7GV41ceJEa9euXa0Wi8XbTWkwVHw8rKSkRJmZmUpKSrIf8/PzU1JSkjZt2uTFlsET8vPzJUktW7b0cks8z2w2a/HixSoqKtKQIUO83RyPSk1N1ahRoxz+HRvV999/r+joaHXp0kX33Xefjhw54u0mecy///1vxcfH65577lGbNm104403au7cud5uVoMpKSnR3/72Nz300EPX9Kbc1zuCj4edOXNGZrNZUVFRDsejoqJ06tQpL7UKnmCxWPTEE09o6NCh6tu3r7eb4zG7du1SaGiogoKC9Nhjj2np0qXq06ePt5vlMYsXL9b27ds1c+ZMbzfF4wYPHqwFCxZo1apVmj17trKzszVs2DAVFBR4u2kecejQIc2ePVvdu3fX6tWr9Zvf/Ea//e1v9f7773u7aQ1i2bJlysvL04MPPujtpjQo7s4OXCOpqanavXu3oedESFLPnj2VlZWl/Px8ffTRR0pJSdHGjRsNGX6OHj2qiRMnas2aNQoODvZ2czwuOTnZ/nX//v01ePBgderUSf/85z/161//2ost8wyLxaL4+HjNmDFDknTjjTdq9+7dmjNnjlJSUrzcOs+bN2+ekpOTFR0d7e2mNCgqPh4WGRkpf39/5eTkOBzPyclR27ZtvdQqXGsTJkzQZ599pvXr16tDhw7ebo5HBQYGqlu3bho4cKBmzpypuLg4vfHGG95ulkdkZmYqNzdXN910kwICAhQQEKCNGzfqr3/9qwICAmQ2m73dRI+KiIhQjx49dODAAW83xSPatWvnFNh79+5t6OE9m8OHD2vt2rV6+OGHvd2UBkfw8bDAwEANHDhQ6enp9mMWi0Xp6emGnxfRGFitVk2YMEFLly7VunXr1LlzZ283qcFZLBYVFxd7uxkeMXLkSO3atUtZWVn2X/Hx8brvvvuUlZUlf39/bzfRowoLC3Xw4EG1a9fO203xiKFDhzptP7F//3516tTJSy1qOPPnz1ebNm00atQobzelwTHU1QAmT56slJQUxcfHKyEhQbNmzVJRUZHGjx/v7aZdc4WFhQ4/HWZnZysrK0stW7ZUTEyMF1vmGampqVq0aJE++eQThYWF2edtNW/eXE2bNvVy6669qVOnKjk5WTExMSooKNCiRYu0YcMGrV692ttN84iwsDCn+VrNmjVTq1atDDmP68knn9To0aPVqVMnnThxQtOmTZO/v79+9atfebtpHjFp0iT96Ec/0owZM/SLX/xCW7Zs0TvvvKN33nnH203zKIvFovnz5yslJUUBAY0wBnh7WVlj8eabb1pjYmKsgYGB1oSEBOvmzZu93SSPWL9+vVWS06+UlBRvN80jXH1WSdb58+d7u2ke8dBDD1k7depkDQwMtLZu3do6cuRI6+eff+7tZjUoIy9nv/fee63t2rWzBgYGWtu3b2+99957rQcOHPB2szzq008/tfbt29caFBRk7dWrl/Wdd97xdpM8bvXq1VZJ1n379nm7KV5hslqtVu9ELgAAgIbFHB8AANBoEHwAAECjQfABAACNBsEHAAA0GgQfAADQaBB8AABAo0HwAQAAjQbBBwAAeFxGRoZGjx6t6OhomUwmLVu2rM6vYbVa9eqrr6pHjx4KCgpS+/bt9eKLL9bpNRrhXtUAAKChFRUVKS4uTg899JDGjh1br9eYOHGiPv/8c7366qvq16+fzp07p3PnztXpNdi5GQAANCiTyaSlS5dqzJgx9mPFxcX6wx/+oH/84x/Ky8tT37599fLLL+vWW2+VJH333Xfq37+/du/erZ49e9b7vRnqAgAAXjdhwgRt2rRJixcv1jfffKN77rlHP/nJT/T9999Lkj799FN16dJFn332mTp37qzY2Fg9/PDDda74EHwAAIBXHTlyRPPnz9eHH36oYcOGqWvXrnryySd1yy23aP78+ZKkQ4cO6fDhw/rwww/1wQcfaMGCBcrMzNTdd99dp/dijg8AAPCqXbt2yWw2q0ePHg7Hi4uL1apVK0mSxWJRcXGxPvjgA/t58+bN08CBA7Vv375aD38RfAAAgFcVFhbK399fmZmZ8vf3d/heaGioJKldu3YKCAhwCEe9e/eWVF4xIvgAAACfcOONN8psNis3N1fDhg1zec7QoUNVVlamgwcPqmvXrpKk/fv3S5I6depU6/diVRcAAPC4wsJCHThwQFJ50Hn99dc1YsQItWzZUjExMbr//vv11Vdf6bXXXtONN96o06dPKz09Xf3799eoUaNksVg0aNAghYaGatasWbJYLEpNTVV4eLg+//zzWreD4AMAADxuw4YNGjFihNPxlJQULViwQKWlpZo+fbo++OADHT9+XJGRkbr55pv1xz/+Uf369ZMknThxQo8//rg+//xzNWvWTMnJyXrttdfUsmXLWreD4AMAABoNlrMDAIBGg+ADAAAaDYIPAABoNAg+AACg0SD4AACARoPgAwAAGg2CDwAAaDQIPgAAoNEg+AAAgEaD4AMAABoNgg8AAGg0CD4AAKDR+P8cUnyKMvhSngAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Increase the mean in the long term and see how long it takes the decaying avg to fully adjust\n", + "seq2 = np.concatenate((seq, np.repeat(4000, N // 2)))\n", + "N2 = len(seq2)\n", + "y = []\n", + "avg = 2000\n", + "for i in range(N2):\n", + " avg = avg * D + seq2[i] * (1 - D) \n", + " y.append(avg)\n", + "\n", + "x = np.arange(0, N2)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.yscale('log')\n", + "plt.show()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -474,9 +623,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python [conda env:gr]", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "conda-env-gr-py" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -488,9 +637,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.5" + "version": "3.10.12" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index e759a9e50..ef4d3fb9e 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,6 +2,7 @@ use crate::mempool::{ errors::{NonStandardError, NonStandardResult}, Mempool, }; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, @@ -114,7 +115,7 @@ impl Mempool { /// It is exposed by [MiningManager] for use by transaction generators and wallets. pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { // Unspendable outputs are considered dust. - if is_unspendable::(transaction_output.script_public_key.script()) { + if is_unspendable::(transaction_output.script_public_key.script()) { return true; } @@ -175,7 +176,6 @@ impl Mempool { if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } - for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since // they have already been checked prior to calling this @@ -188,9 +188,11 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - get_sig_op_count::(&input.signature_script, &entry.script_public_key); - let num_sig_ops = 1; - if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS { + let num_sig_ops = get_sig_op_count::( + &input.signature_script, + &entry.script_public_key, + ); + if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS as u64 { return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS)); } } diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs index 8d2195327..70ac215ba 100644 --- a/mining/src/mempool/model/frontier.rs +++ b/mining/src/mempool/model/frontier.rs @@ -25,20 +25,30 @@ const COLLISION_FACTOR: u64 = 4; /// hard limit in order to allow the SequenceSelector to compensate for consensus rejections. const MASS_LIMIT_FACTOR: f64 = 1.2; -/// A rough estimation for the average transaction mass. 
diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs
index e759a9e50..ef4d3fb9e 100644
--- a/mining/src/mempool/check_transaction_standard.rs
+++ b/mining/src/mempool/check_transaction_standard.rs
@@ -2,6 +2,7 @@ use crate::mempool::{
     errors::{NonStandardError, NonStandardResult},
     Mempool,
 };
+use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync;
 use kaspa_consensus_core::{
     constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI},
     mass,
@@ -114,7 +115,7 @@ impl Mempool {
     /// It is exposed by [MiningManager] for use by transaction generators and wallets.
     pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool {
         // Unspendable outputs are considered dust.
-        if is_unspendable::<PopulatedTransaction, SigHashReusedValues>(transaction_output.script_public_key.script()) {
+        if is_unspendable::<PopulatedTransaction, SigHashReusedValuesUnsync>(transaction_output.script_public_key.script()) {
             return true;
         }
@@ -175,7 +176,6 @@ impl Mempool {
         if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS {
             return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS));
         }
-
         for (i, input) in transaction.tx.inputs.iter().enumerate() {
             // It is safe to elide existence and index checks here since
             // they have already been checked prior to calling this
@@ -188,9 +188,11 @@ impl Mempool {
                 ScriptClass::PubKey => {}
                 ScriptClass::PubKeyECDSA => {}
                 ScriptClass::ScriptHash => {
-                    get_sig_op_count::<PopulatedTransaction, SigHashReusedValues>(&input.signature_script, &entry.script_public_key);
-                    let num_sig_ops = 1;
-                    if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS {
+                    let num_sig_ops = get_sig_op_count::<PopulatedTransaction, SigHashReusedValuesUnsync>(
+                        &input.signature_script,
+                        &entry.script_public_key,
+                    );
+                    if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS as u64 {
                         return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS));
                     }
                 }
diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs
index 8d2195327..70ac215ba 100644
--- a/mining/src/mempool/model/frontier.rs
+++ b/mining/src/mempool/model/frontier.rs
@@ -25,20 +25,30 @@ const COLLISION_FACTOR: u64 = 4;
 /// hard limit in order to allow the SequenceSelector to compensate for consensus rejections.
 const MASS_LIMIT_FACTOR: f64 = 1.2;
 
-/// A rough estimation for the average transaction mass. The usage is a non-important edge case
-/// hence we just throw this here (as oppose to performing an accurate estimation)
-const TYPICAL_TX_MASS: f64 = 2000.0;
+/// Initial estimation of the average transaction mass.
+const INITIAL_AVG_MASS: f64 = 2036.0;
+
+/// Decay factor of average mass weighting.
+const AVG_MASS_DECAY_FACTOR: f64 = 0.99999;
 
 /// Management of the transaction pool frontier, that is, the set of transactions in
 /// the transaction pool which have no mempool ancestors and are essentially ready
 /// to enter the next block template.
-#[derive(Default)]
 pub struct Frontier {
     /// Frontier transactions sorted by feerate order and searchable for weight sampling
     search_tree: SearchTree,
 
     /// Total masses: Σ_{tx in frontier} tx.mass
     total_mass: u64,
+
+    /// Tracks the average transaction mass throughout the mempool's lifespan using a decayed weighting mechanism
+    average_transaction_mass: f64,
+}
+
+impl Default for Frontier {
+    fn default() -> Self {
+        Self { search_tree: Default::default(), total_mass: Default::default(), average_transaction_mass: INITIAL_AVG_MASS }
+    }
 }
 
 impl Frontier {
@@ -62,6 +72,11 @@ impl Frontier {
         let mass = key.mass;
         if self.search_tree.insert(key) {
             self.total_mass += mass;
+            // A decaying average formula. Denote ɛ = 1 - AVG_MASS_DECAY_FACTOR. A transaction inserted N slots ago has
+            // ɛ * (1 - ɛ)^N weight within the updated average. This gives some weight to the full mempool history while
+            // giving higher importance to more recent samples.
+            self.average_transaction_mass =
+                self.average_transaction_mass * AVG_MASS_DECAY_FACTOR + mass as f64 * (1.0 - AVG_MASS_DECAY_FACTOR);
             true
         } else {
             false
@@ -210,10 +225,7 @@ impl Frontier {
 
     /// Builds a feerate estimator based on internal state of the ready transactions frontier
     pub fn build_feerate_estimator(&self, args: FeerateEstimatorArgs) -> FeerateEstimator {
-        let average_transaction_mass = match self.len() {
-            0 => TYPICAL_TX_MASS,
-            n => self.total_mass() as f64 / n as f64,
-        };
+        let average_transaction_mass = self.average_transaction_mass;
         let bps = args.network_blocks_per_second as f64;
         let mut mass_per_block = args.maximum_mass_per_block as f64;
         let mut inclusion_interval = average_transaction_mass / (mass_per_block * bps);
@@ -368,8 +380,12 @@ mod tests {
         assert_eq!(frontier.total_mass(), frontier.search_tree.ascending_iter().map(|k| k.mass).sum::<u64>());
     }
 
+    /// Epsilon used for various test comparisons
+    const EPS: f64 = 0.000001;
+
     #[test]
     fn test_feerate_estimator() {
+        const MIN_FEERATE: f64 = 1.0;
         let mut rng = thread_rng();
         let cap = 2000;
         let mut map = HashMap::with_capacity(cap);
@@ -394,13 +410,13 @@ mod tests {
         let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 };
         // We are testing that the build function actually returns and is not looping indefinitely
         let estimator = frontier.build_feerate_estimator(args);
-        let estimations = estimator.calc_estimations(1.0);
+        let estimations = estimator.calc_estimations(MIN_FEERATE);
 
         let buckets = estimations.ordered_buckets();
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= 1.0,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
             );
             assert!(
@@ -441,7 +457,7 @@ mod tests {
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= MIN_FEERATE,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
             );
             assert!(
@@ -492,7 +508,7 @@ mod tests {
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             assert!(
-                b.feerate.is_normal() && b.feerate >= MIN_FEERATE,
+                b.feerate.is_normal() && b.feerate >= MIN_FEERATE - EPS,
                 "bucket feerate must be a finite number greater or equal to the minimum standard feerate"
            );
            assert!(
@@ -506,6 +522,7 @@ mod tests {
 
     #[test]
     fn test_feerate_estimator_with_less_than_block_capacity() {
+        const MIN_FEERATE: f64 = 1.0;
         let mut map = HashMap::new();
         for i in 0..304 {
             let mass: u64 = 1650;
@@ -524,13 +541,16 @@ mod tests {
         let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 };
         // We are testing that the build function actually returns and is not looping indefinitely
         let estimator = frontier.build_feerate_estimator(args);
-        let estimations = estimator.calc_estimations(1.0);
+        let estimations = estimator.calc_estimations(MIN_FEERATE);
 
         let buckets = estimations.ordered_buckets();
         // Test for the absence of NaN, infinite or zero values in buckets
         for b in buckets.iter() {
             // Expect min feerate bcs blocks are not full
-            assert!(b.feerate == 1.0, "bucket feerate is expected to be equal to the minimum standard feerate");
+            assert!(
+                (b.feerate - MIN_FEERATE).abs() <= EPS,
+                "bucket feerate is expected to be equal to the minimum standard feerate"
+            );
             assert!(
                 b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0 && b.estimated_seconds <= 1.0,
                 "bucket estimated seconds must be a finite number greater than zero & less than 1.0"
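
The comment added to `Frontier::insert` above states that a transaction inserted N slots ago carries weight ɛ·(1 - ɛ)^N in the average, with ɛ = 1 - AVG_MASS_DECAY_FACTOR. A quick standalone check of that geometric weighting and the implied adjustment horizon (illustrative only, not project code):

```rust
fn main() {
    let eps: f64 = 1.0 - 0.99999; // ɛ = 1e-5
    // Weight of the sample inserted n slots ago: ɛ * (1 - ɛ)^n
    let weight = |n: u32| eps * (1.0 - eps).powi(n as i32);

    // The weights form a geometric series: over the last N insertions they sum
    // to 1 - (1 - ɛ)^N, and the remainder is the weight still held by the
    // initial INITIAL_AVG_MASS value.
    let total: f64 = (0..1_000_000u32).map(weight).sum();
    println!("weight of last 1M samples ≈ {total:.5}"); // ≈ 1 - e^(-10) ≈ 0.99995

    // A sample's influence halves roughly every ln(2)/ɛ ≈ 69315 insertions.
    println!("half-life ≈ {:.0} insertions", 2f64.ln() / eps);
}
```

This is why the estimator no longer needs the `TYPICAL_TX_MASS` fallback: the tracked value is always defined, starts at a sane prior, and converges to the live mempool distribution.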
diff --git a/mining/src/mempool/model/frontier/search_tree.rs b/mining/src/mempool/model/frontier/search_tree.rs
index edf34c271..136269a79 100644
--- a/mining/src/mempool/model/frontier/search_tree.rs
+++ b/mining/src/mempool/model/frontier/search_tree.rs
@@ -111,7 +111,7 @@ impl<'a> PrefixWeightVisitor<'a> {
     }
 }
 
-impl<'a> DescendVisit for PrefixWeightVisitor<'a> {
+impl DescendVisit for PrefixWeightVisitor<'_> {
     type Result = f64;
 
     fn visit_inner(&mut self, keys: &[FeerateKey], arguments: &[FeerateWeight]) -> DescendVisitResult {
diff --git a/mining/src/model/topological_sort.rs b/mining/src/model/topological_sort.rs
index aa88cce02..fb276ac00 100644
--- a/mining/src/model/topological_sort.rs
+++ b/mining/src/model/topological_sort.rs
@@ -166,8 +166,8 @@ impl<'a, T: AsRef<Transaction>> Iterator for TopologicalIter<'a, T> {
     }
 }
 
-impl<'a, T: AsRef<Transaction>> FusedIterator for TopologicalIter<'a, T> {}
-impl<'a, T: AsRef<Transaction>> ExactSizeIterator for TopologicalIter<'a, T> {
+impl<T: AsRef<Transaction>> FusedIterator for TopologicalIter<'_, T> {}
+impl<T: AsRef<Transaction>> ExactSizeIterator for TopologicalIter<'_, T> {
     fn len(&self) -> usize {
         self.transactions.len()
     }
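
The `search_tree.rs` and `topological_sort.rs` hunks above are the same mechanical cleanup: when an impl never refers to its lifetime parameter by name, newer Rust/clippy suggests replacing the declared lifetime with the anonymous `'_`. A toy illustration of the before/after shape (the types here are hypothetical, not the project's):

```rust
struct Window<'a, T>(&'a [T]);

// Before: the lifetime is declared only to be passed straight through.
//   impl<'a, T> Window<'a, T> { fn len(&self) -> usize { self.0.len() } }
// After, as in the hunks above: the anonymous lifetime '_ marks
// "some lifetime we never need to name".
impl<T> Window<'_, T> {
    fn len(&self) -> usize {
        self.0.len()
    }
}

fn main() {
    let data = [10u64, 20, 30];
    assert_eq!(Window(&data).len(), 3);
}
```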
diff --git a/protocol/flows/src/flowcontext/orphans.rs b/protocol/flows/src/flowcontext/orphans.rs
index f18649e55..41bf940a1 100644
--- a/protocol/flows/src/flowcontext/orphans.rs
+++ b/protocol/flows/src/flowcontext/orphans.rs
@@ -6,7 +6,6 @@ use kaspa_consensus_core::{
 use kaspa_consensusmanager::{BlockProcessingBatch, ConsensusProxy};
 use kaspa_core::debug;
 use kaspa_hashes::Hash;
-use kaspa_utils::option::OptionExtensions;
 use rand::Rng;
 use std::{
     collections::{HashMap, HashSet, VecDeque},
@@ -166,7 +165,7 @@ impl OrphanBlocksPool {
             }
         } else {
             let status = consensus.async_get_block_status(current).await;
-            if status.is_none_or_ex(|s| s.is_header_only()) {
+            if status.is_none_or(|s| s.is_header_only()) {
                 // Block is not in the orphan pool nor does its body exist consensus-wise, so it is a root
                 roots.push(current);
             }
@@ -193,8 +192,7 @@ impl OrphanBlocksPool {
         if let Occupied(entry) = self.orphans.entry(orphan_hash) {
             let mut processable = true;
             for p in entry.get().block.header.direct_parents().iter().copied() {
-                if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or_ex(|s| s.is_header_only())
-                {
+                if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or(|s| s.is_header_only()) {
                     processable = false;
                     break;
                 }
@@ -250,7 +248,7 @@ impl OrphanBlocksPool {
                 let mut processable = true;
                 for parent in block.block.header.direct_parents().iter().copied() {
                     if self.orphans.contains_key(&parent)
-                        || consensus.async_get_block_status(parent).await.is_none_or_ex(|status| status.is_header_only())
+                        || consensus.async_get_block_status(parent).await.is_none_or(|status| status.is_header_only())
                     {
                         processable = false;
                         break;
diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs
index 110b378b7..5fe1bb593 100644
--- a/protocol/flows/src/flowcontext/transactions.rs
+++ b/protocol/flows/src/flowcontext/transactions.rs
@@ -47,8 +47,7 @@ impl TransactionsSpread {
         // Keep the launching times aligned to exact intervals. Note that `delta=10.1` seconds will result in
         // adding 10 seconds to last scan time, while `delta=11` will result in adding 20 (assuming scanning
         // interval is 10 seconds).
-        self.last_scanning_time +=
-            Duration::from_secs(((delta.as_secs() + SCANNING_TASK_INTERVAL - 1) / SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL);
+        self.last_scanning_time += Duration::from_secs(delta.as_secs().div_ceil(SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL);
 
         self.scanning_job_count += 1;
         self.scanning_task_running = true;
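
The `orphans.rs` and `transactions.rs` hunks above both swap hand-rolled helpers for methods that are now in the standard library: `Option::is_none_or` (stable since Rust 1.82) replaces the local `OptionExtensions::is_none_or_ex` shim, and `div_ceil` (stable on unsigned integers since Rust 1.73) replaces the `(a + b - 1) / b` idiom. A small demonstration, reusing the interval arithmetic from the comment in the hunk:

```rust
fn main() {
    // Option::is_none_or: true when None, or when the predicate holds on Some
    let none: Option<u32> = None;
    assert!(none.is_none_or(|v| v > 10));
    assert!(Some(42u32).is_none_or(|v| v > 10));
    assert!(!Some(3u32).is_none_or(|v| v > 10));

    // div_ceil: round an elapsed delta up to whole scanning intervals
    const SCANNING_TASK_INTERVAL: u64 = 10; // seconds, as in the hunk above
    // delta = 10 (i.e. 10.1s truncated) -> advance by 10; delta = 11 -> advance by 20
    assert_eq!(10u64.div_ceil(SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL, 10);
    assert_eq!(11u64.div_ceil(SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL, 20);
}
```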
diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs
index 1e38deddd..0dd7fe64f 100644
--- a/protocol/flows/src/v5/ibd/flow.rs
+++ b/protocol/flows/src/v5/ibd/flow.rs
@@ -10,7 +10,7 @@ use kaspa_consensus_core::{
     api::BlockValidationFuture,
     block::Block,
     header::Header,
-    pruning::{PruningPointProof, PruningPointsList},
+    pruning::{PruningPointProof, PruningPointsList, PruningProofMetadata},
     BlockHashSet,
 };
 use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy, StagingConsensus};
@@ -218,7 +218,7 @@ impl IbdFlow {
 
         let staging_session = staging.session().await;
 
-        let pruning_point = self.sync_and_validate_pruning_proof(&staging_session).await?;
+        let pruning_point = self.sync_and_validate_pruning_proof(&staging_session, relay_block).await?;
         self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?;
         staging_session.async_validate_pruning_points().await?;
         self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?;
@@ -226,7 +226,7 @@ impl IbdFlow {
         Ok(())
     }
 
-    async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy) -> Result {
+    async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy, relay_block: &Block) -> Result {
         self.router.enqueue(make_message!(Payload::RequestPruningPointProof, RequestPruningPointProofMessage {})).await?;
 
         // Pruning proof generation and communication might take several minutes, so we allow a long 10 minute timeout
@@ -234,11 +234,14 @@ impl IbdFlow {
         let proof: PruningPointProof = msg.try_into()?;
         debug!("received proof with overall {} headers", proof.iter().map(|l| l.len()).sum::<usize>());
 
+        let proof_metadata = PruningProofMetadata::new(relay_block.header.blue_work);
+
         // Get a new session for current consensus (non staging)
         let consensus = self.ctx.consensus().session().await;
 
         // The proof is validated in the context of current consensus
-        let proof = consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof).map(|()| proof)).await?;
+        let proof =
+            consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof, &proof_metadata).map(|()| proof)).await?;
 
         let proof_pruning_point = proof[0].last().expect("was just ensured by validation").hash;
@@ -316,7 +319,7 @@ impl IbdFlow {
         if mismatch_detected {
             info!("Validating the locally built proof (sanity test fallback #2)");
             // Note: the proof is validated in the context of *current* consensus
-            if let Err(err) = con.validate_pruning_proof(&built_proof) {
+            if let Err(err) = con.validate_pruning_proof(&built_proof, &proof_metadata) {
                 panic!("Locally built proof failed validation: {}", err);
             }
             info!("Locally built proof was validated successfully");
diff --git a/protocol/p2p/build.rs b/protocol/p2p/build.rs
index b41fe87f5..b4aea6939 100644
--- a/protocol/p2p/build.rs
+++ b/protocol/p2p/build.rs
@@ -5,7 +5,7 @@ fn main() {
     tonic_build::configure()
         .build_server(true)
         .build_client(true)
-        .compile(&proto_files[0..1], dirs)
+        .compile_protos(&proto_files[0..1], dirs)
         .unwrap_or_else(|e| panic!("protobuf compilation failed, error: {e}"));
     // recompile protobufs only if any of the proto files changes.
     for file in proto_files {
diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs
index a8ec431e4..54d387043 100644
--- a/protocol/p2p/src/core/connection_handler.rs
+++ b/protocol/p2p/src/core/connection_handler.rs
@@ -9,7 +9,7 @@ use kaspa_core::{debug, info};
 use kaspa_utils::networking::NetAddress;
 use kaspa_utils_tower::{
     counters::TowerConnectionCounters,
-    middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder},
+    middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder},
 };
 use std::net::ToSocketAddrs;
 use std::pin::Pin;
@@ -20,7 +20,6 @@ use tokio::sync::mpsc::{channel as mpsc_channel, Sender as MpscSender};
 use tokio::sync::oneshot::{channel as oneshot_channel, Sender as OneshotSender};
 use tokio_stream::wrappers::ReceiverStream;
 use tokio_stream::StreamExt;
-use tonic::codegen::Body;
 use tonic::transport::{Error as TonicError, Server as TonicServer};
 use tonic::{Request, Response, Status as TonicStatus, Streaming};
 
@@ -80,7 +79,7 @@ impl ConnectionHandler {
 
         // TODO: check whether we should set tcp_keepalive
         let serve_result = TonicServer::builder()
-            .layer(measure_request_body_size_layer(bytes_rx, |b| b))
+            .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync()))
             .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone())))
             .add_service(proto_server)
             .serve_with_shutdown(serve_address.into(), termination_receiver.map(drop))
@@ -110,9 +109,7 @@ impl ConnectionHandler {
 
         let channel = ServiceBuilder::new()
             .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_rx.clone())))
-            .layer(measure_request_body_size_layer(self.counters.bytes_tx.clone(), |body| {
-                body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync()
-            }))
+            .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_tx.clone()).boxed_unsync()))
             .service(channel);
 
         let mut client = ProtoP2pClient::new(channel)
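
The `protocol/p2p/build.rs` hunk above (and the matching `rpc/grpc/core/build.rs` hunk further below) tracks tonic-build's rename of `Builder::compile` to `Builder::compile_protos`. A self-contained `build.rs` in that style; the proto path here is a placeholder, not one of the project's files:

```rust
// build.rs: sketch of the renamed tonic-build entry point used above.
fn main() {
    tonic_build::configure()
        .build_server(true)
        .build_client(true)
        // formerly `.compile(...)`; renamed in newer tonic-build releases
        .compile_protos(&["proto/example.proto"], &["proto"])
        .unwrap_or_else(|e| panic!("protobuf compilation failed, error: {e}"));
    // recompile only when the proto file changes
    println!("cargo:rerun-if-changed=proto/example.proto");
}
```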
let mut client = ProtoP2pClient::new(channel) diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 35d08493b..9baeaa04e 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -254,7 +254,7 @@ async fn main() { (stats.utxos_amount / stats.num_utxos as u64), stats.num_utxos / stats.num_txs, stats.num_outs / stats.num_txs, - if utxos_len > pending_len { utxos_len - pending_len } else { 0 }, + utxos_len.saturating_sub(pending_len), ); stats.since = now; stats.num_txs = 0; diff --git a/rpc/core/src/model/header.rs b/rpc/core/src/model/header.rs index dddf767b7..fda6b70e1 100644 --- a/rpc/core/src/model/header.rs +++ b/rpc/core/src/model/header.rs @@ -8,7 +8,6 @@ use workflow_serializer::prelude::*; /// Used for mining APIs (get_block_template & submit_block) #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] - pub struct RpcRawHeader { pub version: u16, pub parents_by_level: Vec<Vec<RpcHash>>, diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs index 00dadee23..b7e53bb5e 100644 --- a/rpc/grpc/client/src/lib.rs +++ b/rpc/grpc/client/src/lib.rs @@ -38,7 +38,7 @@ use kaspa_rpc_core::{ use kaspa_utils::{channel::Channel, triggers::DuplexTrigger}; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; use regex::Regex; use std::{ @@ -50,7 +50,6 @@ use std::{ }; use tokio::sync::Mutex; use tonic::codec::CompressionEncoding; -use tonic::codegen::Body; use tonic::Streaming; mod connection_event; @@ -544,9 +543,7 @@ impl Inner { let bytes_tx = &counters.bytes_tx; let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()))) - .layer(measure_request_body_size_layer(bytes_tx.clone(), |body| { - body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() - })) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()).boxed_unsync())) .service(channel); // Build the gRPC client with an interceptor setting the request timeout diff --git a/rpc/grpc/core/build.rs b/rpc/grpc/core/build.rs index fdf54486d..b3a0614ea 100644 --- a/rpc/grpc/core/build.rs +++ b/rpc/grpc/core/build.rs @@ -10,7 +10,7 @@ fn main() { // uncomment this line and reflect the change in src/lib.rs //.out_dir("./src") - .compile(&protowire_files[0..1], dirs) + .compile_protos(&protowire_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compile error: {e}")); // recompile protobufs only if any of the proto files changes.
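The rothschild hunk above replaces a manual underflow guard with `saturating_sub`. A minimal standalone sketch of the equivalence (the variable values here are hypothetical, not from the source):

```rust
fn main() {
    let utxos_len: u64 = 3;
    let pending_len: u64 = 7;
    // Before: if utxos_len > pending_len { utxos_len - pending_len } else { 0 }
    // After: saturating_sub clamps at zero instead of underflowing.
    assert_eq!(utxos_len.saturating_sub(pending_len), 0);
    assert_eq!(7u64.saturating_sub(3), 4);
}
```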
diff --git a/rpc/grpc/server/src/connection_handler.rs b/rpc/grpc/server/src/connection_handler.rs index d581ea441..fd13cf9bb 100644 --- a/rpc/grpc/server/src/connection_handler.rs +++ b/rpc/grpc/server/src/connection_handler.rs @@ -29,7 +29,7 @@ use kaspa_rpc_core::{ use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer}, }; use std::fmt::Debug; use std::{ @@ -144,7 +144,7 @@ impl ConnectionHandler { let serve_result = TonicServer::builder() // .http2_keepalive_interval(Some(GRPC_KEEP_ALIVE_PING_INTERVAL)) // .http2_keepalive_timeout(Some(GRPC_KEEP_ALIVE_PING_TIMEOUT)) - .layer(measure_request_body_size_layer(bytes_rx, |b| b)) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(protowire_server) .serve_with_shutdown( diff --git a/rpc/wrpc/wasm/src/resolver.rs b/rpc/wrpc/wasm/src/resolver.rs index 7abfdb688..175353437 100644 --- a/rpc/wrpc/wasm/src/resolver.rs +++ b/rpc/wrpc/wasm/src/resolver.rs @@ -198,7 +198,7 @@ impl TryFrom for NativeResolver { impl TryCastFromJs for Resolver { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>> where R: AsRef<JsValue> + 'a, { diff --git a/simpa/src/main.rs b/simpa/src/main.rs index c66656be3..c35c0c640 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -13,7 +13,7 @@ use kaspa_consensus::{ headers::HeaderStoreReader, relations::RelationsStoreReader, }, - params::{Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, + params::{ForkActivation, Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, }; use kaspa_consensus_core::{ api::ConsensusApi, block::Block, blockstatus::BlockStatus, config::bps::calculate_ghostdag_k, errors::block::BlockProcessResult, @@ -122,6 +122,8 @@ struct Args { rocksdb_files_limit: Option, #[arg(long)] rocksdb_mem_budget: Option, + #[arg(long, default_value_t = false)] + long_payload: bool, } #[cfg(feature = "heap")] @@ -189,8 +191,9 @@ fn main_impl(mut args: Args) { } args.bps = if args.testnet11 { Testnet11Bps::bps() as f64 } else { args.bps }; let mut params = if args.testnet11 { TESTNET11_PARAMS } else { DEVNET_PARAMS }; - params.storage_mass_activation_daa_score = 400; + params.storage_mass_activation = ForkActivation::new(400); params.storage_mass_parameter = 10_000; + params.payload_activation = ForkActivation::always(); let mut builder = ConfigBuilder::new(params) .apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params)) .apply_args(|config| apply_args_to_perf_params(&args, &mut config.perf)) @@ -245,6 +248,7 @@ fn main_impl(mut args: Args) { args.rocksdb_stats_period_sec, args.rocksdb_files_limit, args.rocksdb_mem_budget, + args.long_payload, ) .run(until); consensus.shutdown(handles); @@ -306,12 +310,12 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) { if args.daa_legacy { // Scale DAA and median-time windows linearly with BPS - params.sampling_activation_daa_score = u64::MAX; + params.sampling_activation = ForkActivation::never(); params.legacy_timestamp_deviation_tolerance = (params.legacy_timestamp_deviation_tolerance as f64 * args.bps) as u64; params.legacy_difficulty_window_size =
(params.legacy_difficulty_window_size as f64 * args.bps) as usize; } else { // Use the new sampling algorithms - params.sampling_activation_daa_score = 0; + params.sampling_activation = ForkActivation::always(); params.past_median_time_sample_rate = (10.0 * args.bps) as u64; params.new_timestamp_deviation_tolerance = (600.0 * args.bps) as u64; params.difficulty_sample_rate = (2.0 * args.bps) as u64; @@ -425,12 +429,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) -> Vec<Hash> { } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::<usize>() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::<usize>() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::<usize>() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::<usize>() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::<usize>() as f64 / hashes.len() as f64; diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index a9a4a3423..9cd985937 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -74,6 +74,7 @@ pub struct Miner { target_txs_per_block: u64, target_blocks: Option<u64>, max_cached_outpoints: usize, + long_payload: bool, // Mass calculator mass_calculator: MassCalculator, @@ -90,6 +91,7 @@ impl Miner { params: &Params, target_txs_per_block: u64, target_blocks: Option<u64>, + long_payload: bool, ) -> Self { let (schnorr_public_key, _) = pk.x_only_public_key(); let script_pub_key_script = once(0x20).chain(schnorr_public_key.serialize()).chain(once(0xac)).collect_vec(); // TODO: Use script builder when available to create p2pk properly @@ -114,6 +116,7 @@ impl Miner { params.mass_per_sig_op, params.storage_mass_parameter, ), + long_payload, } } @@ -143,7 +146,10 @@ impl Miner { .iter() .filter_map(|&outpoint| { let entry = self.get_spendable_entry(virtual_utxo_view, outpoint, virtual_state.daa_score)?; - let unsigned_tx = self.create_unsigned_tx(outpoint, entry.amount, multiple_outputs); + let mut unsigned_tx = self.create_unsigned_tx(outpoint, entry.amount, multiple_outputs); + if self.long_payload { + unsigned_tx.payload = vec![0; 90_000]; + } Some(MutableTransaction::with_entries(unsigned_tx, vec![entry])) }) .take(self.target_txs_per_block as usize) diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs index 63e5a3b6c..79ac6fad7 100644 --- a/simpa/src/simulator/network.rs +++ b/simpa/src/simulator/network.rs @@ -50,6 +50,7 @@ impl KaspaNetworkSimulator { rocksdb_stats_period_sec: Option, rocksdb_files_limit: Option, rocksdb_mem_budget: Option, + long_payload: bool, ) -> &mut Self { let secp = secp256k1::Secp256k1::new(); let mut rng = rand::thread_rng(); @@ -98,6 +99,7 @@ impl KaspaNetworkSimulator { &self.config, target_txs_per_block, self.target_blocks, + long_payload, )); self.simulation.register(i, miner_process); self.consensuses.push((consensus, handles, lifetime)); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index
e66baaf69..52d9b7986 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -16,7 +16,9 @@ use kaspa_consensus::model::stores::headers::HeaderStoreReader; use kaspa_consensus::model::stores::reachability::DbReachabilityStore; use kaspa_consensus::model::stores::relations::DbRelationsStore; use kaspa_consensus::model::stores::selected_chain::SelectedChainStoreReader; -use kaspa_consensus::params::{Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64}; +use kaspa_consensus::params::{ + ForkActivation, Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, +}; use kaspa_consensus::pipeline::monitor::ConsensusMonitor; use kaspa_consensus::pipeline::ProcessingCounters; use kaspa_consensus::processes::reachability::tests::{DagBlock, DagBuilder, StoreValidationExtensions}; @@ -25,7 +27,8 @@ use kaspa_consensus_core::api::{BlockValidationFutures, ConsensusApi}; use kaspa_consensus_core::block::Block; use kaspa_consensus_core::blockhash::new_unique; use kaspa_consensus_core::blockstatus::BlockStatus; -use kaspa_consensus_core::constants::{BLOCK_VERSION, STORAGE_MASS_PARAMETER}; +use kaspa_consensus_core::coinbase::MinerData; +use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}; use kaspa_consensus_core::errors::block::{BlockProcessResult, RuleError}; use kaspa_consensus_core::header::Header; use kaspa_consensus_core::network::{NetworkId, NetworkType::Mainnet}; @@ -41,9 +44,13 @@ use kaspa_core::time::unix_now; use kaspa_database::utils::get_kaspa_tempdir; use kaspa_hashes::Hash; +use crate::common; use flate2::read::GzDecoder; use futures_util::future::try_join_all; use itertools::Itertools; +use kaspa_consensus_core::errors::tx::TxRuleError; +use kaspa_consensus_core::merkle::calc_hash_merkle_root; +use kaspa_consensus_core::muhash::MuHashExtensions; use kaspa_core::core::Core; use kaspa_core::signals::Shutdown; use kaspa_core::task::runtime::AsyncRuntime; @@ -55,6 +62,7 @@ use kaspa_math::Uint256; use kaspa_muhash::MuHash; use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_txscript::caches::TxScriptCacheCounters; +use kaspa_txscript::opcodes::codes::OpTrue; use kaspa_utxoindex::api::{UtxoIndexApi, UtxoIndexProxy}; use kaspa_utxoindex::UtxoIndex; use serde::{Deserialize, Serialize}; @@ -70,8 +78,6 @@ use std::{ str::{from_utf8, FromStr}, }; -use crate::common; - #[derive(Serialize, Deserialize, Debug)] struct JsonBlock { id: String, @@ -553,7 +559,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation_daa_score = u64::MAX; + p.sampling_activation = ForkActivation::never(); }) .build(), }, @@ -562,7 +568,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); p.new_timestamp_deviation_tolerance = 120; p.past_median_time_sample_rate = 3; p.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; @@ -807,7 +813,7 @@ impl KaspadGoParams { past_median_time_sample_rate: 1, past_median_time_sampled_window_size: 2 * self.TimestampDeviationTolerance - 1, target_time_per_block: self.TargetTimePerBlock / 1_000_000, - sampling_activation_daa_score: u64::MAX, + sampling_activation: ForkActivation::never(), max_block_parents: 
self.MaxBlockParents, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, @@ -830,13 +836,15 @@ impl KaspadGoParams { mass_per_sig_op: self.MassPerSigOp, max_block_mass: self.MaxBlockMass, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation_daa_score: u64::MAX, + storage_mass_activation: ForkActivation::never(), + kip10_activation: ForkActivation::never(), deflationary_phase_daa_score: self.DeflationaryPhaseDaaScore, pre_deflationary_phase_base_subsidy: self.PreDeflationaryPhaseBaseSubsidy, coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, skip_proof_of_work: self.SkipProofOfWork, max_block_level: self.MaxBlockLevel, pruning_proof_m: self.PruningProofM, + payload_activation: ForkActivation::never(), } } } @@ -1359,7 +1367,7 @@ async fn difficulty_test() { fn full_window_bits(consensus: &TestConsensus, hash: Hash) -> u32 { let window_size = consensus.params().difficulty_window_size(0) * consensus.params().difficulty_sample_rate(0) as usize; let ghostdag_data = &consensus.ghostdag_store().get_data(hash).unwrap(); - let window = consensus.window_manager().block_window(ghostdag_data, WindowType::FullDifficultyWindow).unwrap(); + let window = consensus.window_manager().block_window(ghostdag_data, WindowType::VaryingWindow(window_size)).unwrap(); assert_eq!(window.blocks.len(), window_size); let daa_window = consensus.window_manager().calc_daa_window(ghostdag_data, window); consensus.window_manager().calculate_difficulty_bits(ghostdag_data, &daa_window) @@ -1388,7 +1396,7 @@ async fn difficulty_test() { .edit_consensus_params(|p| { p.ghostdag_k = 1; p.legacy_difficulty_window_size = FULL_WINDOW_SIZE; - p.sampling_activation_daa_score = u64::MAX; + p.sampling_activation = ForkActivation::never(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.legacy_timestamp_deviation_tolerance = 60; @@ -1404,7 +1412,7 @@ async fn difficulty_test() { p.ghostdag_k = 1; p.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE; p.difficulty_sample_rate = SAMPLE_RATE; - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.past_median_time_sample_rate = PMT_SAMPLE_RATE; @@ -1423,7 +1431,7 @@ async fn difficulty_test() { p.target_time_per_block /= HIGH_BPS; p.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE; p.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS; - p.sampling_activation_daa_score = 0; + p.sampling_activation = ForkActivation::always(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window p.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS; @@ -1755,3 +1763,245 @@ async fn staging_consensus_test() { core.shutdown(); core.join(joins); } + +/// Tests the KIP-10 transaction introspection opcode activation by verifying that: +/// 1. Transactions using these opcodes are rejected before the activation DAA score +/// 2. 
The same transactions are accepted at and after the activation score +/// Uses OpInputSpk opcode as an example +#[tokio::test] +async fn run_kip10_activation_test() { + use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; + use kaspa_txscript::opcodes::codes::{Op0, OpTxInputSpk}; + use kaspa_txscript::pay_to_script_hash_script; + use kaspa_txscript::script_builder::ScriptBuilder; + + // KIP-10 activates at DAA score 3 in this test + const KIP10_ACTIVATION_DAA_SCORE: u64 = 3; + + init_allocator_with_default_settings(); + + // Create P2SH script that attempts to use OpInputSpk - this will be our test subject + // The script should fail before KIP-10 activation and succeed after + let redeem_script = ScriptBuilder::new() + .add_op(Op0).unwrap() // Push 0 for input index + .add_op(OpTxInputSpk).unwrap() // Get the input's script pubkey + .drain(); + let spk = pay_to_script_hash_script(&redeem_script); + + // Set up initial UTXO with our test script + let initial_utxo_collection = [( + TransactionOutpoint::new(1.into(), 0), + UtxoEntry { amount: SOMPI_PER_KASPA, script_public_key: spk.clone(), block_daa_score: 0, is_coinbase: false }, + )]; + + // Initialize consensus with KIP-10 activation point + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .apply_args(|cfg| { + let mut genesis_multiset = MuHash::new(); + initial_utxo_collection.iter().for_each(|(outpoint, utxo)| { + genesis_multiset.add_utxo(outpoint, utxo); + }); + cfg.params.genesis.utxo_commitment = genesis_multiset.finalize(); + let genesis_header: Header = (&cfg.params.genesis).into(); + cfg.params.genesis.hash = genesis_header.hash; + }) + .edit_consensus_params(|p| { + p.kip10_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE); + }) + .build(); + + let consensus = TestConsensus::new(&config); + let mut genesis_multiset = MuHash::new(); + consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset); + consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); + consensus.init(); + + // Build blockchain up to one block before activation + let mut index = 0; + for _ in 0..KIP10_ACTIVATION_DAA_SCORE - 1 { + let parent = if index == 0 { config.genesis.hash } else { index.into() }; + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); + index += 1; + } + assert_eq!(consensus.get_virtual_daa_score(), index); + + // Create transaction that attempts to use the KIP-10 opcode + let mut spending_tx = Transaction::new( + 0, + vec![TransactionInput::new( + initial_utxo_collection[0].0, + ScriptBuilder::new().add_data(&redeem_script).unwrap().drain(), + 0, + 0, + )], + vec![TransactionOutput::new(initial_utxo_collection[0].1.amount - 5000, spk)], + 0, + SUBNETWORK_ID_NATIVE, + 0, + vec![], + ); + spending_tx.finalize(); + let tx_id = spending_tx.id(); + // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it + { + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); + + // First build block without transactions + let mut block = + consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); + + // Insert our test transaction and recalculate block hashes + block.transactions.push(spending_tx.clone()); + block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + let block_status = 
consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; + assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain))); + assert_eq!(consensus.lkg_virtual_state.load().daa_score, 2); + index += 1; + } + // Add one more block to reach activation score + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); + index += 1; + + // Test 2: Verify the same transaction is accepted after activation + let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![spending_tx.clone()]).await; + assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); + assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); +} + +#[tokio::test] +async fn payload_test() { + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.coinbase_maturity = 0; + p.payload_activation = ForkActivation::always() + }) + .build(); + let consensus = TestConsensus::new(&config); + let wait_handles = consensus.init(); + + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![OpTrue]), vec![]); + let b = consensus.build_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], miner_data.clone(), vec![]); + consensus.validate_and_insert_block(b.to_immutable()).virtual_state_task.await.unwrap(); + let funding_block = consensus.build_utxo_valid_block_with_parents(2.into(), vec![1.into()], miner_data, vec![]); + let cb_id = { + let mut cb = funding_block.transactions[0].clone(); + cb.finalize(); + cb.id() + }; + consensus.validate_and_insert_block(funding_block.to_immutable()).virtual_state_task.await.unwrap(); + let tx = Transaction::new( + 0, + vec![TransactionInput::new(TransactionOutpoint { transaction_id: cb_id, index: 0 }, vec![], 0, 0)], + vec![TransactionOutput::new(1, ScriptPublicKey::default())], + 0, + SubnetworkId::default(), + 0, + vec![0; (config.params.max_block_mass / 2) as usize], + ); + consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx]).await.unwrap(); + + consensus.shutdown(wait_handles); +} + +#[tokio::test] +async fn payload_activation_test() { + use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; + + // Set payload activation at DAA score 3 for this test + const PAYLOAD_ACTIVATION_DAA_SCORE: u64 = 3; + + init_allocator_with_default_settings(); + + // Create initial UTXO to fund our test transactions + let initial_utxo_collection = [( + TransactionOutpoint::new(1.into(), 0), + UtxoEntry { + amount: SOMPI_PER_KASPA, + script_public_key: ScriptPublicKey::from_vec(0, vec![OpTrue]), + block_daa_score: 0, + is_coinbase: false, + }, + )]; + + // Initialize consensus with payload activation point + let config = ConfigBuilder::new(DEVNET_PARAMS) + .skip_proof_of_work() + .apply_args(|cfg| { + let mut genesis_multiset = MuHash::new(); + initial_utxo_collection.iter().for_each(|(outpoint, utxo)| { + genesis_multiset.add_utxo(outpoint, utxo); + }); + cfg.params.genesis.utxo_commitment = genesis_multiset.finalize(); + let genesis_header: Header = (&cfg.params.genesis).into(); + cfg.params.genesis.hash = genesis_header.hash; + }) + .edit_consensus_params(|p| { + p.payload_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE); + }) + .build(); + + let consensus = TestConsensus::new(&config); + let mut genesis_multiset = MuHash::new(); + consensus.append_imported_pruning_point_utxos(&initial_utxo_collection, &mut genesis_multiset); +
consensus.import_pruning_point_utxo_set(config.genesis.hash, genesis_multiset).unwrap(); + consensus.init(); + + // Build blockchain up to one block before activation + let mut index = 0; + for _ in 0..PAYLOAD_ACTIVATION_DAA_SCORE - 1 { + let parent = if index == 0 { config.genesis.hash } else { index.into() }; + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![parent], vec![]).await.unwrap(); + index += 1; + } + assert_eq!(consensus.get_virtual_daa_score(), index); + + // Create transaction with large payload + let large_payload = vec![0u8; (config.params.max_block_mass / 2) as usize]; + let mut tx_with_payload = Transaction::new( + 0, + vec![TransactionInput::new( + initial_utxo_collection[0].0, + vec![], // Empty signature script since we're using OpTrue + 0, + 0, + )], + vec![TransactionOutput::new(initial_utxo_collection[0].1.amount - 5000, ScriptPublicKey::from_vec(0, vec![OpTrue]))], + 0, + SUBNETWORK_ID_NATIVE, + 0, + large_payload, + ); + tx_with_payload.finalize(); + let tx_id = tx_with_payload.id(); + + // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it + { + let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]); + + // First build block without transactions + let mut block = + consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]); + + // Insert our test transaction and recalculate block hashes + block.transactions.push(tx_with_payload.clone()); + + block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false); + let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await; + assert!(matches!(block_status, Err(RuleError::TxInContextFailed(tx, TxRuleError::NonCoinbaseTxHasPayload)) if tx == tx_id)); + assert_eq!(consensus.lkg_virtual_state.load().daa_score, PAYLOAD_ACTIVATION_DAA_SCORE - 1); + index += 1; + } + + // Add one more block to reach activation score + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap(); + index += 1; + + // Test 2: Verify the same transaction is accepted after activation + let status = + consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx_with_payload.clone()]).await; + + assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid))); + assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id)); +} diff --git a/utils/src/as_slice.rs b/utils/src/as_slice.rs index fd73c3903..7fc0459c6 100644 --- a/utils/src/as_slice.rs +++ b/utils/src/as_slice.rs @@ -16,7 +16,7 @@ pub trait AsMutSlice: AsSlice { fn as_mut_slice(&mut self) -> &mut [Self::Element]; } -impl<'a, S> AsSlice for &'a S +impl<S> AsSlice for &S where S: ?Sized + AsSlice, { @@ -27,7 +27,7 @@ where } } -impl<'a, S> AsSlice for &'a mut S +impl<S> AsSlice for &mut S where S: ?Sized + AsSlice, { @@ -38,7 +38,7 @@ where } } -impl<'a, S> AsMutSlice for &'a mut S +impl<S> AsMutSlice for &mut S where S: ?Sized + AsMutSlice, { diff --git a/utils/src/iter.rs b/utils/src/iter.rs index 58a61d770..3d38eff12 100644 --- a/utils/src/iter.rs +++ b/utils/src/iter.rs @@ -25,7 +25,7 @@ impl<'a, I> ReusableIterFormat<'a, I> { } } -impl<'a, I> std::fmt::Display for ReusableIterFormat<'a, I> +impl<I> std::fmt::Display for ReusableIterFormat<'_, I> where I: std::clone::Clone, I: Iterator, @@ -37,7 +37,7 @@ where } } -impl<'a, I> std::fmt::Debug for +impl<I> std::fmt::Debug for
ReusableIterFormat<'_, I> where I: std::clone::Clone, I: Iterator, @@ -48,3 +48,9 @@ where self.inner.clone().fmt(f) } } + +/// Returns an iterator over powers of two up to (the rounded up) available parallelism: `2, 4, 8, ..., 2^(available_parallelism.log2().ceil())`, +/// i.e., for `std::thread::available_parallelism = 15` the function will return `2, 4, 8, 16` +pub fn parallelism_in_power_steps() -> impl Iterator<Item = usize> { + (1..=(std::thread::available_parallelism().unwrap().get() as f64).log2().ceil() as u32).map(|x| 2usize.pow(x)) +} diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 3d1bb5438..c6cc077c4 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -14,7 +14,6 @@ pub mod hex; pub mod iter; pub mod mem_size; pub mod networking; -pub mod option; pub mod refs; pub mod as_slice; diff --git a/utils/src/option.rs b/utils/src/option.rs deleted file mode 100644 index 3e619f46f..000000000 --- a/utils/src/option.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub trait OptionExtensions<T> { - /// Substitute for unstable [`Option::is_none_or`] - fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool; -} - -impl<T> OptionExtensions<T> for Option<T> { - fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool { - match self { - Some(v) => f(v), - None => true, - } - } -} diff --git a/utils/src/serde_bytes/de.rs b/utils/src/serde_bytes/de.rs index 66064bfe3..6a634db0c 100644 --- a/utils/src/serde_bytes/de.rs +++ b/utils/src/serde_bytes/de.rs @@ -29,7 +29,7 @@ pub struct FromHexVisitor<'de, T: FromHex> { lifetime: std::marker::PhantomData<&'de ()>, } -impl<'de, T: FromHex> Default for FromHexVisitor<'de, T> { +impl<T: FromHex> Default for FromHexVisitor<'_, T> { fn default() -> Self { Self { marker: Default::default(), lifetime: Default::default() } } diff --git a/utils/tower/Cargo.toml b/utils/tower/Cargo.toml index 2a2f5f796..010f8843b 100644 --- a/utils/tower/Cargo.toml +++ b/utils/tower/Cargo.toml @@ -14,9 +14,11 @@ cfg-if.workspace = true log.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +bytes.workspace = true futures.workspace = true -hyper.workspace = true +http-body.workspace = true +http-body-util.workspace = true pin-project-lite.workspace = true tokio.workspace = true tower-http.workspace = true -tower.workspace = true \ No newline at end of file +tower.workspace = true diff --git a/utils/tower/src/middleware.rs b/utils/tower/src/middleware.rs index 727d8ca47..8d0fa77c3 100644 --- a/utils/tower/src/middleware.rs +++ b/utils/tower/src/middleware.rs @@ -1,9 +1,6 @@ -use futures::ready; -use hyper::{ - body::{Bytes, HttpBody, SizeHint}, - HeaderMap, -}; -use log::*; +use bytes::Bytes; +use http_body::{Body, Frame, SizeHint}; +use log::trace; use pin_project_lite::pin_project; use std::{ pin::Pin, @@ -11,11 +8,12 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; + +pub use http_body_util::BodyExt; pub use tower::ServiceBuilder; -pub use tower_http::map_request_body::MapRequestBodyLayer; -pub use tower_http::map_response_body::MapResponseBodyLayer; +pub use tower_http::{map_request_body::MapRequestBodyLayer, map_response_body::MapResponseBodyLayer}; pin_project!
{ pub struct CountBytesBody<B> { @@ -31,32 +29,29 @@ impl<B> CountBytesBody<B> { } } -impl<B> HttpBody for CountBytesBody<B> +impl<B> Body for CountBytesBody<B> where - B: HttpBody<Data = Bytes> + Default, + B: Body<Data = Bytes> + Default, { type Data = B::Data; type Error = B::Error; - fn poll_data(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Self::Data, Self::Error>>> { + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> { let this = self.project(); - let counter: Arc<AtomicUsize> = this.counter.clone(); - match ready!(this.inner.poll_data(cx)) { - Some(Ok(chunk)) => { - debug!("[SIZE MW] response body chunk size = {}", chunk.len()); - let _previous = counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); + match ready!(this.inner.poll_frame(cx)) { + Some(Ok(frame)) => { + if let Some(chunk) = frame.data_ref() { + trace!("[SIZE MW] body chunk size = {}", chunk.len()); + let _previous = this.counter.fetch_add(chunk.len(), Ordering::Relaxed); + trace!("[SIZE MW] total count: {}", _previous); + } - Poll::Ready(Some(Ok(chunk))) + Poll::Ready(Some(Ok(frame))) } x => Poll::Ready(x), } } - fn poll_trailers(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<Option<HeaderMap>, Self::Error>> { - self.project().inner.poll_trailers(cx) - } - fn is_end_stream(&self) -> bool { self.inner.is_end_stream() } @@ -68,43 +63,9 @@ where impl<B> Default for CountBytesBody<B> where - B: HttpBody + Default, + B: Body + Default, { fn default() -> Self { Self { inner: Default::default(), counter: Default::default() } } } - -pub fn measure_request_body_size_layer<B1, B2, F>( - bytes_sent_counter: Arc<AtomicUsize>, - f: F, -) -> MapRequestBodyLayer<impl Fn(B1) -> B2 + Clone> -where - B1: HttpBody<Data = Bytes> + Unpin + Send + 'static, - <B1 as HttpBody>::Error: Send, - F: Fn(hyper::body::Body) -> B2 + Clone, -{ - MapRequestBodyLayer::new(move |mut body: B1| { - let (mut tx, new_body) = hyper::Body::channel(); - let bytes_sent_counter = bytes_sent_counter.clone(); - tokio::spawn(async move { - while let Some(Ok(chunk)) = body.data().await { - debug!("[SIZE MW] request body chunk size = {}", chunk.len()); - let _previous = bytes_sent_counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); - if let Err(_err) = tx.send_data(chunk).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending data: {}", _err) - } - } - - if let Ok(Some(trailers)) = body.trailers().await { - if let Err(_err) = tx.send_trailers(trailers).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending trailers: {}", _err) - } - } - }); - f(new_body) - }) -} diff --git a/wallet/bip32/src/mnemonic/bits.rs b/wallet/bip32/src/mnemonic/bits.rs index 5ed09af00..08ff65ef6 100644 --- a/wallet/bip32/src/mnemonic/bits.rs +++ b/wallet/bip32/src/mnemonic/bits.rs @@ -56,7 +56,7 @@ impl Bits for u8 { } } -impl<'a> Bits for &'a u8 { +impl Bits for &'_ u8 { const SIZE: usize = 8; fn bits(self) -> u32 { diff --git a/wallet/bip32/src/xpublic_key.rs b/wallet/bip32/src/xpublic_key.rs index ac4eb720c..ff9678cac 100644 --- a/wallet/bip32/src/xpublic_key.rs +++ b/wallet/bip32/src/xpublic_key.rs @@ -10,7 +10,7 @@ use hmac::Mac; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; -/// Extended public secp256k1 ECDSA verification key. +///// Extended public secp256k1 ECDSA verification key.
//#[cfg(feature = "secp256k1")] //#[cfg_attr(docsrs, doc(cfg(feature = "secp256k1")))] //pub type XPub = ExtendedPublicKey<secp256k1::PublicKey>; diff --git a/wallet/core/src/account/pskb.rs b/wallet/core/src/account/pskb.rs index a475b09cc..fbc138d44 100644 --- a/wallet/core/src/account/pskb.rs +++ b/wallet/core/src/account/pskb.rs @@ -9,7 +9,7 @@ use crate::tx::PaymentOutputs; use futures::stream; use kaspa_bip32::{DerivationPath, KeyFingerprint, PrivateKey}; use kaspa_consensus_client::UtxoEntry as ClientUTXO; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::tx::VerifiableTransaction; use kaspa_consensus_core::tx::{TransactionInput, UtxoEntry}; use kaspa_txscript::extract_script_pub_key_address; @@ -160,7 +160,7 @@ pub async fn pskb_signer_for_address( key_fingerprint: KeyFingerprint, ) -> Result<Bundle> { let mut signed_bundle = Bundle::new(); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); // If set, sign-for address is used for signing. // Else, all addresses from inputs are. @@ -186,7 +186,7 @@ pub async fn pskb_signer_for_address( for pskt_inner in bundle.iter().cloned() { let pskt: PSKT<Signer> = PSKT::from(pskt_inner); - let mut sign = |signer_pskt: PSKT<Signer>| { + let sign = |signer_pskt: PSKT<Signer>| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result<Vec<SignInputOk>, String> { tx.tx .inputs .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); // When address represents a locked UTXO, no private key is available. diff --git a/wallet/core/src/message.rs b/wallet/core/src/message.rs index 01dc78676..152bf28aa 100644 --- a/wallet/core/src/message.rs +++ b/wallet/core/src/message.rs @@ -15,13 +15,28 @@ impl AsRef<[u8]> for PersonalMessage<'_> { } } +#[derive(Clone)] +pub struct SignMessageOptions { + /// The auxiliary randomness exists only to mitigate specific kinds of power analysis + /// side-channel attacks. Providing it definitely improves security, but omitting it + /// should not be considered dangerous, as most legacy signature schemes don't provide + /// mitigations against such attacks.
To read more about the relevant discussions that + /// arose in adding this randomness please see: https://github.com/sipa/bips/issues/195 + pub no_aux_rand: bool, +} + /// Sign a message with the given private key -pub fn sign_message(msg: &PersonalMessage, privkey: &[u8; 32]) -> Result<Vec<u8>, Error> { +pub fn sign_message(msg: &PersonalMessage, privkey: &[u8; 32], options: &SignMessageOptions) -> Result<Vec<u8>, Error> { let hash = calc_personal_message_hash(msg); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice())?; let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, privkey)?; - let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); + + let sig: [u8; 64] = if options.no_aux_rand { + *secp256k1::SECP256K1.sign_schnorr_no_aux_rand(&msg, &schnorr_key).as_ref() + } else { + *schnorr_key.sign_schnorr(msg).as_ref() + }; Ok(sig.to_vec()) } @@ -74,7 +89,26 @@ mod tests { ]) .unwrap(); - verify_message(&pm, &sign_message(&pm, &privkey).expect("sign_message failed"), &pubkey).expect("verify_message failed"); + let sign_with_aux_rand = SignMessageOptions { no_aux_rand: false }; + let sign_with_no_aux_rand = SignMessageOptions { no_aux_rand: true }; + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); + } + + #[test] + fn test_basic_sign_without_rand_twice_should_get_same_signature() { + let pm = PersonalMessage("Hello Kaspa!"); + let privkey: [u8; 32] = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + ]; + + let sign_with_no_aux_rand = SignMessageOptions { no_aux_rand: true }; + let signature = sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"); + let signature_twice = sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"); + assert_eq!(signature, signature_twice); } #[test] @@ -90,7 +124,12 @@ mod tests { ]) .unwrap(); - verify_message(&pm, &sign_message(&pm, &privkey).expect("sign_message failed"), &pubkey).expect("verify_message failed"); + let sign_with_aux_rand = SignMessageOptions { no_aux_rand: false }; + let sign_with_no_aux_rand = SignMessageOptions { no_aux_rand: true }; + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); } #[test] @@ -110,7 +149,12 @@ Ut omnis magnam et accusamus earum rem impedit provident eum commodi repellat qu ]) .unwrap(); - verify_message(&pm, &sign_message(&pm, &privkey).expect("sign_message failed"), &pubkey).expect("verify_message failed"); + let sign_with_aux_rand = SignMessageOptions { no_aux_rand: false }; + let sign_with_no_aux_rand = SignMessageOptions { no_aux_rand: true }; + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); + verify_message(&pm, &sign_message(&pm, &privkey, &sign_with_no_aux_rand).expect("sign_message failed"), &pubkey) + .expect("verify_message failed"); } #[test] diff --git
a/wallet/core/src/storage/local/mod.rs b/wallet/core/src/storage/local/mod.rs index 07e612a51..0c220b64f 100644 --- a/wallet/core/src/storage/local/mod.rs +++ b/wallet/core/src/storage/local/mod.rs @@ -37,21 +37,30 @@ pub fn default_storage_folder() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. - unsafe { DEFAULT_STORAGE_FOLDER.get_or_insert("~/.kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_STORAGE_FOLDER.get_or_insert("~/.kaspa".to_string()).as_str() + } } pub fn default_wallet_file() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. - unsafe { DEFAULT_WALLET_FILE.get_or_insert("kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_WALLET_FILE.get_or_insert("kaspa".to_string()).as_str() + } } pub fn default_settings_file() -> &'static str { // SAFETY: This operation is initializing a static mut variable, // however, the actual variable is accessible only through // this function. - unsafe { DEFAULT_SETTINGS_FILE.get_or_insert("kaspa".to_string()).as_str() } + #[allow(static_mut_refs)] + unsafe { + DEFAULT_SETTINGS_FILE.get_or_insert("kaspa".to_string()).as_str() + } } /// Set a custom storage folder for the wallet SDK diff --git a/wallet/core/src/tx/generator/generator.rs b/wallet/core/src/tx/generator/generator.rs index cbbbbc34d..1cfd070e7 100644 --- a/wallet/core/src/tx/generator/generator.rs +++ b/wallet/core/src/tx/generator/generator.rs @@ -672,7 +672,6 @@ impl Generator { } */ - fn generate_transaction_data(&self, context: &mut Context, stage: &mut Stage) -> Result<(DataKind, Data)> { let calc = &self.inner.mass_calculator; let mut data = Data::new(calc); diff --git a/wallet/core/src/tx/payment.rs b/wallet/core/src/tx/payment.rs index c164e0d78..350b8c98f 100644 --- a/wallet/core/src/tx/payment.rs +++ b/wallet/core/src/tx/payment.rs @@ -72,7 +72,7 @@ pub struct PaymentOutput { impl TryCastFromJs for PaymentOutput { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { @@ -158,7 +158,7 @@ impl PaymentOutputs { impl TryCastFromJs for PaymentOutputs { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index 06cb76434..ffec7d65c 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -57,7 +57,7 @@ pub struct SingleWalletFileV1<'a, T: AsRef<[u8]>> { pub ecdsa: bool, } -impl<'a, T: AsRef<[u8]>> SingleWalletFileV1<'a, T> { +impl<T: AsRef<[u8]>> SingleWalletFileV1<'_, T> { const NUM_THREADS: u32 = 8; } @@ -80,7 +80,7 @@ pub struct MultisigWalletFileV1<'a, T: AsRef<[u8]>> { pub ecdsa: bool, } -impl<'a, T: AsRef<[u8]>> MultisigWalletFileV1<'a, T> { +impl<T: AsRef<[u8]>> MultisigWalletFileV1<'_, T> { const NUM_THREADS: u32 = 8; } diff --git a/wallet/core/src/wasm/cryptobox.rs b/wallet/core/src/wasm/cryptobox.rs index 957d4fc35..76e9fdc3c 100644 --- a/wallet/core/src/wasm/cryptobox.rs +++ b/wallet/core/src/wasm/cryptobox.rs @@ -35,7 +35,7 @@ impl CryptoBoxPrivateKey { impl TryCastFromJs for CryptoBoxPrivateKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>> + fn try_cast_from<'a,
R>(value: &'a R) -> Result<Cast<'a, Self>> where R: AsRef<JsValue> + 'a, { @@ -66,7 +66,7 @@ pub struct CryptoBoxPublicKey { impl TryCastFromJs for CryptoBoxPublicKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/core/src/wasm/message.rs b/wallet/core/src/wasm/message.rs index 25c7f399a..372129280 100644 --- a/wallet/core/src/wasm/message.rs +++ b/wallet/core/src/wasm/message.rs @@ -14,6 +14,7 @@ const TS_MESSAGE_TYPES: &'static str = r#" export interface ISignMessage { message: string; privateKey: PrivateKey | string; + noAuxRand?: boolean; } "#; @@ -30,10 +31,12 @@ pub fn js_sign_message(value: ISignMessage) -> Result { if let Some(object) = Object::try_from(&value) { let private_key = object.cast_into::<PrivateKey>("privateKey")?; let raw_msg = object.get_string("message")?; + let no_aux_rand = object.get_bool("noAuxRand").unwrap_or(false); let mut privkey_bytes = [0u8; 32]; privkey_bytes.copy_from_slice(&private_key.secret_bytes()); let pm = PersonalMessage(&raw_msg); - let sig_vec = sign_message(&pm, &privkey_bytes)?; + let sign_options = SignMessageOptions { no_aux_rand }; + let sig_vec = sign_message(&pm, &privkey_bytes, &sign_options)?; privkey_bytes.zeroize(); Ok(faster_hex::hex_string(sig_vec.as_slice()).into()) } else { diff --git a/wallet/core/src/wasm/utxo/context.rs b/wallet/core/src/wasm/utxo/context.rs index 3298a4829..9c78fe689 100644 --- a/wallet/core/src/wasm/utxo/context.rs +++ b/wallet/core/src/wasm/utxo/context.rs @@ -252,7 +252,7 @@ impl From for native::UtxoContext { impl TryCastFromJs for UtxoContext { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/core/src/wasm/utxo/processor.rs b/wallet/core/src/wasm/utxo/processor.rs index d68f10f76..ac089d485 100644 --- a/wallet/core/src/wasm/utxo/processor.rs +++ b/wallet/core/src/wasm/utxo/processor.rs @@ -197,7 +197,7 @@ impl UtxoProcessor { impl TryCastFromJs for UtxoProcessor { type Error = workflow_wasm::error::Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/derivation_path.rs b/wallet/keys/src/derivation_path.rs index a5389ca37..7867f886f 100644 --- a/wallet/keys/src/derivation_path.rs +++ b/wallet/keys/src/derivation_path.rs @@ -57,7 +57,7 @@ impl DerivationPath { impl TryCastFromJs for DerivationPath { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/keypair.rs b/wallet/keys/src/keypair.rs index 2cc3d5760..7c58d18bd 100644 --- a/wallet/keys/src/keypair.rs +++ b/wallet/keys/src/keypair.rs @@ -104,7 +104,7 @@ impl Keypair { impl TryCastFromJs for Keypair { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/privatekey.rs b/wallet/keys/src/privatekey.rs index 554bdf36e..75911a3ff 100644 --- a/wallet/keys/src/privatekey.rs +++ b/wallet/keys/src/privatekey.rs @@ -95,7 +95,7 @@ impl PrivateKey { impl TryCastFromJs for PrivateKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>,
Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/publickey.rs b/wallet/keys/src/publickey.rs index 235eb8080..f3c951ae2 100644 --- a/wallet/keys/src/publickey.rs +++ b/wallet/keys/src/publickey.rs @@ -155,7 +155,7 @@ extern "C" { impl TryCastFromJs for PublicKey { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/xprv.rs b/wallet/keys/src/xprv.rs index c19e0b9cc..a0ea428a0 100644 --- a/wallet/keys/src/xprv.rs +++ b/wallet/keys/src/xprv.rs @@ -146,7 +146,7 @@ extern "C" { impl TryCastFromJs for XPrv { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/keys/src/xpub.rs b/wallet/keys/src/xpub.rs index 8706f3fc9..64fc5f78e 100644 --- a/wallet/keys/src/xpub.rs +++ b/wallet/keys/src/xpub.rs @@ -116,7 +116,7 @@ extern "C" { impl TryCastFromJs for XPub { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs index fb011402f..7a9ca190e 100644 --- a/wallet/pskt/examples/multisig.rs +++ b/wallet/pskt/examples/multisig.rs @@ -1,5 +1,5 @@ use kaspa_consensus_core::{ - hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, tx::{TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, script_builder::ScriptBuilder}; @@ -51,8 +51,8 @@ fn main() { println!("Serialized after setting sequence: {}", ser_updated); let signer_pskt: PSKT<Signer> = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); - let mut reused_values = SigHashReusedValues::new(); - let mut sign = |signer_pskt: PSKT<Signer>, kp: &Keypair| { + let reused_values = SigHashReusedValuesUnsync::new(); + let sign = |signer_pskt: PSKT<Signer>, kp: &Keypair| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result<Vec<SignInputOk>, String> { let tx = dbg!(tx); @@ -61,7 +61,7 @@ fn main() { .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); Ok(SignInputOk { signature: Signature::Schnorr(kp.sign_schnorr(msg)), diff --git a/wallet/pskt/src/bundle.rs b/wallet/pskt/src/bundle.rs index 6c926c666..e08474d7a 100644 --- a/wallet/pskt/src/bundle.rs +++ b/wallet/pskt/src/bundle.rs @@ -247,23 +247,18 @@ mod tests { use secp256k1::Secp256k1; use secp256k1::{rand::thread_rng, Keypair}; use std::str::FromStr; - use std::sync::Once; + use std::sync::LazyLock; - static INIT: Once = Once::new(); - static mut CONTEXT: Option<Box<([Keypair; 2], Vec<u8>)>> = None; + static CONTEXT: LazyLock<Box<([Keypair; 2], Vec<u8>)>> = LazyLock::new(|| { + let kps = [Keypair::new(&Secp256k1::new(), &mut thread_rng()), Keypair::new(&Secp256k1::new(), &mut thread_rng())]; + let redeem_script: Vec<u8> = + multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2).expect("Test multisig redeem script"); - fn mock_context() -> &'static ([Keypair; 2], Vec<u8>) { - unsafe { - INIT.call_once(|| { - let
kps = [Keypair::new(&Secp256k1::new(), &mut thread_rng()), Keypair::new(&Secp256k1::new(), &mut thread_rng())]; - let redeem_script: Vec<u8> = multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2) - .expect("Test multisig redeem script"); - - CONTEXT = Some(Box::new((kps, redeem_script))); - }); + Box::new((kps, redeem_script)) + }); - CONTEXT.as_ref().unwrap() - } + fn mock_context() -> &'static ([Keypair; 2], Vec<u8>) { + CONTEXT.as_ref() } // Mock multisig PSKT from example diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs index 73f87a628..abae18f2d 100644 --- a/wallet/pskt/src/pskt.rs +++ b/wallet/pskt/src/pskt.rs @@ -3,6 +3,7 @@ //! use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; @@ -14,7 +15,7 @@ pub use crate::output::{Output, OutputBuilder}; pub use crate::role::{Combiner, Constructor, Creator, Extractor, Finalizer, Signer, Updater}; use kaspa_consensus_core::tx::UtxoEntry; use kaspa_consensus_core::{ - hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + hashing::sighash_type::SigHashType, subnets::SUBNETWORK_ID_NATIVE, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, }; @@ -432,10 +433,10 @@ impl PSKT { { let tx = tx.as_verifiable(); let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { - TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?; + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache, false).execute()?; <Result<(), TxScriptError>>::Ok(()) })?; } diff --git a/wallet/pskt/src/wasm/pskt.rs b/wallet/pskt/src/wasm/pskt.rs index 8ee370a4b..53c8a1cc3 100644 --- a/wallet/pskt/src/wasm/pskt.rs +++ b/wallet/pskt/src/wasm/pskt.rs @@ -90,7 +90,7 @@ pub struct PSKT { impl TryCastFromJs for PSKT { type Error = Error; - fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<Self>, Self::Error> + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<'a, Self>, Self::Error> where R: AsRef<JsValue> + 'a, { diff --git a/wasm/examples/nodejs/javascript/general/message-signing.js b/wasm/examples/nodejs/javascript/general/message-signing.js index ed12afd45..832af2ca6 100644 --- a/wasm/examples/nodejs/javascript/general/message-signing.js +++ b/wasm/examples/nodejs/javascript/general/message-signing.js @@ -12,8 +12,8 @@ let message = 'Hello Kaspa!'; let privkey = 'b7e151628aed2a6abf7158809cf4f3c762e7160f38b4da56a784d9045190cfef'; let pubkey = 'dff1d77f2a671c5f36183726db2341be58feae1da2deced843240f7b502ba659'; -function runDemo(message, privateKey, publicKey) { - let signature = signMessage({message, privateKey}); +function runDemo(message, privateKey, publicKey, noAuxRand) { + let signature = signMessage({message, privateKey, noAuxRand}); console.info(`Message: ${message} => Signature: ${signature}`); @@ -26,5 +26,7 @@ function runDemo(message, privateKey, publicKey) { // Using strings: runDemo(message, privkey, pubkey); +runDemo(message, privkey, pubkey, true); // Using Objects: runDemo(message, new PrivateKey(privkey), new
PublicKey(pubkey), true);
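The end-to-end effect of the `noAuxRand` option threaded through `sign_message` above: dropping the auxiliary randomness makes Schnorr signing deterministic, so the same key and message always yield the same signature (the property asserted by `test_basic_sign_without_rand_twice_should_get_same_signature`). A minimal standalone sketch, assuming the `secp256k1` crate with its global-context feature enabled; the key and digest values here are hypothetical stand-ins, not taken from the source:

```rust
use secp256k1::{Keypair, Message, SECP256K1};

fn main() {
    let privkey = [3u8; 32]; // hypothetical test key
    let keypair = Keypair::from_seckey_slice(SECP256K1, &privkey).unwrap();
    let digest = [7u8; 32]; // stand-in for a personal-message hash
    let msg = Message::from_digest_slice(&digest).unwrap();

    // No auxiliary randomness: the same (key, message) pair always
    // produces the same signature, mirroring no_aux_rand = true.
    let sig1 = SECP256K1.sign_schnorr_no_aux_rand(&msg, &keypair);
    let sig2 = SECP256K1.sign_schnorr_no_aux_rand(&msg, &keypair);
    assert_eq!(sig1, sig2);
}
```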