diff --git a/.config/nextest.toml b/.config/nextest.toml
new file mode 100644
index 00000000000..ba2b1f6a49b
--- /dev/null
+++ b/.config/nextest.toml
@@ -0,0 +1,8 @@
+[profile.default]
+retries = 2
+
+[profile.ci]
+fail-fast = false
+failure-output = "immediate-final"
+slow-timeout = { period = "30s", terminate-after = 2 }
+
diff --git a/.github/workflows/iroha2-dev-pr.yml b/.github/workflows/iroha2-dev-pr.yml
index feb5a1f8f56..bff72e8eac7 100644
--- a/.github/workflows/iroha2-dev-pr.yml
+++ b/.github/workflows/iroha2-dev-pr.yml
@@ -20,6 +20,8 @@ env:
IROHA_CLI_DIR: "/__w/${{ github.event.repository.name }}/${{ github.event.repository.name }}/test"
DOCKER_COMPOSE_PATH: defaults
WASM_SAMPLES_TARGET_DIR: wasm_samples/target/prebuilt
+ TEST_NETWORK_TMP_DIR: /tmp
+ NEXTEST_PROFILE: ci
jobs:
consistency:
@@ -83,7 +85,7 @@ jobs:
path: ${{ env.DOCKER_COMPOSE_PATH }}/executor.wasm
retention-days: 1
- unit_tests_with_coverage:
+ test_with_coverage:
runs-on: [self-hosted, Linux, iroha2]
container:
image: hyperledger/iroha2-ci:nightly-2024-09-09
@@ -92,29 +94,32 @@ jobs:
LLVM_PROFILE_FILE_NAME: "iroha-%p-%m.profraw"
steps:
- uses: actions/checkout@v4
+ - uses: taiki-e/install-action@nextest
+ - uses: taiki-e/install-action@cargo-llvm-cov
- name: Download executor.wasm
uses: actions/download-artifact@v4
with:
name: executor.wasm
path: ${{ env.DOCKER_COMPOSE_PATH }}
- - uses: taiki-e/install-action@nextest
- - uses: taiki-e/install-action@cargo-llvm-cov
- - name: Run unit tests (no default features)
+ - name: Download the rest of WASM samples
+ uses: actions/download-artifact@v4
+ with:
+ name: wasm_samples
+ path: ${{ env.WASM_SAMPLES_TARGET_DIR }}
+ - name: Install irohad
+ run: which irohad || cargo install --path crates/irohad --locked
+ - name: Test with no default features
+ id: test_no_features
run: >
mold --run cargo llvm-cov nextest
- --no-fail-fast
- --workspace --lib
--no-default-features
- --branch
- --no-report
- - name: Run unit tests (all features)
+ --branch --no-report
+ - name: Test with all features
+ id: test_all_features
run: >
mold --run cargo llvm-cov nextest
- --no-fail-fast
- --workspace --lib
--all-features
- --branch
- --no-report
+ --branch --no-report
- name: Generate lcov report
run: cargo llvm-cov report --text --output-path coverage.txt
- name: Upload lcov report
@@ -122,64 +127,13 @@ jobs:
with:
name: report-coverage
path: coverage.txt
-
- # include: iroha/tests/integration/
- # exclude: iroha/tests/integration/extra_functional
- integration:
- runs-on: [self-hosted, Linux, iroha2]
- container:
- image: hyperledger/iroha2-ci:nightly-2024-09-09
- needs: build_wasm_samples
- timeout-minutes: 30
- steps:
- - uses: actions/checkout@v4
- - name: Download executor.wasm
- uses: actions/download-artifact@v4
- with:
- name: executor.wasm
- path: ${{ env.DOCKER_COMPOSE_PATH }}
- - name: Download the rest of WASM samples
- uses: actions/download-artifact@v4
- with:
- name: wasm_samples
- path: ${{ env.WASM_SAMPLES_TARGET_DIR }}
- - uses: taiki-e/install-action@nextest
- - name: Run integration tests, with all features
- run: >
- mold --run cargo nextest run
- --all-features
- --no-fail-fast
- --failure-output immediate-final
- -E 'package(iroha) and test(integration) and not test(extra_functional)'
-
- # include: iroha/tests/integration/extra_functional
- extra_functional:
- runs-on: [self-hosted, Linux, iroha2]
- container:
- image: hyperledger/iroha2-ci:nightly-2024-09-09
- needs: build_wasm_samples
- timeout-minutes: 60
- steps:
- - uses: actions/checkout@v4
- - name: Download executor.wasm
- uses: actions/download-artifact@v4
- with:
- name: executor.wasm
- path: ${{ env.DOCKER_COMPOSE_PATH }}
- - name: Download the rest of WASM samples
- uses: actions/download-artifact@v4
+ - name: Upload test network artifacts
+ if: failure() && (steps.test_no_features.outcome == 'failure' || steps.test_all_features.outcome == 'failure')
+ uses: actions/upload-artifact@v4
with:
- name: wasm_samples
- path: ${{ env.WASM_SAMPLES_TARGET_DIR }}
- - uses: taiki-e/install-action@nextest
- - name: Run integration tests, with all features
- run: >
- mold --run cargo nextest run
- --all-features
- --no-fail-fast
- --failure-output final
- --test-threads 1
- -E 'test(extra_functional)'
+ name: test_network_runs
+ path: ${{ env.TEST_NETWORK_TMP_DIR }}/irohad_test_network_*
+ retention-days: 3
# Run the job to check that the docker containers are properly buildable
pr-generator-build:
diff --git a/.github/workflows/iroha2-pr-ui.yml b/.github/workflows/iroha2-pr-ui.yml
index 33dd0b1922a..207efffe35f 100644
--- a/.github/workflows/iroha2-pr-ui.yml
+++ b/.github/workflows/iroha2-pr-ui.yml
@@ -17,6 +17,7 @@ concurrency:
env:
CARGO_TERM_COLOR: always
+ NEXTEST_PROFILE: ci
jobs:
tests:
@@ -31,4 +32,4 @@ jobs:
- uses: actions/checkout@v4
- uses: taiki-e/install-action@nextest
- name: Run UI tests, with ${{ matrix.feature_flag }}
- run: mold --run cargo nextest run --no-fail-fast -E 'test(ui)' --${{ matrix.feature_flag }}
+ run: mold --run cargo nextest run -E 'test(ui)' --${{ matrix.feature_flag }}
diff --git a/.gitignore b/.gitignore
index f4baa3a1eac..70d5f05109b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,3 +51,4 @@ result
/lcov.info
test_docker
**/*.wasm
+.iroha_test_network_run.json*
diff --git a/Cargo.lock b/Cargo.lock
index abddda19711..33fc39067ec 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -156,9 +156,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.86"
+version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
[[package]]
name = "arbitrary"
@@ -306,9 +306,9 @@ dependencies = [
[[package]]
name = "arrayref"
-version = "0.3.8"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a"
+checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"
[[package]]
name = "arrayvec"
@@ -316,6 +316,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
+[[package]]
+name = "assert_matches"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9"
+
[[package]]
name = "assertables"
version = "7.0.1"
@@ -324,9 +330,9 @@ checksum = "0c24e9d990669fbd16806bff449e4ac644fd9b1fca014760087732fe4102f131"
[[package]]
name = "async-stream"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
@@ -335,26 +341,32 @@ dependencies = [
[[package]]
name = "async-stream-impl"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "async-trait"
-version = "0.1.81"
+version = "0.1.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
[[package]]
name = "attohttpc"
version = "0.28.0"
@@ -364,7 +376,7 @@ dependencies = [
"http 1.1.0",
"log",
"native-tls",
- "rustls",
+ "rustls 0.22.4",
"rustls-native-certs",
"url",
"webpki-roots",
@@ -383,9 +395,9 @@ dependencies = [
[[package]]
name = "autocfg"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "axum"
@@ -410,20 +422,20 @@ dependencies = [
"rustversion",
"serde",
"sync_wrapper 0.1.2",
- "tower",
+ "tower 0.4.13",
"tower-layer",
"tower-service",
]
[[package]]
name = "axum"
-version = "0.7.5"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
+checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae"
dependencies = [
"async-trait",
- "axum-core 0.4.3",
- "base64 0.21.7",
+ "axum-core 0.4.5",
+ "base64 0.22.1",
"bytes",
"futures-util",
"http 1.1.0",
@@ -446,8 +458,8 @@ dependencies = [
"sha1",
"sync_wrapper 1.0.1",
"tokio",
- "tokio-tungstenite",
- "tower",
+ "tokio-tungstenite 0.24.0",
+ "tower 0.5.1",
"tower-layer",
"tower-service",
]
@@ -471,9 +483,9 @@ dependencies = [
[[package]]
name = "axum-core"
-version = "0.4.3"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
dependencies = [
"async-trait",
"bytes",
@@ -484,11 +496,25 @@ dependencies = [
"mime",
"pin-project-lite",
"rustversion",
- "sync_wrapper 0.1.2",
+ "sync_wrapper 1.0.1",
"tower-layer",
"tower-service",
]
+[[package]]
+name = "backoff"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1"
+dependencies = [
+ "futures-core",
+ "getrandom",
+ "instant",
+ "pin-project-lite",
+ "rand",
+ "tokio",
+]
+
[[package]]
name = "backtrace"
version = "0.3.71"
@@ -590,7 +616,7 @@ dependencies = [
"proc-macro-crate",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"syn_derive",
]
@@ -601,7 +627,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
dependencies = [
"memchr",
- "regex-automata 0.4.7",
+ "regex-automata 0.4.8",
"serde",
]
@@ -658,9 +684,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
-version = "1.7.1"
+version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
[[package]]
name = "camino"
@@ -702,9 +728,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
-version = "1.1.13"
+version = "1.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48"
+checksum = "812acba72f0a070b003d3697490d2b55b837230ae7c6c6497f05cc2ddbb8d938"
dependencies = [
"jobserver",
"libc",
@@ -802,9 +828,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.5.16"
+version = "4.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019"
+checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
dependencies = [
"clap_builder",
"clap_derive",
@@ -812,9 +838,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.5.15"
+version = "4.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6"
+checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
dependencies = [
"anstream",
"anstyle",
@@ -824,14 +850,14 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.5.13"
+version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
+checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1000,18 +1026,18 @@ dependencies = [
[[package]]
name = "cpp_demangle"
-version = "0.4.3"
+version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119"
+checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d"
dependencies = [
"cfg-if",
]
[[package]]
name = "cpufeatures"
-version = "0.2.13"
+version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad"
+checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
dependencies = [
"libc",
]
@@ -1291,14 +1317,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "cxx"
-version = "1.0.126"
+version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c4eae4b7fc8dcb0032eb3b1beee46b38d371cdeaf2d0c64b9944f6f69ad7755"
+checksum = "54ccead7d199d584d139148b04b4a368d1ec7556a1d9ea2548febb1b9d49f9a4"
dependencies = [
"cc",
"cxxbridge-flags",
@@ -1308,9 +1334,9 @@ dependencies = [
[[package]]
name = "cxx-build"
-version = "1.0.126"
+version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c822bf7fb755d97328d6c337120b6f843678178751cba33c9da25cf522272e0"
+checksum = "c77953e99f01508f89f55c494bfa867171ef3a6c8cea03d26975368f2121a5c1"
dependencies = [
"cc",
"codespan-reporting",
@@ -1318,24 +1344,24 @@ dependencies = [
"proc-macro2",
"quote",
"scratch",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "cxxbridge-flags"
-version = "1.0.126"
+version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "719d6197dc016c88744aff3c0d0340a01ecce12e8939fc282e7c8f583ee64bc6"
+checksum = "65777e06cc48f0cb0152024c77d6cf9e4bdb4408e7b48bea993d42fa0f5b02b6"
[[package]]
name = "cxxbridge-macro"
-version = "1.0.126"
+version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35de3b547387863c8f82013c4f79f1c2162edee956383e4089e1d04c18c4f16c"
+checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1359,7 +1385,7 @@ dependencies = [
"proc-macro2",
"quote",
"strsim",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1370,7 +1396,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
dependencies = [
"darling_core",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1446,7 +1472,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1457,7 +1483,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1501,7 +1527,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1695,9 +1721,9 @@ checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183"
[[package]]
name = "fastrand"
-version = "2.1.0"
+version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
+checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
[[package]]
name = "ff"
@@ -1717,9 +1743,9 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "filetime"
-version = "0.2.24"
+version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf401df4a4e3872c4fe8151134cf483738e74b67fc934d6532c882b3d24a4550"
+checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
dependencies = [
"cfg-if",
"libc",
@@ -1747,9 +1773,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
-version = "1.0.32"
+version = "1.0.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c0596c1eac1f9e04ed902702e9878208b336edc9d6fddc8a48387349bab3666"
+checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0"
dependencies = [
"crc32fast",
"miniz_oxide 0.8.0",
@@ -1785,6 +1811,16 @@ dependencies = [
"percent-encoding",
]
+[[package]]
+name = "fslock"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
[[package]]
name = "funty"
version = "2.0.0"
@@ -1847,7 +1883,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -1926,14 +1962,14 @@ dependencies = [
[[package]]
name = "getset"
-version = "0.1.2"
+version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e45727250e75cc04ff2846a66397da8ef2b3db8e40e0cef4df67950a07621eb9"
+checksum = "f636605b743120a8d32ed92fc27b6cde1a769f8f936c065151eb66f88ded513c"
dependencies = [
- "proc-macro-error",
+ "proc-macro-error2",
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn 2.0.79",
]
[[package]]
@@ -1943,7 +1979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
dependencies = [
"fallible-iterator",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"stable_deref_trait",
]
@@ -2000,7 +2036,7 @@ dependencies = [
"gix-utils",
"itoa",
"thiserror",
- "winnow 0.6.18",
+ "winnow",
]
[[package]]
@@ -2053,7 +2089,7 @@ dependencies = [
"smallvec",
"thiserror",
"unicode-bom",
- "winnow 0.6.18",
+ "winnow",
]
[[package]]
@@ -2219,7 +2255,7 @@ checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -2238,7 +2274,7 @@ dependencies = [
"itoa",
"smallvec",
"thiserror",
- "winnow 0.6.18",
+ "winnow",
]
[[package]]
@@ -2281,9 +2317,9 @@ dependencies = [
[[package]]
name = "gix-path"
-version = "0.10.10"
+version = "0.10.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38d5b8722112fa2fa87135298780bc833b0e9f6c56cc82795d209804b3a03484"
+checksum = "ebfc4febd088abdcbc9f1246896e57e37b7a34f6909840045a1767c6dafac7af"
dependencies = [
"bstr",
"gix-trace",
@@ -2322,7 +2358,7 @@ dependencies = [
"gix-validate",
"memmap2",
"thiserror",
- "winnow 0.6.18",
+ "winnow",
]
[[package]]
@@ -2399,9 +2435,9 @@ dependencies = [
[[package]]
name = "gix-trace"
-version = "0.1.9"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f924267408915fddcd558e3f37295cc7d6a3e50f8bd8b606cee0808c3915157e"
+checksum = "6cae0e8661c3ff92688ce1c8b8058b3efb312aba9492bbe93661a21705ab431b"
[[package]]
name = "gix-traverse"
@@ -2483,7 +2519,26 @@ dependencies = [
"futures-sink",
"futures-util",
"http 0.2.12",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "h2"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http 1.1.0",
+ "indexmap 2.6.0",
"slab",
"tokio",
"tokio-util",
@@ -2529,6 +2584,12 @@ dependencies = [
"serde",
]
+[[package]]
+name = "hashbrown"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+
[[package]]
name = "hdrhistogram"
version = "7.5.4"
@@ -2675,9 +2736,9 @@ dependencies = [
[[package]]
name = "httparse"
-version = "1.9.4"
+version = "1.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
[[package]]
name = "httpdate"
@@ -2701,7 +2762,7 @@ dependencies = [
"futures-channel",
"futures-core",
"futures-util",
- "h2",
+ "h2 0.3.26",
"http 0.2.12",
"http-body 0.4.6",
"httparse",
@@ -2724,6 +2785,7 @@ dependencies = [
"bytes",
"futures-channel",
"futures-util",
+ "h2 0.4.6",
"http 1.1.0",
"http-body 1.0.1",
"httparse",
@@ -2732,6 +2794,24 @@ dependencies = [
"pin-project-lite",
"smallvec",
"tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.27.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333"
+dependencies = [
+ "futures-util",
+ "http 1.1.0",
+ "hyper 1.4.1",
+ "hyper-util",
+ "rustls 0.23.13",
+ "rustls-pki-types",
+ "tokio",
+ "tokio-rustls 0.26.0",
+ "tower-service",
]
[[package]]
@@ -2746,26 +2826,46 @@ dependencies = [
"tokio-io-timeout",
]
+[[package]]
+name = "hyper-tls"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0"
+dependencies = [
+ "bytes",
+ "http-body-util",
+ "hyper 1.4.1",
+ "hyper-util",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+]
+
[[package]]
name = "hyper-util"
-version = "0.1.7"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9"
+checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b"
dependencies = [
"bytes",
+ "futures-channel",
"futures-util",
"http 1.1.0",
"http-body 1.0.1",
"hyper 1.4.1",
"pin-project-lite",
+ "socket2",
"tokio",
+ "tower-service",
+ "tracing",
]
[[package]]
name = "iana-time-zone"
-version = "0.1.60"
+version = "0.1.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@@ -2842,12 +2942,12 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.4.0"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
- "hashbrown 0.14.5",
+ "hashbrown 0.15.0",
"serde",
]
@@ -2876,15 +2976,30 @@ dependencies = [
"unicode-width",
]
+[[package]]
+name = "instant"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "ipnet"
+version = "2.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708"
+
[[package]]
name = "iroha"
version = "2.0.0-rc.1.0"
dependencies = [
+ "assert_matches",
"assertables",
"attohttpc",
"base64 0.22.1",
"color-eyre",
- "criterion",
"derive_more",
"displaydoc",
"error-stack",
@@ -2906,22 +3021,20 @@ dependencies = [
"iroha_test_samples",
"iroha_torii_const",
"iroha_version",
- "irohad",
"nonzero_ext",
"parity-scale-codec",
"rand",
+ "reqwest",
"serde",
"serde_json",
"serde_with",
"tempfile",
"thiserror",
"tokio",
- "tokio-tungstenite",
+ "tokio-tungstenite 0.21.0",
"toml",
- "tracing-flame",
- "tracing-subscriber",
"trybuild",
- "tungstenite",
+ "tungstenite 0.21.0",
"url",
]
@@ -3027,7 +3140,7 @@ dependencies = [
"proc-macro2",
"quote",
"serde",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3045,7 +3158,7 @@ dependencies = [
"eyre",
"futures",
"hex",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"iroha_config",
"iroha_crypto",
"iroha_data_model",
@@ -3157,7 +3270,7 @@ dependencies = [
"quote",
"serde",
"serde_json",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3171,7 +3284,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3208,7 +3321,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3220,7 +3333,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3246,7 +3359,7 @@ dependencies = [
"quote",
"rustc-hash",
"strum 0.25.0",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3275,7 +3388,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3349,7 +3462,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3422,7 +3535,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3446,7 +3559,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3486,7 +3599,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3543,7 +3656,7 @@ dependencies = [
"streaming-stats",
"tokio",
"tokio-stream",
- "tokio-tungstenite",
+ "tokio-tungstenite 0.21.0",
"url",
"vergen",
]
@@ -3557,7 +3670,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3565,7 +3678,10 @@ dependencies = [
name = "iroha_test_network"
version = "2.0.0-rc.1.0"
dependencies = [
- "eyre",
+ "backoff",
+ "color-eyre",
+ "derive_more",
+ "fslock",
"futures",
"iroha",
"iroha_config",
@@ -3577,15 +3693,19 @@ dependencies = [
"iroha_genesis",
"iroha_logger",
"iroha_primitives",
+ "iroha_telemetry",
"iroha_test_samples",
- "iroha_wasm_builder",
- "irohad",
+ "nix 0.29.0",
"parity-scale-codec",
"rand",
+ "serde",
"serde_json",
"tempfile",
+ "thiserror",
"tokio",
+ "toml",
"unique_port",
+ "which",
]
[[package]]
@@ -3603,7 +3723,7 @@ name = "iroha_torii"
version = "2.0.0-rc.1.0"
dependencies = [
"async-trait",
- "axum 0.7.5",
+ "axum 0.7.7",
"displaydoc",
"error-stack",
"eyre",
@@ -3630,7 +3750,7 @@ dependencies = [
"thiserror",
"tokio",
"tower-http",
- "tungstenite",
+ "tungstenite 0.21.0",
]
[[package]]
@@ -3658,7 +3778,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3688,7 +3808,7 @@ dependencies = [
"quote",
"serde",
"serde_json",
- "syn 2.0.75",
+ "syn 2.0.79",
"trybuild",
]
@@ -3725,7 +3845,7 @@ dependencies = [
"manyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -3868,9 +3988,9 @@ dependencies = [
[[package]]
name = "k256"
-version = "0.13.3"
+version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b"
+checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b"
dependencies = [
"cfg-if",
"ecdsa",
@@ -3912,9 +4032,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"
[[package]]
name = "libc"
-version = "0.2.158"
+version = "0.2.159"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5"
[[package]]
name = "libflate"
@@ -3959,9 +4079,9 @@ dependencies = [
[[package]]
name = "libsodium-sys-stable"
-version = "1.21.1"
+version = "1.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fd1882b85f01cdd4021c0664fd897710a04c5d01b593a5a70e1b0baa999c1f8"
+checksum = "42631d334de875c636a1aae7adb515653ac2e771e5a2ce74b1053f5a4412df3a"
dependencies = [
"cc",
"libc",
@@ -4039,7 +4159,7 @@ dependencies = [
"manyhow-macros",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -4085,9 +4205,9 @@ dependencies = [
[[package]]
name = "memmap2"
-version = "0.9.4"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322"
+checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f"
dependencies = [
"libc",
]
@@ -4115,9 +4235,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "minisign-verify"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "933dca44d65cdd53b355d0b73d380a2ff5da71f87f036053188bf1eab6a19881"
+checksum = "a05b5d0594e0cb1ad8cee3373018d2b84e25905dc75b2468114cc9a8e86cfc20"
[[package]]
name = "miniz_oxide"
@@ -4225,6 +4345,18 @@ dependencies = [
"libc",
]
+[[package]]
+name = "nix"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
+dependencies = [
+ "bitflags 2.6.0",
+ "cfg-if",
+ "cfg_aliases",
+ "libc",
+]
+
[[package]]
name = "nom"
version = "7.1.3"
@@ -4305,21 +4437,24 @@ dependencies = [
[[package]]
name = "object"
-version = "0.36.3"
+version = "0.36.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9"
+checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a"
dependencies = [
"crc32fast",
"hashbrown 0.14.5",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"memchr",
]
[[package]]
name = "once_cell"
-version = "1.19.0"
+version = "1.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1"
+dependencies = [
+ "portable-atomic",
+]
[[package]]
name = "oorandom"
@@ -4356,7 +4491,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -4367,9 +4502,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-src"
-version = "300.3.1+3.3.1"
+version = "300.3.2+3.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91"
+checksum = "a211a18d945ef7e648cc6e0058f4c548ee46aab922ea203e0d30e966ea23647b"
dependencies = [
"cc",
]
@@ -4492,9 +4627,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "pest"
-version = "2.7.11"
+version = "2.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95"
+checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9"
dependencies = [
"memchr",
"thiserror",
@@ -4503,9 +4638,9 @@ dependencies = [
[[package]]
name = "pest_derive"
-version = "2.7.11"
+version = "2.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a"
+checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0"
dependencies = [
"pest",
"pest_generator",
@@ -4513,22 +4648,22 @@ dependencies = [
[[package]]
name = "pest_generator"
-version = "2.7.11"
+version = "2.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183"
+checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e"
dependencies = [
"pest",
"pest_meta",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "pest_meta"
-version = "2.7.11"
+version = "2.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f"
+checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f"
dependencies = [
"once_cell",
"pest",
@@ -4542,7 +4677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
]
[[package]]
@@ -4562,7 +4697,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -4589,15 +4724,15 @@ dependencies = [
[[package]]
name = "pkg-config"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
+checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
[[package]]
name = "plotters"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3"
+checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
@@ -4608,15 +4743,15 @@ dependencies = [
[[package]]
name = "plotters-backend"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7"
+checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
-version = "0.3.6"
+version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705"
+checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
@@ -4632,6 +4767,12 @@ dependencies = [
"universal-hash",
]
+[[package]]
+name = "portable-atomic"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
+
[[package]]
name = "postcard"
version = "1.0.10"
@@ -4660,7 +4801,7 @@ dependencies = [
"findshlibs",
"libc",
"log",
- "nix",
+ "nix 0.26.4",
"once_cell",
"parking_lot",
"protobuf",
@@ -4696,16 +4837,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "788992637e9c73f809f7bdc647572785efb06cb7c860105a4e55e9c7d6935d39"
dependencies = [
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "proc-macro-crate"
-version = "3.1.0"
+version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284"
+checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b"
dependencies = [
- "toml_edit 0.21.1",
+ "toml_edit",
]
[[package]]
@@ -4717,7 +4858,6 @@ dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
- "syn 1.0.109",
"version_check",
]
@@ -4732,6 +4872,28 @@ dependencies = [
"version_check",
]
+[[package]]
+name = "proc-macro-error-attr2"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
+[[package]]
+name = "proc-macro-error2"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802"
+dependencies = [
+ "proc-macro-error-attr2",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.79",
+]
+
[[package]]
name = "proc-macro-utils"
version = "0.8.0"
@@ -4792,7 +4954,7 @@ dependencies = [
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -4831,9 +4993,9 @@ dependencies = [
[[package]]
name = "psm"
-version = "0.1.21"
+version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874"
+checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205"
dependencies = [
"cc",
]
@@ -4925,9 +5087,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
-version = "0.5.3"
+version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
+checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
dependencies = [
"bitflags 2.6.0",
]
@@ -4958,14 +5120,14 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.10.6"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.7",
- "regex-syntax 0.8.4",
+ "regex-automata 0.4.8",
+ "regex-syntax 0.8.5",
]
[[package]]
@@ -4979,13 +5141,13 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.7"
+version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
dependencies = [
"aho-corasick",
"memchr",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
]
[[package]]
@@ -4996,9 +5158,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rend"
@@ -5009,6 +5171,49 @@ dependencies = [
"bytecheck",
]
+[[package]]
+name = "reqwest"
+version = "0.12.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "encoding_rs",
+ "futures-core",
+ "futures-util",
+ "h2 0.4.6",
+ "http 1.1.0",
+ "http-body 1.0.1",
+ "http-body-util",
+ "hyper 1.4.1",
+ "hyper-rustls",
+ "hyper-tls",
+ "hyper-util",
+ "ipnet",
+ "js-sys",
+ "log",
+ "mime",
+ "native-tls",
+ "once_cell",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustls-pemfile",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "sync_wrapper 1.0.1",
+ "system-configuration",
+ "tokio",
+ "tokio-native-tls",
+ "tower-service",
+ "url",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "windows-registry",
+]
+
[[package]]
name = "rfc6979"
version = "0.4.0"
@@ -5099,18 +5304,18 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc_version"
-version = "0.4.0"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
+checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [
"semver",
]
[[package]]
name = "rustix"
-version = "0.38.34"
+version = "0.38.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f"
+checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811"
dependencies = [
"bitflags 2.6.0",
"errno",
@@ -5133,11 +5338,24 @@ dependencies = [
"zeroize",
]
+[[package]]
+name = "rustls"
+version = "0.23.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8"
+dependencies = [
+ "once_cell",
+ "rustls-pki-types",
+ "rustls-webpki",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "rustls-native-certs"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04182dffc9091a404e0fc069ea5cd60e5b866c3adf881eff99a32d048242dffa"
+checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5"
dependencies = [
"openssl-probe",
"rustls-pemfile",
@@ -5148,25 +5366,24 @@ dependencies = [
[[package]]
name = "rustls-pemfile"
-version = "2.1.3"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
dependencies = [
- "base64 0.22.1",
"rustls-pki-types",
]
[[package]]
name = "rustls-pki-types"
-version = "1.8.0"
+version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
+checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55"
[[package]]
name = "rustls-webpki"
-version = "0.102.6"
+version = "0.102.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
dependencies = [
"ring",
"rustls-pki-types",
@@ -5196,20 +5413,20 @@ dependencies = [
[[package]]
name = "scc"
-version = "2.1.16"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aeb7ac86243095b70a7920639507b71d51a63390d1ba26c4f60a552fbb914a37"
+checksum = "836f1e0f4963ef5288b539b643b35e043e76a32d0f4e47e67febf69576527f50"
dependencies = [
"sdd",
]
[[package]]
name = "schannel"
-version = "0.1.23"
+version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534"
+checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -5226,9 +5443,9 @@ checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152"
[[package]]
name = "sdd"
-version = "3.0.2"
+version = "3.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0495e4577c672de8254beb68d01a9b62d0e8a13c099edecdbedccce3223cd29f"
+checksum = "60a7b59a5d9b0099720b417b6325d91a52cbf5b3dcb5041d864be53eefa58abc"
[[package]]
name = "seahash"
@@ -5252,9 +5469,9 @@ dependencies = [
[[package]]
name = "secp256k1"
-version = "0.29.0"
+version = "0.29.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3"
+checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113"
dependencies = [
"rand",
"secp256k1-sys",
@@ -5263,9 +5480,9 @@ dependencies = [
[[package]]
name = "secp256k1-sys"
-version = "0.10.0"
+version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b"
+checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9"
dependencies = [
"cc",
]
@@ -5285,9 +5502,9 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.11.1"
+version = "2.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf"
+checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6"
dependencies = [
"core-foundation-sys",
"libc",
@@ -5304,29 +5521,29 @@ dependencies = [
[[package]]
name = "serde"
-version = "1.0.208"
+version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2"
+checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.208"
+version = "1.0.210"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf"
+checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
name = "serde_json"
-version = "1.0.125"
+version = "1.0.128"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed"
+checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
dependencies = [
"itoa",
"memchr",
@@ -5346,9 +5563,9 @@ dependencies = [
[[package]]
name = "serde_spanned"
-version = "0.6.7"
+version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d"
+checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
dependencies = [
"serde",
]
@@ -5367,15 +5584,15 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.9.0"
+version = "3.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857"
+checksum = "9720086b3357bcb44fce40117d769a4d068c70ecfa190850a980a71755f66fcc"
dependencies = [
"base64 0.22.1",
"chrono",
"hex",
"indexmap 1.9.3",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"serde",
"serde_derive",
"serde_json",
@@ -5385,14 +5602,14 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.9.0"
+version = "3.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350"
+checksum = "5f1abbfe725f27678f4663bcacb75a83e829fd464c25d78dd038a3a29e307cec"
dependencies = [
"darling",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5401,7 +5618,7 @@ version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"itoa",
"ryu",
"serde",
@@ -5430,7 +5647,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5547,9 +5764,9 @@ checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "simdutf8"
-version = "0.1.4"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
+checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "slab"
@@ -5700,7 +5917,7 @@ dependencies = [
"proc-macro2",
"quote",
"rustversion",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5731,9 +5948,9 @@ dependencies = [
[[package]]
name = "symbolic-common"
-version = "12.10.0"
+version = "12.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16629323a4ec5268ad23a575110a724ad4544aae623451de600c747bf87b36cf"
+checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77"
dependencies = [
"debugid",
"memmap2",
@@ -5743,9 +5960,9 @@ dependencies = [
[[package]]
name = "symbolic-demangle"
-version = "12.10.0"
+version = "12.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48c043a45f08f41187414592b3ceb53fb0687da57209cc77401767fb69d5b596"
+checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8"
dependencies = [
"cpp_demangle",
"rustc-demangle",
@@ -5765,9 +5982,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.75"
+version = "2.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9"
+checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590"
dependencies = [
"proc-macro2",
"quote",
@@ -5783,7 +6000,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5797,6 +6014,30 @@ name = "sync_wrapper"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"
+dependencies = [
+ "futures-core",
+]
+
+[[package]]
+name = "system-configuration"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
+dependencies = [
+ "bitflags 2.6.0",
+ "core-foundation",
+ "system-configuration-sys",
+]
+
+[[package]]
+name = "system-configuration-sys"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
[[package]]
name = "tap"
@@ -5806,9 +6047,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tar"
-version = "0.4.41"
+version = "0.4.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909"
+checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020"
dependencies = [
"filetime",
"libc",
@@ -5823,9 +6064,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
[[package]]
name = "tempfile"
-version = "3.12.0"
+version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b"
dependencies = [
"cfg-if",
"fastrand",
@@ -5845,22 +6086,22 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.63"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724"
+checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.63"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261"
+checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5943,9 +6184,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.39.3"
+version = "1.40.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5"
+checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
dependencies = [
"backtrace",
"bytes",
@@ -5977,7 +6218,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -5996,16 +6237,27 @@ version = "0.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f"
dependencies = [
- "rustls",
+ "rustls 0.22.4",
+ "rustls-pki-types",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-rustls"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4"
+dependencies = [
+ "rustls 0.23.13",
"rustls-pki-types",
"tokio",
]
[[package]]
name = "tokio-stream"
-version = "0.1.15"
+version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af"
+checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1"
dependencies = [
"futures-core",
"pin-project-lite",
@@ -6022,21 +6274,33 @@ dependencies = [
"futures-util",
"log",
"native-tls",
- "rustls",
+ "rustls 0.22.4",
"rustls-native-certs",
"rustls-pki-types",
"tokio",
"tokio-native-tls",
- "tokio-rustls",
- "tungstenite",
+ "tokio-rustls 0.25.0",
+ "tungstenite 0.21.0",
"webpki-roots",
]
+[[package]]
+name = "tokio-tungstenite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9"
+dependencies = [
+ "futures-util",
+ "log",
+ "tokio",
+ "tungstenite 0.24.0",
+]
+
[[package]]
name = "tokio-util"
-version = "0.7.11"
+version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1"
+checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a"
dependencies = [
"bytes",
"futures-core",
@@ -6056,7 +6320,7 @@ dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
- "toml_edit 0.22.20",
+ "toml_edit",
]
[[package]]
@@ -6070,26 +6334,15 @@ dependencies = [
[[package]]
name = "toml_edit"
-version = "0.21.1"
+version = "0.22.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
+checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5"
dependencies = [
- "indexmap 2.4.0",
- "toml_datetime",
- "winnow 0.5.40",
-]
-
-[[package]]
-name = "toml_edit"
-version = "0.22.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d"
-dependencies = [
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"serde",
"serde_spanned",
"toml_datetime",
- "winnow 0.6.18",
+ "winnow",
]
[[package]]
@@ -6103,7 +6356,7 @@ dependencies = [
"axum 0.6.20",
"base64 0.21.7",
"bytes",
- "h2",
+ "h2 0.3.26",
"http 0.2.12",
"http-body 0.4.6",
"hyper 0.14.30",
@@ -6113,7 +6366,7 @@ dependencies = [
"prost",
"tokio",
"tokio-stream",
- "tower",
+ "tower 0.4.13",
"tower-layer",
"tower-service",
"tracing",
@@ -6139,6 +6392,21 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "tower"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f"
+dependencies = [
+ "futures-core",
+ "futures-util",
+ "pin-project-lite",
+ "sync_wrapper 0.1.2",
+ "tokio",
+ "tower-layer",
+ "tower-service",
+]
+
[[package]]
name = "tower-http"
version = "0.5.2"
@@ -6189,7 +6457,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -6212,17 +6480,6 @@ dependencies = [
"tracing-subscriber",
]
-[[package]]
-name = "tracing-flame"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9"
-dependencies = [
- "lazy_static",
- "tracing",
- "tracing-subscriber",
-]
-
[[package]]
name = "tracing-futures"
version = "0.2.5"
@@ -6256,7 +6513,6 @@ dependencies = [
"serde",
"serde_json",
"sharded-slab",
- "smallvec",
"thread_local",
"tracing",
"tracing-core",
@@ -6297,7 +6553,7 @@ dependencies = [
"log",
"native-tls",
"rand",
- "rustls",
+ "rustls 0.22.4",
"rustls-native-certs",
"rustls-pki-types",
"sha1",
@@ -6307,6 +6563,24 @@ dependencies = [
"webpki-roots",
]
+[[package]]
+name = "tungstenite"
+version = "0.24.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a"
+dependencies = [
+ "byteorder",
+ "bytes",
+ "data-encoding",
+ "http 1.1.0",
+ "httparse",
+ "log",
+ "rand",
+ "sha1",
+ "thiserror",
+ "utf-8",
+]
+
[[package]]
name = "typeid"
version = "1.0.2"
@@ -6321,15 +6595,15 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "ucd-trie"
-version = "0.1.6"
+version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9"
+checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
[[package]]
name = "unicode-bidi"
-version = "0.3.15"
+version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
+checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893"
[[package]]
name = "unicode-bom"
@@ -6339,36 +6613,36 @@ checksum = "7eec5d1121208364f6793f7d2e222bf75a915c19557537745b195b253dd64217"
[[package]]
name = "unicode-ident"
-version = "1.0.12"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
[[package]]
name = "unicode-normalization"
-version = "0.1.23"
+version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
+checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
-version = "1.11.0"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202"
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
[[package]]
name = "unicode-width"
-version = "0.1.13"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
+checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "unicode-xid"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a"
+checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "unique_port"
@@ -6556,10 +6830,22 @@ dependencies = [
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"wasm-bindgen-shared",
]
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.93"
@@ -6578,7 +6864,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -6600,9 +6886,9 @@ dependencies = [
[[package]]
name = "wasm-encoder"
-version = "0.216.0"
+version = "0.218.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04c23aebea22c8a75833ae08ed31ccc020835b12a41999e58c31464271b94a88"
+checksum = "22b896fa8ceb71091ace9bcb81e853f54043183a1c9667cf93422c40252ffa0a"
dependencies = [
"leb128",
]
@@ -6656,7 +6942,7 @@ dependencies = [
"ahash 0.8.11",
"bitflags 2.6.0",
"hashbrown 0.14.5",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"semver",
"serde",
]
@@ -6687,7 +6973,7 @@ dependencies = [
"fxprof-processed-profile",
"gimli",
"hashbrown 0.14.5",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"ittapi",
"libc",
"libm",
@@ -6695,7 +6981,7 @@ dependencies = [
"mach2",
"memfd",
"memoffset",
- "object 0.36.3",
+ "object 0.36.4",
"once_cell",
"paste",
"postcard",
@@ -6765,7 +7051,7 @@ dependencies = [
"anyhow",
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
"wasmtime-component-util",
"wasmtime-wit-bindgen",
"wit-parser",
@@ -6793,7 +7079,7 @@ dependencies = [
"cranelift-wasm",
"gimli",
"log",
- "object 0.36.3",
+ "object 0.36.4",
"target-lexicon",
"thiserror",
"wasmparser",
@@ -6811,9 +7097,9 @@ dependencies = [
"cpp_demangle",
"cranelift-entity",
"gimli",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"log",
- "object 0.36.3",
+ "object 0.36.4",
"postcard",
"rustc-demangle",
"serde",
@@ -6847,7 +7133,7 @@ version = "22.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bc54198c6720f098210a85efb3ba8c078d1de4d373cdb6778850a66ae088d11"
dependencies = [
- "object 0.36.3",
+ "object 0.36.4",
"once_cell",
"rustix",
"wasmtime-versioned-export-macros",
@@ -6892,7 +7178,7 @@ checksum = "de5a9bc4f44ceeb168e9e8e3be4e0b4beb9095b468479663a9e24c667e36826f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -6904,7 +7190,7 @@ dependencies = [
"anyhow",
"cranelift-codegen",
"gimli",
- "object 0.36.3",
+ "object 0.36.4",
"target-lexicon",
"wasmparser",
"wasmtime-cranelift",
@@ -6920,28 +7206,28 @@ checksum = "70dc077306b38288262e5ba01d4b21532a6987416cdc0aedf04bb06c22a68fdc"
dependencies = [
"anyhow",
"heck 0.4.1",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"wit-parser",
]
[[package]]
name = "wast"
-version = "216.0.0"
+version = "218.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7eb1f2eecd913fdde0dc6c3439d0f24530a98ac6db6cb3d14d92a5328554a08"
+checksum = "8a53cd1f0fa505df97557e36a58bddb8296e2fcdcd089529545ebfdb18a1b9d7"
dependencies = [
"bumpalo",
"leb128",
"memchr",
"unicode-width",
- "wasm-encoder 0.216.0",
+ "wasm-encoder 0.218.0",
]
[[package]]
name = "wat"
-version = "1.216.0"
+version = "1.218.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac0409090fb5154f95fb5ba3235675fd9e579e731524d63b6a2f653e1280c82a"
+checksum = "4f87f8e14e776762e07927c27c2054d2cf678aab9aae2d431a79b3e31e4dd391"
dependencies = [
"wast",
]
@@ -6968,13 +7254,25 @@ dependencies = [
[[package]]
name = "webpki-roots"
-version = "0.26.3"
+version = "0.26.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd"
+checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958"
dependencies = [
"rustls-pki-types",
]
+[[package]]
+name = "which"
+version = "6.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f"
+dependencies = [
+ "either",
+ "home",
+ "rustix",
+ "winsafe",
+]
+
[[package]]
name = "winapi"
version = "0.3.9"
@@ -7032,6 +7330,36 @@ dependencies = [
"windows-targets 0.52.6",
]
+[[package]]
+name = "windows-registry"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0"
+dependencies = [
+ "windows-result",
+ "windows-strings",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-result"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e"
+dependencies = [
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "windows-strings"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10"
+dependencies = [
+ "windows-result",
+ "windows-targets 0.52.6",
+]
+
[[package]]
name = "windows-sys"
version = "0.48.0"
@@ -7182,21 +7510,18 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winnow"
-version = "0.5.40"
+version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876"
+checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b"
dependencies = [
"memchr",
]
[[package]]
-name = "winnow"
-version = "0.6.18"
+name = "winsafe"
+version = "0.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f"
-dependencies = [
- "memchr",
-]
+checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
[[package]]
name = "wit-parser"
@@ -7206,7 +7531,7 @@ checksum = "3e79b9e3c0b6bb589dec46317e645851e0db2734c44e2be5e251b03ff4a51269"
dependencies = [
"anyhow",
"id-arena",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"log",
"semver",
"serde",
@@ -7264,7 +7589,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -7284,7 +7609,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.75",
+ "syn 2.0.79",
]
[[package]]
@@ -7298,7 +7623,7 @@ dependencies = [
"crossbeam-utils",
"displaydoc",
"flate2",
- "indexmap 2.4.0",
+ "indexmap 2.6.0",
"memchr",
"thiserror",
"zopfli",
diff --git a/Cargo.toml b/Cargo.toml
index 228caded553..63d94b05425 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,7 +16,6 @@ categories = ["cryptography::cryptocurrencies"]
[workspace.dependencies]
iroha_core = { version = "=2.0.0-rc.1.0 ", path = "crates/iroha_core" }
-irohad = { version = "=2.0.0-rc.1.0", path = "crates/irohad" }
iroha_torii = { version = "=2.0.0-rc.1.0", path = "crates/iroha_torii" }
iroha_torii_const = { version = "=2.0.0-rc.1.0", path = "crates/iroha_torii_const" }
@@ -58,7 +57,7 @@ darling = "0.20.10"
drop_bomb = "0.1.5"
futures = { version = "0.3.30", default-features = false }
-tokio = "1.39.2"
+tokio = "1.40.0"
tokio-stream = "0.1.15"
tokio-tungstenite = "0.21.0"
tokio-util = "0.7.11"
diff --git a/README.md b/README.md
index 02fea5edf46..a2511eb2d3d 100644
--- a/README.md
+++ b/README.md
@@ -68,35 +68,6 @@ Prerequisites:
* (Optional) [Docker](https://docs.docker.com/get-docker/)
* (Optional) [Docker Compose](https://docs.docker.com/compose/install/)
- (Optional) Run included tests
-
-Run included code tests:
-
-```bash
-cargo test
-```
-
-Run API functional tests:
-
-```bash
-cargo build
-chmod +x target/debug/irohad
-chmod +x target/debug/iroha
-
-bash ./scripts/test_env.sh setup
-bash ./scripts/tests/register_mint_quantity.sh
-bash ./scripts/test_env.sh cleanup
-```
-To generate WASM files for smart contracts, use the provided script `generate_wasm.sh`. If you are in the root directory of Iroha run the following command:
-
-```bash
-bash ./scripts/generate_wasm.sh [path/to/smartcontracts]
-```
-
-The generated WASM files will be saved in a generated directory `test-smartcontracts`, relative to your current working directory. The default path for smart contracts in this project is `wasm_samples`.
-
-
-
### Build Iroha
- Build Iroha and accompanying binaries:
diff --git a/crates/iroha/Cargo.toml b/crates/iroha/Cargo.toml
index 6270392be37..0c1b753c0f7 100644
--- a/crates/iroha/Cargo.toml
+++ b/crates/iroha/Cargo.toml
@@ -83,37 +83,15 @@ toml = { workspace = true }
nonzero_ext = { workspace = true }
[dev-dependencies]
-# FIXME: These three activate `transparent_api` but client should never activate this feature.
-# Additionally there is a dependency on iroha_core in dev-dependencies in iroha_telemetry/derive
-# Hopefully, once the integration tests migration is finished these can be removed
-irohad = { workspace = true }
-
iroha_genesis = { workspace = true }
iroha_test_network = { workspace = true }
executor_custom_data_model = { version = "=2.0.0-rc.1.0", path = "../../wasm_samples/executor_custom_data_model" }
tokio = { workspace = true, features = ["rt-multi-thread"] }
-criterion = { workspace = true, features = ["html_reports"] }
+reqwest = { version = "0.12.7", features = ["json"] }
color-eyre = { workspace = true }
tempfile = { workspace = true }
hex = { workspace = true }
assertables = { workspace = true }
-
-tracing-subscriber = { workspace = true, features = ["fmt", "ansi"] }
-tracing-flame = "0.2.0"
-
trybuild = { workspace = true }
-
-[[bench]]
-name = "torii"
-harness = false
-
-[[bench]]
-name = "tps-dev"
-harness = false
-path = "benches/tps/dev.rs"
-
-[[example]]
-name = "tps-oneshot"
-harness = false
-path = "benches/tps/oneshot.rs"
+assert_matches = "1.5.0"
diff --git a/crates/iroha/benches/torii.rs b/crates/iroha/benches/torii.rs
deleted file mode 100644
index 33a754b268a..00000000000
--- a/crates/iroha/benches/torii.rs
+++ /dev/null
@@ -1,189 +0,0 @@
-#![allow(missing_docs, clippy::pedantic)]
-
-use std::thread;
-
-use criterion::{criterion_group, criterion_main, Criterion, Throughput};
-use iroha::{
- client::{asset, Client},
- data_model::prelude::*,
-};
-use iroha_genesis::GenesisBuilder;
-use iroha_primitives::unique_vec;
-use iroha_test_network::{get_chain_id, get_key_pair, Peer as TestPeer, PeerBuilder, TestRuntime};
-use iroha_test_samples::{gen_account_in, load_sample_wasm};
-use irohad::samples::get_config;
-use tokio::runtime::Runtime;
-
-const MINIMUM_SUCCESS_REQUEST_RATIO: f32 = 0.9;
-
-fn query_requests(criterion: &mut Criterion) {
-    let mut peer = <TestPeer>::new().expect("Failed to create peer");
-
- let chain_id = get_chain_id();
- let genesis_key_pair = get_key_pair(iroha_test_network::Signatory::Genesis);
- let configuration = get_config(
- unique_vec![peer.id.clone()],
- chain_id.clone(),
- get_key_pair(iroha_test_network::Signatory::Peer),
- genesis_key_pair.public_key(),
- );
-
- let rt = Runtime::test();
- let executor = Executor::new(load_sample_wasm("default_executor"));
- let topology = vec![peer.id.clone()];
- let genesis = GenesisBuilder::default()
- .domain("wonderland".parse().expect("Valid"))
- .account(
- get_key_pair(iroha_test_network::Signatory::Alice)
- .into_parts()
- .0,
- )
- .finish_domain()
- .build_and_sign(chain_id, executor, topology, &genesis_key_pair);
-
- let builder = PeerBuilder::new()
- .with_config(configuration)
- .with_genesis(genesis);
-
- rt.block_on(builder.start_with_peer(&mut peer));
- rt.block_on(async {
- iroha_logger::test_logger()
- .reload_level(iroha::data_model::Level::ERROR.into())
- .await
- .unwrap()
- });
- let mut group = criterion.benchmark_group("query-requests");
- let domain_id: DomainId = "domain".parse().expect("Valid");
- let create_domain = Register::domain(Domain::new(domain_id));
- let (account_id, _account_keypair) = gen_account_in("domain");
- let create_account = Register::account(Account::new(account_id.clone()));
- let asset_definition_id: AssetDefinitionId = "xor#domain".parse().expect("Valid");
- let create_asset =
- Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- let mint_asset = Mint::asset_numeric(
- 200u32,
- AssetId::new(asset_definition_id, account_id.clone()),
- );
- let client_config = iroha::samples::get_client_config(
- get_chain_id(),
- get_key_pair(iroha_test_network::Signatory::Alice),
- format!("http://{}", peer.api_address).parse().unwrap(),
- );
-
- let client = Client::new(client_config);
- thread::sleep(std::time::Duration::from_millis(5000));
-
- let _ = client
-        .submit_all::<InstructionBox>([
- create_domain.into(),
- create_account.into(),
- create_asset.into(),
- mint_asset.into(),
- ])
- .expect("Failed to prepare state");
-
- let query = client
- .query(asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id));
- thread::sleep(std::time::Duration::from_millis(1500));
- let mut success_count = 0;
- let mut failures_count = 0;
- // reporting elements and not bytes here because the new query builder doesn't easily expose the box type used in transport
- let _dropable = group.throughput(Throughput::Elements(1));
- let _dropable2 = group.bench_function("query", |b| {
- b.iter(|| {
- let iter = query.clone().execute_all();
-
- match iter {
- Ok(assets) => {
- assert!(!assets.is_empty());
- success_count += 1;
- }
- Err(e) => {
- eprintln!("Query failed: {e}");
- failures_count += 1;
- }
- }
- });
- });
- println!("Success count: {success_count}, Failures count: {failures_count}");
- group.finish();
- if (failures_count + success_count) > 0 {
- assert!(
- success_count as f32 / (failures_count + success_count) as f32
- > MINIMUM_SUCCESS_REQUEST_RATIO
- );
- }
-}
-
-fn instruction_submits(criterion: &mut Criterion) {
- println!("instruction submits");
- let rt = Runtime::test();
-    let mut peer = <TestPeer>::new().expect("Failed to create peer");
-
- let chain_id = get_chain_id();
- let genesis_key_pair = get_key_pair(iroha_test_network::Signatory::Genesis);
- let topology = vec![peer.id.clone()];
- let configuration = get_config(
- unique_vec![peer.id.clone()],
- chain_id.clone(),
- get_key_pair(iroha_test_network::Signatory::Peer),
- genesis_key_pair.public_key(),
- );
- let executor = Executor::new(load_sample_wasm("default_executor"));
- let genesis = GenesisBuilder::default()
- .domain("wonderland".parse().expect("Valid"))
- .account(configuration.common.key_pair.public_key().clone())
- .finish_domain()
- .build_and_sign(chain_id, executor, topology, &genesis_key_pair);
- let builder = PeerBuilder::new()
- .with_config(configuration)
- .with_genesis(genesis);
- rt.block_on(builder.start_with_peer(&mut peer));
- let mut group = criterion.benchmark_group("instruction-requests");
- let domain_id: DomainId = "domain".parse().expect("Valid");
- let create_domain = Register::domain(Domain::new(domain_id));
- let (account_id, _account_keypair) = gen_account_in("domain");
- let create_account = Register::account(Account::new(account_id.clone()));
- let asset_definition_id: AssetDefinitionId = "xor#domain".parse().expect("Valid");
- let client_config = iroha::samples::get_client_config(
- get_chain_id(),
- get_key_pair(iroha_test_network::Signatory::Alice),
- format!("http://{}", peer.api_address).parse().unwrap(),
- );
- let client = Client::new(client_config);
- thread::sleep(std::time::Duration::from_millis(5000));
- let _ = client
-        .submit_all::<InstructionBox>([create_domain.into(), create_account.into()])
- .expect("Failed to create role.");
- thread::sleep(std::time::Duration::from_millis(500));
- let mut success_count = 0;
- let mut failures_count = 0;
- let _dropable = group.bench_function("instructions", |b| {
- b.iter(|| {
- let mint_asset = Mint::asset_numeric(
- 200u32,
- AssetId::new(asset_definition_id.clone(), account_id.clone()),
- );
- match client.submit(mint_asset) {
- Ok(_) => success_count += 1,
- Err(e) => {
- eprintln!("Failed to execute instruction: {e}");
- failures_count += 1;
- }
- };
- })
- });
- println!("Success count: {success_count}, Failures count: {failures_count}");
- group.finish();
- if (failures_count + success_count) > 0 {
- assert!(
- success_count as f32 / (failures_count + success_count) as f32
- > MINIMUM_SUCCESS_REQUEST_RATIO
- );
- }
-}
-
-criterion_group!(instructions, instruction_submits);
-criterion_group!(queries, query_requests);
-criterion_main!(queries, instructions);
diff --git a/crates/iroha/benches/tps/README.md b/crates/iroha/benches/tps/README.md
deleted file mode 100644
index 46223669003..00000000000
--- a/crates/iroha/benches/tps/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Benchmarks: Transactions per Second (TPS)
-
-Benchmark your code during development and get a statistical report with tps measurements. [Criterion.rs](https://github.com/bheisler/criterion.rs) is used for benchmarking.
-
-## Usage
-
-1. Establish a baseline:
-
- Checkout the target branch (`main`):
- ```
- git checkout main
- ```
- Then run:
- ```
- cargo bench --bench tps-dev
- ```
-
-2. Compare against the baseline:
-
- Checkout the commit you want to benchmark:
- ```
-    git checkout <commit>
- ```
- Then run:
- ```
- cargo bench --bench tps-dev
- ```
-
- :exclamation: Since Criterion.rs measures time instead of throughput by default, `"improved"` and `"regressed"` messages are reversed.
-
-3. Check the report at `../../../target/criterion/report/index.html`.
-
-## Troubleshooting
-
-If a benchmark fails, reduce the load by increasing the interval between transactions (`interval_us_per_tx`) in the [configuration file](config.json).
-
-You can also run a single trial of the measurement:
-
-```
-cd client
-cargo run --release --example tps-oneshot
-```
diff --git a/crates/iroha/benches/tps/config.json b/crates/iroha/benches/tps/config.json
deleted file mode 100644
index 8b62736a4ec..00000000000
--- a/crates/iroha/benches/tps/config.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "peers": 4,
- "interval_us_per_tx": 0,
- "max_txs_per_block": 1024,
- "blocks": 15,
- "sample_size": 10,
- "genesis_max_retries": 30
-}
diff --git a/crates/iroha/benches/tps/dev.rs b/crates/iroha/benches/tps/dev.rs
deleted file mode 100644
index 716fdfe2eb3..00000000000
--- a/crates/iroha/benches/tps/dev.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-//! Benchmark by iterating a tps measurement and analyzing it into a statistical report
-//! using [criterion](https://github.com/bheisler/criterion.rs)
-//! for performance check during development
-#![allow(missing_docs)]
-
-use criterion::{
- black_box, criterion_group, criterion_main,
- measurement::{Measurement, ValueFormatter},
- BenchmarkId, Criterion, Throughput,
-};
-
-use crate::utils::Config;
-
-mod utils;
-
-impl Config {
- fn bench(self, c: &mut Criterion) {
- let mut group = c.benchmark_group("tps");
-
- group.sample_size(self.sample_size as usize);
-
- group.bench_function(BenchmarkId::from_parameter(self), move |b| {
- b.iter_custom(|_| self.measure().expect("Failed to measure"));
- });
-
- group.finish();
- }
-}
-
-fn bench_tps_with_config(c: &mut Criterion) {
- let config = Config::from_path("benches/tps/config.json").expect("Failed to configure");
- iroha_logger::info!(?config);
- black_box(config).bench(c);
-}
-
-fn alternate_measurement() -> Criterion {
- Criterion::default().with_measurement(Tps)
-}
-
-criterion_group! {
- name = benches;
- config = alternate_measurement();
- targets = bench_tps_with_config
-}
-criterion_main!(benches);
-
-struct Tps;
-
-impl Measurement for Tps {
- type Intermediate = ();
- type Value = utils::Tps;
-
- fn start(&self) -> Self::Intermediate {
- unreachable!()
- }
- fn end(&self, _i: Self::Intermediate) -> Self::Value {
- unreachable!()
- }
- #[allow(clippy::float_arithmetic)]
- fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value {
- *v1 + *v2
- }
- fn zero(&self) -> Self::Value {
- f64::MIN_POSITIVE
- }
- fn to_f64(&self, value: &Self::Value) -> f64 {
- *value
- }
- fn formatter(&self) -> &dyn ValueFormatter {
- &TpsFormatter
- }
-}
-
-struct TpsFormatter;
-
-impl ValueFormatter for TpsFormatter {
- fn scale_values(&self, _typical_value: f64, _values: &mut [f64]) -> &'static str {
- "tps"
- }
- fn scale_throughputs(
- &self,
- _typical_value: f64,
- _throughput: &Throughput,
- _values: &mut [f64],
- ) -> &'static str {
- unreachable!()
- }
- fn scale_for_machines(&self, _values: &mut [f64]) -> &'static str {
- "tps"
- }
-}
diff --git a/crates/iroha/benches/tps/oneshot.rs b/crates/iroha/benches/tps/oneshot.rs
deleted file mode 100644
index 99efceac8b2..00000000000
--- a/crates/iroha/benches/tps/oneshot.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-//! Single trial of the benchmark
-
-mod utils;
-
-use std::{fs::File, io::BufWriter};
-
-use tracing_flame::{FlameLayer, FlushGuard};
-use tracing_subscriber::prelude::*;
-
-fn main() {
-    let args: Vec<String> = std::env::args().collect();
-    let mut flush_guard: Option<FlushGuard<BufWriter<File>>> = None;
-
- if args.len() >= 2 {
- let file = File::create(&args[1]).expect("valid path");
-
- let flame_layer = FlameLayer::new(BufWriter::new(file))
- .with_threads_collapsed(true)
- .with_empty_samples(true);
- flush_guard = Some(flame_layer.flush_on_drop());
-
- tracing_subscriber::registry().with(flame_layer).init();
- iroha_logger::disable_global().expect("Logger should not be set yet");
- }
-
- let config = utils::Config::from_path("benches/tps/config.json").expect("Failed to configure");
- let tps = config.measure().expect("Failed to measure");
-
- flush_guard.map_or_else(
- || {
- iroha_logger::info!(?config);
- iroha_logger::info!(%tps);
- },
- |guard| {
- guard.flush().expect("Flushed data without errors");
- println!("Tracing data outputted to file: {}", &args[1]);
- println!("TPS was {tps}");
- println!("Config was {config:?}");
- },
- )
-}
diff --git a/crates/iroha/benches/tps/utils.rs b/crates/iroha/benches/tps/utils.rs
deleted file mode 100644
index 08a95111946..00000000000
--- a/crates/iroha/benches/tps/utils.rs
+++ /dev/null
@@ -1,236 +0,0 @@
-use std::{fmt, fs::File, io::BufReader, num::NonZeroUsize, path::Path, sync::mpsc, thread, time};
-
-use eyre::{Result, WrapErr};
-use iroha::{
- client::Client,
- crypto::KeyPair,
- data_model::{
- events::pipeline::{BlockEventFilter, BlockStatus},
- parameter::BlockParameter,
- prelude::*,
- },
-};
-use iroha_test_network::*;
-use iroha_test_samples::ALICE_ID;
-use nonzero_ext::nonzero;
-use serde::Deserialize;
-
-pub type Tps = f64;
-
-#[derive(Debug, Clone, Copy, Deserialize)]
-pub struct Config {
- pub peers: u32,
- /// Interval in microseconds between transactions to reduce load
- pub interval_us_per_tx: u64,
- pub block_limits: BlockParameter,
- pub blocks: u32,
- pub sample_size: u32,
- pub genesis_max_retries: u32,
-}
-
-impl fmt::Display for Config {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- f,
- "{}peers-{}interval_µs-{}max_txs-{}blocks-{}samples",
- self.peers, self.interval_us_per_tx, self.block_limits, self.blocks, self.sample_size,
- )
- }
-}
-
-impl Config {
-    pub fn from_path<P: AsRef<Path> + fmt::Debug>(path: P) -> Result<Self> {
- let file = File::open(path).wrap_err("Failed to open the config file")?;
- let reader = BufReader::new(file);
- serde_json::from_reader(reader).wrap_err("Failed to deserialize json from reader")
- }
-
-    pub fn measure(self) -> Result<Tps> {
- // READY
- let (_rt, network, client) = Network::start_test_with_runtime(self.peers, None);
- let clients = network.clients();
- wait_for_genesis_committed_with_max_retries(&clients, 0, self.genesis_max_retries);
-
- client.submit_blocking(SetParameter::new(Parameter::Block(self.block_limits)))?;
-
- let unit_names = (UnitName::MIN..).take(self.peers as usize);
- let units = clients
- .into_iter()
- .zip(unit_names)
- .map(|(client, name)| {
- let unit = MeasurerUnit {
- config: self,
- client,
- name,
- signatory: KeyPair::random().into_parts().0,
- };
- unit.ready()
- })
-            .collect::<Result<Vec<_>>>()?;
-
- let event_counter_handles = units
- .iter()
- .map(MeasurerUnit::spawn_event_counter)
-            .collect::<Vec<_>>();
-
- // START
- let timer = time::Instant::now();
- let transaction_submitter_handles = units
- .iter()
- .map(|unit| {
- let (shutdown_sender, shutdown_reciever) = mpsc::channel();
- let handle = unit.spawn_transaction_submitter(shutdown_reciever);
- (handle, shutdown_sender)
- })
-            .collect::<Vec<_>>();
-
- // Wait for slowest peer to commit required number of blocks
- for handle in event_counter_handles {
- handle.join().expect("Event counter panicked")?;
- }
-
- // END
- let elapsed_secs = timer.elapsed().as_secs_f64();
-
- // Stop transaction submitters
- for (handle, shutdown_sender) in transaction_submitter_handles {
- shutdown_sender
- .send(())
- .expect("Failed to send shutdown signal");
- handle.join().expect("Transaction submitter panicked");
- }
-
- let blocks_out_of_measure = 2 + MeasurerUnit::PREPARATION_BLOCKS_NUMBER * self.peers;
- let state_view = network
- .first_peer
- .irohad
- .as_ref()
- .expect("Must be some")
- .state()
- .view();
- let mut blocks =
- state_view.all_blocks(NonZeroUsize::new(blocks_out_of_measure as usize + 1).unwrap());
- let (txs_accepted, txs_rejected) = (0..self.blocks)
- .map(|_| {
- let block = blocks
- .next()
- .expect("The block is not yet in state. Need more sleep?");
- (
- block.transactions().filter(|tx| tx.error.is_none()).count(),
- block.transactions().filter(|tx| tx.error.is_some()).count(),
- )
- })
- .fold((0, 0), |acc, pair| (acc.0 + pair.0, acc.1 + pair.1));
- #[allow(clippy::float_arithmetic, clippy::cast_precision_loss)]
- let tps = txs_accepted as f64 / elapsed_secs;
- iroha_logger::info!(%tps, %txs_accepted, %elapsed_secs, %txs_rejected);
- Ok(tps)
- }
-}
-
-struct MeasurerUnit {
- pub config: Config,
- pub client: Client,
- pub name: UnitName,
- pub signatory: PublicKey,
-}
-
-type UnitName = u32;
-
-impl MeasurerUnit {
- /// Number of blocks that will be committed by [`Self::ready()`] call
- const PREPARATION_BLOCKS_NUMBER: u32 = 2;
-
- /// Submit initial transactions for measurement
-    fn ready(self) -> Result<Self> {
- let register_me = Register::account(Account::new(self.account_id()));
- self.client.submit_blocking(register_me)?;
-
- let mint_a_rose = Mint::asset_numeric(1_u32, self.asset_id());
- self.client.submit_blocking(mint_a_rose)?;
-
- Ok(self)
- }
-
-    /// Spawn a thread that checks if all the expected blocks are committed
-    fn spawn_event_counter(&self) -> thread::JoinHandle<Result<()>> {
- let listener = self.client.clone();
- let (init_sender, init_receiver) = mpsc::channel();
- let event_filter = BlockEventFilter::default().for_status(BlockStatus::Applied);
- let blocks_expected = self.config.blocks as usize;
- let name = self.name;
- let handle = thread::spawn(move || -> Result<()> {
- let mut event_iterator = listener.listen_for_events([event_filter])?;
- init_sender.send(())?;
- for i in 1..=blocks_expected {
- let _event = event_iterator.next().expect("Event stream closed")?;
- iroha_logger::info!(name, block = i, "Received block committed event");
- }
- Ok(())
- });
- init_receiver
- .recv()
- .expect("Failed to initialize an event counter");
-
- handle
- }
-
-    /// Spawn a thread that periodically submits transactions
- fn spawn_transaction_submitter(
- &self,
- shutdown_signal: mpsc::Receiver<()>,
- ) -> thread::JoinHandle<()> {
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
-
- let submitter = self.client.clone();
- let interval_us_per_tx = self.config.interval_us_per_tx;
- let instructions = self.instructions();
- let alice_id = ALICE_ID.clone();
-
- let mut nonce = nonzero!(1_u32);
-
- thread::spawn(move || {
- for instruction in instructions {
- match shutdown_signal.try_recv() {
- Err(mpsc::TryRecvError::Empty) => {
- let mut transaction =
- TransactionBuilder::new(chain_id.clone(), alice_id.clone())
- .with_instructions([instruction]);
- transaction.set_nonce(nonce); // Use nonce to avoid transaction duplication within the same thread
-
- let transaction = submitter.sign_transaction(transaction);
- if let Err(error) = submitter.submit_transaction(&transaction) {
- iroha_logger::error!(?error, "Failed to submit transaction");
- }
-
- nonce = nonce.checked_add(1).unwrap_or_else(|| nonzero!(1_u32));
- thread::sleep(time::Duration::from_micros(interval_us_per_tx));
- }
- Err(mpsc::TryRecvError::Disconnected) => {
- panic!("Unexpected disconnection of shutdown sender");
- }
- Ok(()) => {
- iroha_logger::info!("Shutdown transaction submitter");
- return;
- }
- }
- }
- })
- }
-
-    fn instructions(&self) -> impl Iterator<Item = InstructionBox> {
- std::iter::once(self.mint()).cycle()
- }
-
- fn mint(&self) -> InstructionBox {
- Mint::asset_numeric(1_u32, self.asset_id()).into()
- }
-
- fn account_id(&self) -> AccountId {
- AccountId::new("wonderland".parse().expect("Valid"), self.signatory.clone())
- }
-
- fn asset_id(&self) -> AssetId {
- AssetId::new("rose#wonderland".parse().expect("Valid"), self.account_id())
- }
-}
diff --git a/crates/iroha/examples/million_accounts_genesis.rs b/crates/iroha/examples/million_accounts_genesis.rs
deleted file mode 100644
index aa50d6ab98a..00000000000
--- a/crates/iroha/examples/million_accounts_genesis.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-//! This file contains examples from the Rust tutorial.
-use std::{thread, time::Duration};
-
-use iroha::{
- crypto::KeyPair,
- data_model::{isi::InstructionBox, prelude::*},
-};
-use iroha_genesis::{GenesisBlock, GenesisBuilder};
-use iroha_primitives::unique_vec;
-use iroha_test_network::{
- get_chain_id, get_key_pair, wait_for_genesis_committed, Peer as TestPeer, PeerBuilder,
- TestRuntime,
-};
-use iroha_test_samples::load_sample_wasm;
-use irohad::samples::get_config;
-use tokio::runtime::Runtime;
-
-fn generate_genesis(
- num_domains: u32,
- chain_id: ChainId,
- genesis_key_pair: &KeyPair,
-    topology: Vec<PeerId>,
-) -> GenesisBlock {
- let mut builder = GenesisBuilder::default();
-
- let signatory_alice = get_key_pair(iroha_test_network::Signatory::Alice)
- .into_parts()
- .0;
- for i in 0_u32..num_domains {
- builder = builder
- .domain(format!("wonderland-{i}").parse().expect("Valid"))
- .account(signatory_alice.clone())
- .asset(
- format!("xor-{i}").parse().expect("Valid"),
- AssetType::Numeric(NumericSpec::default()),
- )
- .finish_domain();
- }
-
- let executor = Executor::new(load_sample_wasm("default_executor"));
- builder.build_and_sign(chain_id, executor, topology, genesis_key_pair)
-}
-
-fn main_genesis() {
-    let mut peer = <TestPeer>::new().expect("Failed to create peer");
-
- let chain_id = get_chain_id();
- let genesis_key_pair = get_key_pair(iroha_test_network::Signatory::Genesis);
- let topology = vec![peer.id.clone()];
- let configuration = get_config(
- unique_vec![peer.id.clone()],
- chain_id.clone(),
- get_key_pair(iroha_test_network::Signatory::Peer),
- genesis_key_pair.public_key(),
- );
- let rt = Runtime::test();
- let genesis = generate_genesis(1_000_000_u32, chain_id, &genesis_key_pair, topology);
-
- let builder = PeerBuilder::new()
- .with_genesis(genesis)
- .with_config(configuration);
-
- // This only submits the genesis. It doesn't check if the accounts
- // are created, because that check is 1) not needed for what the
- // test is actually for, 2) incredibly slow, making this sort of
- // test impractical, 3) very likely to overflow memory on systems
- // with less than 16GiB of free memory.
- rt.block_on(builder.start_with_peer(&mut peer));
-}
-
-fn create_million_accounts_directly() {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
- for i in 0_u32..1_000_000_u32 {
- let domain_id: DomainId = format!("wonderland-{i}").parse().expect("Valid");
- let normal_account_id = AccountId::new(domain_id.clone(), KeyPair::random().into_parts().0);
- let create_domain = Register::domain(Domain::new(domain_id));
- let create_account = Register::account(Account::new(normal_account_id.clone()));
- if test_client
-            .submit_all::<InstructionBox>([create_domain.into(), create_account.into()])
- .is_err()
- {
- thread::sleep(Duration::from_millis(100));
- }
- }
- thread::sleep(Duration::from_secs(1000));
-}
-
-fn main() {
- create_million_accounts_directly();
- main_genesis();
-}
diff --git a/crates/iroha/examples/register_1000_triggers.rs b/crates/iroha/examples/register_1000_triggers.rs
deleted file mode 100644
index 5d17b7c414d..00000000000
--- a/crates/iroha/examples/register_1000_triggers.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-//! Example of registering multiple triggers
-//! Used to show Iroha's trigger deduplication capabilities
-
-use std::num::NonZeroU64;
-
-use iroha::{
- client::Client,
- crypto::KeyPair,
- data_model::{
- parameter::{Parameter, SmartContractParameter},
- prelude::*,
- trigger::TriggerId,
- },
-};
-use iroha_genesis::{GenesisBlock, GenesisBuilder};
-use iroha_primitives::unique_vec;
-use iroha_test_network::{
- get_chain_id, get_key_pair, wait_for_genesis_committed_with_max_retries, Peer as TestPeer,
- PeerBuilder, TestClient, TestRuntime,
-};
-use iroha_test_samples::{gen_account_in, load_sample_wasm};
-use irohad::samples::get_config;
-use tokio::runtime::Runtime;
-
-fn generate_genesis(
- num_triggers: u32,
- chain_id: ChainId,
- genesis_key_pair: &KeyPair,
-    topology: Vec<PeerId>,
-) -> GenesisBlock {
- let builder = GenesisBuilder::default()
- .append_instruction(SetParameter::new(Parameter::Executor(
- SmartContractParameter::Fuel(NonZeroU64::MAX),
- )))
- .append_instruction(SetParameter::new(Parameter::Executor(
- SmartContractParameter::Memory(NonZeroU64::MAX),
- )));
-
- let (account_id, _account_keypair) = gen_account_in("wonderland");
-
- let build_trigger = |trigger_id: TriggerId| {
- Trigger::new(
- trigger_id.clone(),
- Action::new(
- load_sample_wasm("mint_rose_trigger"),
- Repeats::Indefinitely,
- account_id.clone(),
- ExecuteTriggerEventFilter::new()
- .for_trigger(trigger_id)
- .under_authority(account_id.clone()),
- ),
- )
- };
-
- let builder = (0..num_triggers)
- .map(|i| {
-            let trigger_id = i.to_string().parse::<TriggerId>().unwrap();
- let trigger = build_trigger(trigger_id);
- Register::trigger(trigger)
- })
- .fold(builder, GenesisBuilder::append_instruction);
-
- let executor = Executor::new(load_sample_wasm("default_executor"));
- builder.build_and_sign(chain_id, executor, topology, genesis_key_pair)
-}
-
-fn main() {
-    let mut peer: TestPeer = <TestPeer>::new().expect("Failed to create peer");
-
- let chain_id = get_chain_id();
- let genesis_key_pair = get_key_pair(iroha_test_network::Signatory::Genesis);
- let topology = vec![peer.id.clone()];
- let configuration = get_config(
- unique_vec![peer.id.clone()],
- chain_id.clone(),
- get_key_pair(iroha_test_network::Signatory::Peer),
- genesis_key_pair.public_key(),
- );
-
- let genesis = generate_genesis(1_000_u32, chain_id, &genesis_key_pair, topology);
-
- let builder = PeerBuilder::new()
- .with_genesis(genesis)
- .with_config(configuration);
-
- let rt = Runtime::test();
- let test_client = Client::test(&peer.api_address);
- rt.block_on(builder.start_with_peer(&mut peer));
-
- wait_for_genesis_committed_with_max_retries(&vec![test_client.clone()], 0, 600);
-}
diff --git a/crates/iroha/src/client.rs b/crates/iroha/src/client.rs
index df283c6c6c5..ecdd1da2e25 100644
--- a/crates/iroha/src/client.rs
+++ b/crates/iroha/src/client.rs
@@ -70,12 +70,7 @@ impl TransactionResponseHandler {
pub struct StatusResponseHandler;
impl StatusResponseHandler {
-    pub(crate) fn handle(resp: &Response<Vec<u8>>) -> Result<Status> {
- let slice = Self::handle_raw(resp)?;
- serde_json::from_slice(slice).wrap_err("Failed to decode body")
- }
-
-    fn handle_raw(resp: &Response<Vec<u8>>) -> Result<&Vec<u8>> {
+    fn handle(resp: &Response<Vec<u8>>) -> Result<&Vec<u8>> {
if resp.status() != StatusCode::OK {
return Err(ResponseReport::with_msg("Unexpected status response", resp)
.unwrap_or_else(core::convert::identity)
@@ -361,6 +356,12 @@ impl Client {
Self::listen_for_tx_confirmation_loop(&mut event_iterator, hash),
)
.await
+ .wrap_err_with(|| {
+ eyre!(
+ "haven't got tx confirmation within {:?} (configured with `transaction_status_timeout`)",
+ self.transaction_status_timeout
+ )
+ })
.map_err(Into::into)
.and_then(std::convert::identity);
event_iterator.close().await;
@@ -614,7 +615,7 @@ impl Client {
             .prepare_status_request::<DefaultRequestBuilder>()
.header(http::header::ACCEPT, "application/x-parity-scale");
let resp = req.build()?.send()?;
- let scaled_resp = StatusResponseHandler::handle_raw(&resp).cloned()?;
+ let scaled_resp = StatusResponseHandler::handle(&resp).cloned()?;
DecodeAll::decode_all(&mut scaled_resp.as_slice()).map_err(|err| eyre!("{err}"))
}
diff --git a/crates/iroha/src/config.rs b/crates/iroha/src/config.rs
index 2092f79ebd5..48948780bf0 100644
--- a/crates/iroha/src/config.rs
+++ b/crates/iroha/src/config.rs
@@ -19,6 +19,8 @@ use crate::{
mod user;
+pub use user::Root as UserConfig;
+
#[allow(missing_docs)]
pub const DEFAULT_TRANSACTION_TIME_TO_LIVE: Duration = Duration::from_secs(100);
#[allow(missing_docs)]
diff --git a/crates/iroha/src/lib.rs b/crates/iroha/src/lib.rs
index 6185fb12c10..a88e5aef996 100644
--- a/crates/iroha/src/lib.rs
+++ b/crates/iroha/src/lib.rs
@@ -6,51 +6,5 @@ pub mod http;
mod http_default;
pub mod query;
-pub mod samples {
- //! Module containing sample configurations for tests and benchmarks.
-
- use eyre::Result;
- use iroha_telemetry::metrics::Status;
- use url::Url;
-
- use crate::{
- client::{Client, StatusResponseHandler},
- config::{
- Config, DEFAULT_TRANSACTION_NONCE, DEFAULT_TRANSACTION_STATUS_TIMEOUT,
- DEFAULT_TRANSACTION_TIME_TO_LIVE,
- },
- crypto::KeyPair,
- data_model::ChainId,
- http_default::DefaultRequestBuilder,
- };
-
- /// Get sample client configuration.
- pub fn get_client_config(chain_id: ChainId, key_pair: KeyPair, torii_api_url: Url) -> Config {
- let account_id = format!("{}@wonderland", key_pair.public_key())
- .parse()
- .expect("should be valid");
- Config {
- chain: chain_id,
- key_pair,
- torii_api_url,
- account: account_id,
- basic_auth: None,
- transaction_ttl: DEFAULT_TRANSACTION_TIME_TO_LIVE,
- transaction_status_timeout: DEFAULT_TRANSACTION_STATUS_TIMEOUT,
- transaction_add_nonce: DEFAULT_TRANSACTION_NONCE,
- }
- }
-
- /// Gets network status seen from the peer in json format
- ///
- /// # Errors
- /// Fails if sending request or decoding fails
-    pub fn get_status_json(client: &Client) -> Result<Status> {
-        let req = client.prepare_status_request::<DefaultRequestBuilder>();
- let resp = req.build()?.send()?;
- StatusResponseHandler::handle(&resp)
- }
-}
-
pub use iroha_crypto as crypto;
pub use iroha_data_model as data_model;
diff --git a/crates/iroha/tests/integration/asset.rs b/crates/iroha/tests/integration/asset.rs
index b06871b1007..e605b0c3432 100644
--- a/crates/iroha/tests/integration/asset.rs
+++ b/crates/iroha/tests/integration/asset.rs
@@ -1,5 +1,3 @@
-use std::thread;
-
use eyre::Result;
use iroha::{
client,
@@ -11,7 +9,6 @@ use iroha::{
transaction::error::TransactionRejectionReason,
},
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_executor_data_model::permission::asset::CanTransferAsset;
use iroha_test_network::*;
use iroha_test_samples::{gen_account_in, ALICE_ID, BOB_ID};
@@ -20,8 +17,8 @@ use iroha_test_samples::{gen_account_in, ALICE_ID, BOB_ID};
// This test is also covered at the UI level in the iroha_cli tests
// in test_register_asset_definitions.py
fn client_register_asset_should_add_asset_once_but_not_twice() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_620).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -36,22 +33,21 @@ fn client_register_asset_should_add_asset_once_but_not_twice() -> Result<()> {
0_u32,
));
- test_client
-        .submit_all::<InstructionBox>([create_asset.into(), register_asset.clone().into()])?;
+    test_client.submit_all_blocking::<InstructionBox>([
+ create_asset.into(),
+ register_asset.clone().into(),
+ ])?;
// Registering an asset to an account which doesn't have one
// should result in asset being created
- test_client.poll(move |client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(Numeric::ZERO)
- }))
- })?;
+ let asset = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id))
+ .execute_all()?
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .unwrap();
+ assert_eq!(*asset.value(), AssetValue::Numeric(Numeric::ZERO));
// But registering an asset to account already having one should fail
assert!(test_client.submit_blocking(register_asset).is_err());
@@ -61,8 +57,8 @@ fn client_register_asset_should_add_asset_once_but_not_twice() -> Result<()> {
#[test]
fn unregister_asset_should_remove_asset_from_account() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_555).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -76,33 +72,29 @@ fn unregister_asset_should_remove_asset_from_account() -> Result<()> {
let register_asset = Register::asset(Asset::new(asset_id.clone(), 0_u32)).into();
let unregister_asset = Unregister::asset(asset_id);
- test_client.submit_all([create_asset, register_asset])?;
+ test_client.submit_all_blocking([create_asset, register_asset])?;
- // Wait for asset to be registered
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
+    // Check that the asset is registered
+ let assets = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?;
- Ok(assets
- .iter()
- .any(|asset| *asset.id().definition() == asset_definition_id))
- })?;
+ assert!(assets
+ .iter()
+ .any(|asset| *asset.id().definition() == asset_definition_id));
- test_client.submit(unregister_asset)?;
+ test_client.submit_blocking(unregister_asset)?;
// ... and check that it is removed after Unregister
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
+ let assets = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?;
- Ok(assets
- .iter()
- .all(|asset| *asset.id().definition() != asset_definition_id))
- })?;
+ assert!(assets
+ .iter()
+ .all(|asset| *asset.id().definition() != asset_definition_id));
Ok(())
}
@@ -111,8 +103,8 @@ fn unregister_asset_should_remove_asset_from_account() -> Result<()> {
// This test is also covered at the UI level in the iroha_cli tests
// in test_mint_assets.py
fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_000).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -130,25 +122,23 @@ fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount() ->
);
let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()];
let tx = test_client.build_transaction(instructions, metadata);
- test_client.submit_transaction(&tx)?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(quantity)
- }))
- })?;
+ test_client.submit_transaction_blocking(&tx)?;
+
+ let asset = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id))
+ .execute_all()?
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .unwrap();
+ assert_eq!(*asset.value(), AssetValue::Numeric(quantity));
Ok(())
}
#[test]
fn client_add_big_asset_quantity_to_existing_asset_should_increase_asset_amount() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_510).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -158,7 +148,7 @@ fn client_add_big_asset_quantity_to_existing_asset_should_increase_asset_amount(
let create_asset =
Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
let metadata = iroha::data_model::metadata::Metadata::default();
- //When
+ // When
let quantity = Numeric::new(2_u128.pow(65), 0);
let mint = Mint::asset_numeric(
quantity,
@@ -166,25 +156,23 @@ fn client_add_big_asset_quantity_to_existing_asset_should_increase_asset_amount(
);
let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()];
let tx = test_client.build_transaction(instructions, metadata);
- test_client.submit_transaction(&tx)?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(quantity)
- }))
- })?;
+ test_client.submit_transaction_blocking(&tx)?;
+
+ let asset = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id))
+ .execute_all()?
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .unwrap();
+ assert_eq!(*asset.value(), AssetValue::Numeric(quantity));
Ok(())
}
#[test]
fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_515).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -203,18 +191,16 @@ fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> {
);
let instructions: [InstructionBox; 2] = [create_asset.into(), mint.into()];
let tx = test_client.build_transaction(instructions, metadata);
- test_client.submit_transaction(&tx)?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(quantity)
- }))
- })?;
+ test_client.submit_transaction_blocking(&tx)?;
+
+ let asset = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .unwrap();
+ assert_eq!(*asset.value(), AssetValue::Numeric(quantity));
// Add some fractional part
let quantity2 = numeric!(0.55);
@@ -226,65 +212,16 @@ fn client_add_asset_with_decimal_should_increase_asset_amount() -> Result<()> {
let sum = quantity
.checked_add(quantity2)
.ok_or_else(|| eyre::eyre!("overflow"))?;
- test_client.submit(mint)?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(sum)
- }))
- })?;
- Ok(())
-}
+ test_client.submit_blocking(mint)?;
-#[test]
-// This test is also covered at the UI level in the iroha_cli tests
-// in test_register_asset_definitions.py
-fn client_add_asset_with_name_length_more_than_limit_should_not_commit_transaction() -> Result<()> {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_520).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
- let pipeline_time = Config::pipeline_time();
-
- // Given
- let normal_asset_definition_id = "xor#wonderland"
-        .parse::<AssetDefinitionId>()
- .expect("Valid");
- let create_asset =
- Register::asset_definition(AssetDefinition::numeric(normal_asset_definition_id.clone()));
- test_client.submit(create_asset)?;
- iroha_logger::info!("Creating asset");
-
- let too_long_asset_name = "0".repeat(2_usize.pow(14));
- let incorrect_asset_definition_id = (too_long_asset_name + "#wonderland")
-        .parse::<AssetDefinitionId>()
- .expect("Valid");
- let create_asset = Register::asset_definition(AssetDefinition::numeric(
- incorrect_asset_definition_id.clone(),
- ));
-
- test_client.submit(create_asset)?;
- iroha_logger::info!("Creating another asset");
- thread::sleep(pipeline_time * 4);
-
- let mut asset_definition_ids = test_client
- .query(client::asset::all_definitions())
- .execute_all()
- .expect("Failed to execute request.")
+ let asset = test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id))
+ .execute_all()?
.into_iter()
- .map(|asset| asset.id().clone());
- iroha_logger::debug!(
- "Collected asset definitions ID's: {:?}",
- &asset_definition_ids
- );
-
- assert!(asset_definition_ids
- .any(|asset_definition_id| asset_definition_id == normal_asset_definition_id));
- assert!(!asset_definition_ids
- .any(|asset_definition_id| asset_definition_id == incorrect_asset_definition_id));
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .unwrap();
+ assert_eq!(*asset.value(), AssetValue::Numeric(sum));
Ok(())
}
@@ -294,8 +231,8 @@ fn client_add_asset_with_name_length_more_than_limit_should_not_commit_transacti
#[allow(clippy::expect_fun_call)]
#[test]
fn find_rate_and_make_exchange_isi_should_succeed() {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_675).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let (dex_id, _dex_keypair) = gen_account_in("exchange");
let (seller_id, seller_keypair) = gen_account_in("company");
@@ -388,8 +325,8 @@ fn find_rate_and_make_exchange_isi_should_succeed() {
#[test]
fn transfer_asset_definition() {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_060).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let bob_id = BOB_ID.clone();
@@ -426,8 +363,8 @@ fn transfer_asset_definition() {
#[test]
fn fail_if_dont_satisfy_spec() {
-    let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_125).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let bob_id = BOB_ID.clone();
diff --git a/crates/iroha/tests/integration/asset_propagation.rs b/crates/iroha/tests/integration/asset_propagation.rs
index 33254264e6b..342152e2dcc 100644
--- a/crates/iroha/tests/integration/asset_propagation.rs
+++ b/crates/iroha/tests/integration/asset_propagation.rs
@@ -1,11 +1,8 @@
-use std::thread;
-
use eyre::Result;
use iroha::{
client,
data_model::{parameter::BlockParameter, prelude::*},
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_test_network::*;
use iroha_test_samples::gen_account_in;
use nonzero_ext::nonzero;
@@ -16,13 +13,15 @@ use nonzero_ext::nonzero;
fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount_on_another_peer(
) -> Result<()> {
// Given
- let (_rt, network, client) = Network::start_test_with_runtime(4, Some(10_450));
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
-
- client.submit_blocking(SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(1_u64)),
- )))?;
+ let (network, rt) = NetworkBuilder::new()
+ .with_peers(4)
+ .with_genesis_instruction(SetParameter::new(Parameter::Block(
+ BlockParameter::MaxTransactions(nonzero!(1_u64)),
+ )))
+ .start_blocking()?;
+ let mut peers = network.peers().iter();
+ let peer_a = peers.next().unwrap();
+ let peer_b = peers.next().unwrap();
let create_domain = Register::domain(Domain::new("domain".parse()?));
let (account_id, _account_keypair) = gen_account_in("domain");
@@ -30,32 +29,30 @@ fn client_add_asset_quantity_to_existing_asset_should_increase_asset_amount_on_a
let asset_definition_id = "xor#domain".parse::()?;
let create_asset =
Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
-    client.submit_all::<InstructionBox>([
+    peer_a.client().submit_all_blocking::<InstructionBox>([
create_domain.into(),
create_account.into(),
create_asset.into(),
])?;
- thread::sleep(pipeline_time * 3);
- //When
+
+ // When
let quantity = numeric!(200);
- client.submit(Mint::asset_numeric(
+ peer_a.client().submit_blocking(Mint::asset_numeric(
quantity,
AssetId::new(asset_definition_id.clone(), account_id.clone()),
))?;
- thread::sleep(pipeline_time);
+ rt.block_on(async { network.ensure_blocks(3).await })?;
- //Then
- let peer = network.peers.values().last().unwrap();
- client::Client::test(&peer.api_address).poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
+ // Then
+ let asset = peer_b
+ .client()
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id))
+ .execute_all()?
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .expect("should be");
+ assert_eq!(*asset.value(), AssetValue::Numeric(quantity));
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(quantity)
- }))
- })?;
Ok(())
}
diff --git a/crates/iroha/tests/integration/events/data.rs b/crates/iroha/tests/integration/events/data.rs
index 217623a8df0..cce8763dc57 100644
--- a/crates/iroha/tests/integration/events/data.rs
+++ b/crates/iroha/tests/integration/events/data.rs
@@ -1,6 +1,8 @@
-use std::{fmt::Write as _, sync::mpsc, thread};
+use std::fmt::Write as _;
+use assert_matches::assert_matches;
use eyre::Result;
+use futures_util::StreamExt;
use iroha::data_model::{prelude::*, transaction::WasmSmartContract};
use iroha_executor_data_model::permission::{
account::CanModifyAccountMetadata, domain::CanModifyDomainMetadata,
@@ -8,6 +10,7 @@ use iroha_executor_data_model::permission::{
use iroha_test_network::*;
use iroha_test_samples::{ALICE_ID, BOB_ID};
use parity_scale_codec::Encode as _;
+use tokio::task::spawn_blocking;
/// Return string containing exported memory, dummy allocator, and
/// host function imports which you can embed into your wasm module.
@@ -79,13 +82,13 @@ fn produce_instructions() -> Vec<InstructionBox> {
.collect::<Vec<_>>()
}
-#[test]
-fn instruction_execution_should_produce_events() -> Result<()> {
- transaction_execution_should_produce_events(produce_instructions(), 10_665)
+#[tokio::test]
+async fn instruction_execution_should_produce_events() -> Result<()> {
+ transaction_execution_should_produce_events(produce_instructions()).await
}
-#[test]
-fn wasm_execution_should_produce_events() -> Result<()> {
+#[tokio::test]
+async fn wasm_execution_should_produce_events() -> Result<()> {
#![allow(clippy::integer_division)]
let isi_hex: Vec<String> = produce_instructions()
.into_iter()
@@ -124,105 +127,84 @@ fn wasm_execution_should_produce_events() -> Result<()> {
isi_calls = isi_calls
);
- transaction_execution_should_produce_events(
- WasmSmartContract::from_compiled(wat.into_bytes()),
- 10_615,
- )
+ transaction_execution_should_produce_events(WasmSmartContract::from_compiled(wat.into_bytes()))
+ .await
}
-fn transaction_execution_should_produce_events(
- executable: impl Into<Executable>,
- port: u16,
+async fn transaction_execution_should_produce_events(
+ executable: impl Into<Executable> + Send,
) -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(port).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
-
- // spawn event reporter
- let listener = client.clone();
- let (init_sender, init_receiver) = mpsc::channel();
- let (event_sender, event_receiver) = mpsc::channel();
- let event_filter = DataEventFilter::Any;
- thread::spawn(move || -> Result<()> {
- let event_iterator = listener.listen_for_events([event_filter])?;
- init_sender.send(())?;
- for event in event_iterator {
- event_sender.send(event)?
- }
- Ok(())
- });
-
- // submit transaction to produce events
- init_receiver.recv()?;
- let transaction = client.build_transaction(executable, Metadata::default());
- client.submit_transaction_blocking(&transaction)?;
-
- // assertion
- iroha_logger::info!("Listening for events");
- for i in 0..4_usize {
- let event: DataEvent = event_receiver.recv()??.try_into()?;
- iroha_logger::info!("Event: {:?}", event);
- assert!(matches!(event, DataEvent::Domain(_)));
- if let DataEvent::Domain(domain_event) = event {
- assert!(matches!(domain_event, DomainEvent::Created(_)));
-
- if let DomainEvent::Created(created_domain) = domain_event {
- let domain_id = DomainId::new(i.to_string().parse().expect("Valid"));
- assert_eq!(domain_id, *created_domain.id());
- }
- }
+ let network = NetworkBuilder::new().start().await?;
+ let mut events_stream = network
+ .client()
+ .listen_for_events_async([DataEventFilter::Any])
+ .await?;
+
+ {
+ let client = network.client();
+ let tx = client.build_transaction(executable, <_>::default());
+ spawn_blocking(move || client.submit_transaction_blocking(&tx)).await??;
+ }
+
+ for i in 0..4 {
+ let event = events_stream
+ .next()
+ .await
+ .expect("there are at least 4 events")?;
+
+ let domain = assert_matches!(
+ event,
+ EventBox::Data(DataEvent::Domain(DomainEvent::Created(domain))) => domain
+ );
+ assert_eq!(domain.id().name().as_ref(), i.to_string())
}
Ok(())
}
-#[test]
-fn produce_multiple_events() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_645).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
-
- // Spawn event reporter
- let listener = client.clone();
- let (init_sender, init_receiver) = mpsc::channel();
- let (event_sender, event_receiver) = mpsc::channel();
- let event_filter = DataEventFilter::Any;
- thread::spawn(move || -> Result<()> {
- let event_iterator = listener.listen_for_events([event_filter])?;
- init_sender.send(())?;
- for event in event_iterator {
- event_sender.send(event)?
- }
- Ok(())
- });
-
- // Wait for event listener
- init_receiver.recv()?;
+#[tokio::test]
+#[allow(clippy::too_many_lines)]
+async fn produce_multiple_events() -> Result<()> {
+ let network = NetworkBuilder::new().start().await?;
+ let mut events_stream = network
+ .client()
+ .listen_for_events_async([DataEventFilter::Any])
+ .await?;
- // Registering role
- let alice_id = ALICE_ID.clone();
+ // Register role
let role_id = "TEST_ROLE".parse::()?;
let permission_1 = CanModifyAccountMetadata {
- account: alice_id.clone(),
+ account: ALICE_ID.clone(),
};
let permission_2 = CanModifyDomainMetadata {
- domain: alice_id.domain().clone(),
+ domain: ALICE_ID.domain().clone(),
};
- let role = iroha::data_model::role::Role::new(role_id.clone(), alice_id.clone())
+ let role = Role::new(role_id.clone(), ALICE_ID.clone())
.add_permission(permission_1.clone())
.add_permission(permission_2.clone());
- let instructions = [Register::role(role.clone())];
- client.submit_all_blocking(instructions)?;
+ let register_role = Register::role(role.clone());
- // Grants role to Bob
+ // Grant the role to Bob
let bob_id = BOB_ID.clone();
- let grant_role = Grant::account_role(role_id.clone(), bob_id.clone());
- client.submit_blocking(grant_role)?;
+ let grant_role = Grant::account_role(role_id.clone(), BOB_ID.clone());
- // Unregister role
+ // Unregister the role
let unregister_role = Unregister::role(role_id.clone());
- client.submit_blocking(unregister_role)?;
+
+ {
+ let client = network.client();
+ spawn_blocking(move || {
+ client.submit_all_blocking::<InstructionBox>([
+ register_role.into(),
+ grant_role.into(),
+ unregister_role.into(),
+ ])
+ })
+ .await??;
+ }
// Inspect produced events
- let event: DataEvent = event_receiver.recv()??.try_into()?;
+ let event: DataEvent = events_stream.next().await.unwrap()?.try_into()?;
assert!(matches!(event, DataEvent::Role(_)));
if let DataEvent::Role(role_event) = event {
assert!(matches!(role_event, RoleEvent::Created(_)));
@@ -238,16 +220,16 @@ fn produce_multiple_events() -> Result<()> {
}
if let DataEvent::Domain(DomainEvent::Account(AccountEvent::RoleGranted(event))) =
- event_receiver.recv()??.try_into()?
+ events_stream.next().await.unwrap()?.try_into()?
{
- assert_eq!(*event.account(), alice_id);
+ assert_eq!(*event.account(), *ALICE_ID);
assert_eq!(*event.role(), role_id);
} else {
panic!("Expected event is not an AccountEvent::RoleGranted")
}
if let DataEvent::Domain(DomainEvent::Account(AccountEvent::RoleGranted(event))) =
- event_receiver.recv()??.try_into()?
+ events_stream.next().await.unwrap()?.try_into()?
{
assert_eq!(*event.account(), bob_id);
assert_eq!(*event.role(), role_id);
@@ -256,7 +238,7 @@ fn produce_multiple_events() -> Result<()> {
}
if let DataEvent::Domain(DomainEvent::Account(AccountEvent::RoleRevoked(event))) =
- event_receiver.recv()??.try_into()?
+ events_stream.next().await.unwrap()?.try_into()?
{
assert_eq!(*event.account(), bob_id);
assert_eq!(*event.role(), role_id);
@@ -265,15 +247,17 @@ fn produce_multiple_events() -> Result<()> {
}
if let DataEvent::Domain(DomainEvent::Account(AccountEvent::RoleRevoked(event))) =
- event_receiver.recv()??.try_into()?
+ events_stream.next().await.unwrap()?.try_into()?
{
- assert_eq!(*event.account(), alice_id);
+ assert_eq!(*event.account(), *ALICE_ID);
assert_eq!(*event.role(), role_id);
} else {
panic!("Expected event is not an AccountEvent::RoleRevoked")
}
- if let DataEvent::Role(RoleEvent::Deleted(event)) = event_receiver.recv()??.try_into()? {
+ if let DataEvent::Role(RoleEvent::Deleted(event)) =
+ events_stream.next().await.unwrap()?.try_into()?
+ {
assert_eq!(event, role_id);
} else {
panic!("Expected event is not an RoleEvent::Deleted")
diff --git a/crates/iroha/tests/integration/events/notification.rs b/crates/iroha/tests/integration/events/notification.rs
index 5bf381c1543..662e96c011a 100644
--- a/crates/iroha/tests/integration/events/notification.rs
+++ b/crates/iroha/tests/integration/events/notification.rs
@@ -1,14 +1,15 @@
-use std::{sync::mpsc, thread, time::Duration};
+use std::time::Duration;
-use eyre::{eyre, Result, WrapErr};
+use eyre::Result;
+use futures_util::StreamExt;
use iroha::data_model::prelude::*;
use iroha_test_network::*;
use iroha_test_samples::ALICE_ID;
+use tokio::{task::spawn_blocking, time::timeout};
-#[test]
-fn trigger_completion_success_should_produce_event() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_050).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+#[tokio::test]
+async fn trigger_completion_success_should_produce_event() -> Result<()> {
+ let network = NetworkBuilder::new().start().await?;
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -27,34 +28,28 @@ fn trigger_completion_success_should_produce_event() -> Result<()> {
.under_authority(asset_id.account().clone()),
),
));
- test_client.submit_blocking(register_trigger)?;
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(register_trigger)).await??;
- let call_trigger = ExecuteTrigger::new(trigger_id.clone());
+ let mut events = network
+ .client()
+ .listen_for_events_async([TriggerCompletedEventFilter::new()
+ .for_trigger(trigger_id.clone())
+ .for_outcome(TriggerCompletedOutcomeType::Success)])
+ .await?;
- let thread_client = test_client.clone();
- let (sender, receiver) = mpsc::channel();
- let _handle = thread::spawn(move || -> Result<()> {
- let mut event_it = thread_client.listen_for_events([TriggerCompletedEventFilter::new()
- .for_trigger(trigger_id)
- .for_outcome(TriggerCompletedOutcomeType::Success)])?;
- if event_it.next().is_some() {
- sender.send(())?;
- return Ok(());
- }
- Err(eyre!("No events emitted"))
- });
+ let call_trigger = ExecuteTrigger::new(trigger_id);
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(call_trigger)).await??;
- test_client.submit(call_trigger)?;
+ let _ = timeout(Duration::from_secs(5), events.next()).await?;
- receiver
- .recv_timeout(Duration::from_secs(60))
- .wrap_err("Failed to receive event message")
+ Ok(())
}
-#[test]
-fn trigger_completion_failure_should_produce_event() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_055).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+#[tokio::test]
+async fn trigger_completion_failure_should_produce_event() -> Result<()> {
+ let network = NetworkBuilder::new().start().await?;
let account_id = ALICE_ID.clone();
let trigger_id = "fail_box".parse::()?;
@@ -71,26 +66,21 @@ fn trigger_completion_failure_should_produce_event() -> Result<()> {
.under_authority(account_id),
),
));
- test_client.submit_blocking(register_trigger)?;
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(register_trigger)).await??;
- let call_trigger = ExecuteTrigger::new(trigger_id.clone());
+ let mut events = network
+ .client()
+ .listen_for_events_async([TriggerCompletedEventFilter::new()
+ .for_trigger(trigger_id.clone())
+ .for_outcome(TriggerCompletedOutcomeType::Failure)])
+ .await?;
- let thread_client = test_client.clone();
- let (sender, receiver) = mpsc::channel();
- let _handle = thread::spawn(move || -> Result<()> {
- let mut event_it = thread_client.listen_for_events([TriggerCompletedEventFilter::new()
- .for_trigger(trigger_id)
- .for_outcome(TriggerCompletedOutcomeType::Failure)])?;
- if event_it.next().is_some() {
- sender.send(())?;
- return Ok(());
- }
- Err(eyre!("No events emitted"))
- });
+ let call_trigger = ExecuteTrigger::new(trigger_id);
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(call_trigger)).await??;
- test_client.submit(call_trigger)?;
+ let _ = timeout(Duration::from_secs(5), events.next()).await?;
- receiver
- .recv_timeout(Duration::from_secs(60))
- .wrap_err("Failed to receive event message")
+ Ok(())
}
diff --git a/crates/iroha/tests/integration/events/pipeline.rs b/crates/iroha/tests/integration/events/pipeline.rs
index d8078b0d8b3..26427dff2c0 100644
--- a/crates/iroha/tests/integration/events/pipeline.rs
+++ b/crates/iroha/tests/integration/events/pipeline.rs
@@ -1,134 +1,81 @@
-use std::thread::{self, JoinHandle};
+use std::time::Duration;
+use assert_matches::assert_matches;
use eyre::Result;
-use iroha::{
- crypto::HashOf,
- data_model::{
- events::pipeline::{
- BlockEvent, BlockEventFilter, BlockStatus, TransactionEventFilter, TransactionStatus,
- },
- isi::error::InstructionExecutionError,
- parameter::BlockParameter,
- prelude::*,
- query::error::FindError,
- transaction::error::TransactionRejectionReason,
- ValidationFail,
- },
+use futures_util::StreamExt;
+use iroha::data_model::{
+ events::pipeline::{TransactionEventFilter, TransactionStatus},
+ isi::error::InstructionExecutionError,
+ prelude::*,
+ query::error::FindError,
+ transaction::error::TransactionRejectionReason,
+ ValidationFail,
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_test_network::*;
-use nonzero_ext::nonzero;
+use tokio::{task::spawn_blocking, time::timeout};
-// Needed to re-enable ignored tests.
-const PEER_COUNT: usize = 7;
-
-#[ignore = "ignore, more in #2851"]
-#[test]
-fn transaction_with_no_instructions_should_be_committed() -> Result<()> {
- test_with_instruction_and_status_and_port(None, &TransactionStatus::Approved, 10_250)
+#[tokio::test]
+async fn transaction_with_ok_instruction_should_be_committed() -> Result<()> {
+ let register = Register::domain(Domain::new("looking_glass".parse()?));
+ test_with_instruction_and_status([register], &TransactionStatus::Approved).await
}
-#[ignore = "ignore, more in #2851"]
-// #[ignore = "Experiment"]
-#[test]
-fn transaction_with_fail_instruction_should_be_rejected() -> Result<()> {
+#[tokio::test]
+async fn transaction_with_fail_instruction_should_be_rejected() -> Result<()> {
let unknown_domain_id = "dummy".parse::()?;
let fail_isi = Unregister::domain(unknown_domain_id.clone());
- test_with_instruction_and_status_and_port(
- Some(fail_isi.into()),
+ test_with_instruction_and_status(
+ [fail_isi],
&TransactionStatus::Rejected(Box::new(TransactionRejectionReason::Validation(
ValidationFail::InstructionFailed(InstructionExecutionError::Find(FindError::Domain(
unknown_domain_id,
))),
))),
- 10_350,
)
+ .await
}
-fn test_with_instruction_and_status_and_port(
- instruction: Option<InstructionBox>,
+async fn test_with_instruction_and_status(
+ exec: impl Into<Executable> + Send,
should_be: &TransactionStatus,
- port: u16,
) -> Result<()> {
- let (_rt, network, client) =
- Network::start_test_with_runtime(PEER_COUNT.try_into()?, Some(port));
- let clients = network.clients();
- wait_for_genesis_committed(&clients, 0);
- let pipeline_time = Config::pipeline_time();
-
- client.submit_blocking(SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(1_u64)),
- )))?;
-
// Given
- let submitter = client;
- let transaction = submitter.build_transaction(instruction, Metadata::default());
- let hash = transaction.hash();
- let mut handles = Vec::new();
- for listener in clients {
- let checker = Checker { listener, hash };
- let handle_validating = checker.clone().spawn(TransactionStatus::Queued);
- handles.push(handle_validating);
- let handle_validated = checker.spawn(should_be.clone());
- handles.push(handle_validated);
- }
+ let network = NetworkBuilder::new().start().await?;
+ let client = network.client();
+
// When
- submitter.submit_transaction(&transaction)?;
- thread::sleep(pipeline_time * 2);
- // Then
- for handle in handles {
- handle.join().expect("Thread panicked")
- }
- Ok(())
-}
+ let transaction = client.build_transaction(exec, Metadata::default());
+ let hash = transaction.hash();
+ let mut events = client
+ .listen_for_events_async([TransactionEventFilter::default().for_hash(hash)])
+ .await?;
+ spawn_blocking(move || client.submit_transaction(&transaction)).await??;
-#[derive(Clone)]
-struct Checker {
- listener: iroha::client::Client,
- hash: HashOf<SignedTransaction>,
-}
+ // Then
+ timeout(Duration::from_secs(5), async move {
+ assert_matches!(
+ events.next().await.unwrap().unwrap(),
+ EventBox::Pipeline(PipelineEventBox::Transaction(TransactionEvent {
+ status: TransactionStatus::Queued,
+ ..
+ }))
+ );
+ assert_matches!(
+ events.next().await.unwrap().unwrap(),
+ EventBox::Pipeline(PipelineEventBox::Transaction(TransactionEvent {
+ status,
+ ..
+ })) if status == *should_be
+ );
+ })
+ .await?;
-impl Checker {
- fn spawn(self, status_kind: TransactionStatus) -> JoinHandle<()> {
- thread::spawn(move || {
- let mut event_iterator = self
- .listener
- .listen_for_events([TransactionEventFilter::default()
- .for_status(status_kind)
- .for_hash(self.hash)])
- .expect("Failed to create event iterator.");
- let event_result = event_iterator.next().expect("Stream closed");
- let _event = event_result.expect("Must be valid");
- })
- }
+ Ok(())
}
#[test]
+#[ignore = "TODO: implement with the help of Kura Inspector, "]
fn applied_block_must_be_available_in_kura() {
- let (_rt, peer, client) = <PeerBuilder>::new().with_port(11_040).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
-
- let event_filter = BlockEventFilter::default().for_status(BlockStatus::Applied);
- let mut event_iter = client
- .listen_for_events([event_filter])
- .expect("Failed to subscribe for events");
-
- client
- .submit(Unregister::domain("dummy".parse().unwrap()))
- .expect("Failed to submit transaction");
-
- let event: BlockEvent = event_iter
- .next()
- .expect("Block must be committed")
- .expect("Block must be committed")
- .try_into()
- .expect("Received unexpected event");
-
- peer.irohad
- .as_ref()
- .expect("Must be some")
- .kura()
- .get_block_by_height(event.header().height().try_into().unwrap())
- .expect("Block applied event was received earlier");
+ unimplemented!();
}
diff --git a/crates/iroha/tests/integration/extra_functional/connected_peers.rs b/crates/iroha/tests/integration/extra_functional/connected_peers.rs
index 4bc748200d3..7dbab107963 100644
--- a/crates/iroha/tests/integration/extra_functional/connected_peers.rs
+++ b/crates/iroha/tests/integration/extra_functional/connected_peers.rs
@@ -1,130 +1,127 @@
-use std::thread;
-
-use eyre::{Context, Result};
-use iroha::{
- client::Client,
- data_model::{
- isi::{Register, Unregister},
- peer::Peer as DataModelPeer,
- },
+use std::iter::once;
+
+use assert_matches::assert_matches;
+use eyre::Result;
+use futures_util::{stream::FuturesUnordered, StreamExt};
+use iroha::data_model::{
+ isi::{Register, Unregister},
+ peer::Peer,
};
-use iroha_config::parameters::actual::Root as Config;
-use iroha_primitives::unique_vec;
+use iroha_config_base::toml::WriteExt;
use iroha_test_network::*;
-use rand::{seq::SliceRandom, thread_rng, Rng};
-use tokio::runtime::Runtime;
+use rand::{prelude::IteratorRandom, seq::SliceRandom, thread_rng};
+use tokio::{task::spawn_blocking, time::timeout};
-#[ignore = "ignore, more in #2851"]
-#[test]
-fn connected_peers_with_f_2_1_2() -> Result<()> {
- connected_peers_with_f(2, Some(11_020))
+#[tokio::test]
+async fn connected_peers_with_f_2_1_2() -> Result<()> {
+ connected_peers_with_f(2).await
}
-#[test]
-fn connected_peers_with_f_1_0_1() -> Result<()> {
- connected_peers_with_f(1, Some(11_000))
+#[tokio::test]
+async fn connected_peers_with_f_1_0_1() -> Result<()> {
+ connected_peers_with_f(1).await
}
-#[test]
-fn register_new_peer() -> Result<()> {
- let (_rt, network, _) = Network::start_test_with_runtime(4, Some(11_180));
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
-
- let mut peer_clients: Vec<_> = Network::peers(&network)
- .zip(Network::clients(&network))
- .collect();
-
- check_status(&peer_clients, 1);
-
- // Start new peer
- let mut configuration = Config::test();
- configuration.sumeragi.trusted_peers.value_mut().others =
- unique_vec![peer_clients.choose(&mut thread_rng()).unwrap().0.id.clone()];
- let rt = Runtime::test();
- let new_peer = rt.block_on(
- PeerBuilder::new()
- .with_config(configuration)
- .with_into_genesis(WithGenesis::None)
- .with_port(11_225)
- .start(),
- );
-
- let register_peer = Register::peer(DataModelPeer::new(new_peer.id.clone()));
- peer_clients
- .choose(&mut thread_rng())
- .unwrap()
- .1
- .submit_blocking(register_peer)?;
- peer_clients.push((&new_peer, Client::test(&new_peer.api_address)));
- thread::sleep(pipeline_time * 2 * 20); // Wait for some time to allow peers to connect
+#[tokio::test]
+async fn register_new_peer() -> Result<()> {
+ let network = NetworkBuilder::new().with_peers(4).start().await?;
+
+ let peer = NetworkPeer::generate();
+ peer.start(
+ network
+ .config()
+ // only one random peer
+ .write(["sumeragi", "trusted_peers"], [network.peer().id()]),
+ None,
+ )
+ .await;
- check_status(&peer_clients, 2);
+ let register = Register::peer(Peer::new(peer.id()));
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(register)).await??;
+
+ timeout(network.sync_timeout(), peer.once_block(2)).await?;
Ok(())
}
/// Test the number of connected peers, changing the number of faults tolerated down and up
-fn connected_peers_with_f(faults: u64, start_port: Option<u16>) -> Result<()> {
+// Note: sometimes fails due to https://github.com/hyperledger/iroha/issues/5104
+async fn connected_peers_with_f(faults: usize) -> Result<()> {
let n_peers = 3 * faults + 1;
- let (_rt, network, _) = Network::start_test_with_runtime(
- (n_peers)
- .try_into()
- .wrap_err("`faults` argument `u64` value too high, cannot convert to `u32`")?,
- start_port,
- );
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
+ let network = NetworkBuilder::new().with_peers(n_peers).start().await?;
- let mut peer_clients: Vec<_> = Network::peers(&network)
- .zip(Network::clients(&network))
- .collect();
+ assert_peers_status(network.peers().iter(), 1, n_peers as u64 - 1).await;
- check_status(&peer_clients, 1);
+ let mut randomized_peers = network
+ .peers()
+ .iter()
+ .choose_multiple(&mut thread_rng(), n_peers);
+ let removed_peer = randomized_peers.remove(0);
// Unregister a peer: committed with f = `faults` then `status.peers` decrements
- let removed_peer_idx = rand::thread_rng().gen_range(0..peer_clients.len());
- let (removed_peer, _) = &peer_clients[removed_peer_idx];
- let unregister_peer = Unregister::peer(removed_peer.id.clone());
- peer_clients
- .choose(&mut thread_rng())
- .unwrap()
- .1
- .submit_blocking(unregister_peer)?;
- thread::sleep(pipeline_time * 2); // Wait for some time to allow peers to connect
- let (removed_peer, removed_peer_client) = peer_clients.remove(removed_peer_idx);
-
- thread::sleep(pipeline_time * 2); // Wait for some time to allow peers to disconnect
-
- check_status(&peer_clients, 2);
- let status = removed_peer_client.get_status()?;
+ let client = randomized_peers.choose(&mut thread_rng()).unwrap().client();
+ let unregister_peer = Unregister::peer(removed_peer.id());
+ spawn_blocking(move || client.submit_blocking(unregister_peer)).await??;
+ timeout(
+ network.sync_timeout(),
+ randomized_peers
+ .iter()
+ .map(|peer| peer.once_block(2))
+ .collect::<FuturesUnordered<_>>()
+ .collect::<Vec<_>>(),
+ )
+ .await?;
+ assert_peers_status(randomized_peers.iter().copied(), 2, n_peers as u64 - 2).await;
+
+ let status = removed_peer.status().await?;
// Peer might have been disconnected before getting the block
- assert!(status.blocks == 1 || status.blocks == 2);
+ assert_matches!(status.blocks, 1 | 2);
assert_eq!(status.peers, 0);
// Re-register the peer: committed with f = `faults` - 1 then `status.peers` increments
- let register_peer = Register::peer(DataModelPeer::new(removed_peer.id.clone()));
- peer_clients
+ let register_peer = Register::peer(Peer::new(removed_peer.id()));
+ let client = randomized_peers
+ .iter()
.choose(&mut thread_rng())
.unwrap()
- .1
- .submit_blocking(register_peer)?;
- peer_clients.insert(removed_peer_idx, (removed_peer, removed_peer_client));
- thread::sleep(pipeline_time * 2); // Wait for some time to allow peers to connect
+ .client();
+ spawn_blocking(move || client.submit_blocking(register_peer)).await??;
+ network.ensure_blocks(3).await?;
- check_status(&peer_clients, 3);
+ assert_peers_status(
+ randomized_peers.iter().copied().chain(once(removed_peer)),
+ 3,
+ n_peers as u64 - 1,
+ )
+ .await;
Ok(())
}
-fn check_status(peer_clients: &[(&Peer, Client)], expected_blocks: u64) {
- let n_peers = peer_clients.len() as u64;
-
- for (_, peer_client) in peer_clients {
- let status = peer_client.get_status().unwrap();
-
- assert_eq!(status.peers, n_peers - 1);
- assert_eq!(status.blocks, expected_blocks);
- }
+async fn assert_peers_status(
+ peers: impl Iterator<Item = &NetworkPeer> + Send,
+ expected_blocks: u64,
+ expected_peers: u64,
+) {
+ peers
+ .map(|peer| async {
+ let status = peer.status().await.expect("peer should be able to reply");
+ assert_eq!(
+ status.peers,
+ expected_peers,
+ "unexpected peers for {}",
+ peer.id()
+ );
+ assert_eq!(
+ status.blocks,
+ expected_blocks,
+ "expected blocks for {}",
+ peer.id()
+ );
+ })
+ .collect::<FuturesUnordered<_>>()
+ .collect::<Vec<_>>()
+ .await;
}
diff --git a/crates/iroha/tests/integration/extra_functional/genesis.rs b/crates/iroha/tests/integration/extra_functional/genesis.rs
index 3f1e7275b9b..8d680759e94 100644
--- a/crates/iroha/tests/integration/extra_functional/genesis.rs
+++ b/crates/iroha/tests/integration/extra_functional/genesis.rs
@@ -1,33 +1,54 @@
+use eyre::Context;
+use futures_util::{stream::FuturesUnordered, StreamExt};
use iroha::data_model::{
domain::{Domain, DomainId},
isi::Register,
};
-use iroha_test_network::{wait_for_genesis_committed, NetworkBuilder};
+use iroha_test_network::NetworkBuilder;
+use tokio::{task::spawn_blocking, time::timeout};
-#[test]
-fn all_peers_submit_genesis() {
- multiple_genesis_peers(4, 4, 13_800);
+#[tokio::test]
+async fn all_peers_submit_genesis() -> eyre::Result<()> {
+ multiple_genesis_peers(4, 4).await
}
-#[test]
-fn multiple_genesis_4_peers_3_genesis() {
- multiple_genesis_peers(4, 3, 13_820);
+#[tokio::test]
+async fn multiple_genesis_4_peers_3_genesis() -> eyre::Result<()> {
+ multiple_genesis_peers(4, 3).await
}
-#[test]
-fn multiple_genesis_4_peers_2_genesis() {
- multiple_genesis_peers(4, 2, 13_840);
+#[tokio::test]
+async fn multiple_genesis_4_peers_2_genesis() -> eyre::Result<()> {
+ multiple_genesis_peers(4, 2).await
}
-fn multiple_genesis_peers(n_peers: u32, n_genesis_peers: u32, port: u16) {
- let (_rt, network, client) = NetworkBuilder::new(n_peers, Some(port))
- .with_genesis_peers(n_genesis_peers)
- .create_with_runtime();
- wait_for_genesis_committed(&network.clients(), 0);
+async fn multiple_genesis_peers(n_peers: usize, n_genesis_peers: usize) -> eyre::Result<()> {
+ let network = NetworkBuilder::new().with_peers(n_peers).build();
+ timeout(
+ network.peer_startup_timeout(),
+ network
+ .peers()
+ .iter()
+ .enumerate()
+ .map(|(i, peer)| {
+ let cfg = network.config();
+ let genesis = (i < n_genesis_peers).then_some(network.genesis());
+ async move {
+ peer.start(cfg, genesis).await;
+ peer.once_block(1).await;
+ }
+ })
+ .collect::<FuturesUnordered<_>>()
+ .collect::<Vec<_>>(),
+ )
+ .await?;
+ let client = network.client();
let domain_id: DomainId = "foo".parse().expect("Valid");
let create_domain = Register::domain(Domain::new(domain_id));
- client
- .submit_blocking(create_domain)
- .expect("Failed to register domain");
+ spawn_blocking(move || client.submit_blocking(create_domain))
+ .await?
+ .wrap_err("Failed to register domain")?;
+
+ Ok(())
}
diff --git a/crates/iroha/tests/integration/extra_functional/mod.rs b/crates/iroha/tests/integration/extra_functional/mod.rs
index 6e35d278cbd..df11a06313e 100644
--- a/crates/iroha/tests/integration/extra_functional/mod.rs
+++ b/crates/iroha/tests/integration/extra_functional/mod.rs
@@ -5,4 +5,3 @@ mod normal;
mod offline_peers;
mod restart_peer;
mod unregister_peer;
-mod unstable_network;
diff --git a/crates/iroha/tests/integration/extra_functional/multiple_blocks_created.rs b/crates/iroha/tests/integration/extra_functional/multiple_blocks_created.rs
index f66da0e4425..b5335e1af58 100644
--- a/crates/iroha/tests/integration/extra_functional/multiple_blocks_created.rs
+++ b/crates/iroha/tests/integration/extra_functional/multiple_blocks_created.rs
@@ -1,28 +1,40 @@
-use std::thread;
+use std::{num::NonZero, time::Duration};
use eyre::Result;
+use futures_util::StreamExt;
use iroha::{
- client::{self, Client},
- data_model::{parameter::BlockParameter, prelude::*},
+ client::{self},
+ data_model::prelude::*,
+};
+use iroha_data_model::{
+ events::pipeline::{BlockEventFilter, TransactionEventFilter},
+ parameter::BlockParameter,
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_test_network::*;
use iroha_test_samples::gen_account_in;
-use nonzero_ext::nonzero;
+use rand::{prelude::IteratorRandom, thread_rng};
+use tokio::{
+ sync::{mpsc, watch},
+ task::{spawn_blocking, JoinSet},
+ time::{sleep, timeout},
+};
-const N_BLOCKS: usize = 510;
+/// Bombard random peers with random mints in multiple rounds, ensuring they all have
+/// a consistent total amount in the end.
+#[tokio::test]
+async fn multiple_blocks_created() -> Result<()> {
+ const N_ROUNDS: u64 = 50;
+ const N_MAX_TXS_PER_BLOCK: u64 = 10;
-#[ignore = "Takes a lot of time."]
-#[test]
-fn long_multiple_blocks_created() -> Result<()> {
// Given
- let (_rt, network, client) = Network::start_test_with_runtime(4, Some(10_965));
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
-
- client.submit_blocking(SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(1_u64)),
- )))?;
+ let network = NetworkBuilder::new()
+ .with_peers(4)
+ .with_genesis_instruction(SetParameter(Parameter::Block(
+ BlockParameter::MaxTransactions(NonZero::new(N_MAX_TXS_PER_BLOCK).expect("valid")),
+ )))
+ .with_pipeline_time(Duration::from_secs(1))
+ .start()
+ .await?;
let create_domain = Register::domain(Domain::new("domain".parse()?));
let (account_id, _account_keypair) = gen_account_in("domain");
@@ -31,41 +43,174 @@ fn long_multiple_blocks_created() -> Result<()> {
let create_asset =
Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- client.submit_all::<InstructionBox>([
- create_domain.into(),
- create_account.into(),
- create_asset.into(),
- ])?;
-
- thread::sleep(pipeline_time);
-
- let mut account_has_quantity = Numeric::ZERO;
- let quantity = numeric!(1);
- //When
- for _ in 0..N_BLOCKS {
- let mint_asset = Mint::asset_numeric(
- quantity,
- AssetId::new(asset_definition_id.clone(), account_id.clone()),
- );
- client.submit(mint_asset)?;
- account_has_quantity = account_has_quantity.checked_add(quantity).unwrap();
- thread::sleep(pipeline_time / 4);
+ {
+ let client = network.client();
+ spawn_blocking(move || {
+ client.clone().submit_all::<InstructionBox>([
+ create_domain.into(),
+ create_account.into(),
+ create_asset.into(),
+ ])
+ })
+ .await??;
+ }
+
+ network.ensure_blocks(2).await?;
+
+ let blocks = BlocksTracker::start(&network);
+
+ // When
+ let mut total: u128 = 0;
+ for _ in 1..=N_ROUNDS {
+ let txs = (1..=N_MAX_TXS_PER_BLOCK)
+ .choose(&mut thread_rng())
+ .expect("there is a room to choose from");
+ println!("submitting {txs} transactions to random peers");
+ for _ in 0..txs {
+ let value = (0..999_999)
+ .choose(&mut thread_rng())
+ .expect("there is quite a room to choose from");
+ total += value;
+
+ let client = network.client();
+ let tx = client.build_transaction(
+ [Mint::asset_numeric(
+ Numeric::new(value, 0),
+ AssetId::new(asset_definition_id.clone(), account_id.clone()),
+ )],
+ <_>::default(),
+ );
+ spawn_blocking(move || client.submit_transaction(&tx)).await??;
+ }
+
+ timeout(network.sync_timeout(), blocks.sync()).await?;
+ }
+
+ // ensuring all have the same total
+ sleep(Duration::from_secs(2)).await;
+ println!("all peers should have total={total}");
+ let expected_value = AssetValue::Numeric(Numeric::new(total, 0));
+ for peer in network.peers() {
+ let client = peer.client();
+ let expected_value = expected_value.clone();
+ let account_id = account_id.clone();
+ let definition = asset_definition_id.clone();
+ let assets = spawn_blocking(move || {
+ client
+ .query(client::asset::all())
+ .filter_with(|asset| {
+ asset.id.account.eq(account_id) & asset.id.definition_id.eq(definition)
+ })
+ .execute_all()
+ })
+ .await??;
+ assert_eq!(assets.len(), 1);
+ let asset = assets.into_iter().next().unwrap();
+ assert_eq!(*asset.value(), expected_value);
}
- thread::sleep(pipeline_time * 5);
-
- //Then
- let peer = network.peers().last().unwrap();
- Client::test(&peer.api_address).poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(account_has_quantity)
- }))
- })?;
Ok(())
}
+
+// TODO: consider making a part of `iroha_test_network`
+struct BlocksTracker {
+ sync_tx: watch::Sender<bool>,
+ _children: JoinSet<()>,
+}
+
+impl BlocksTracker {
+ fn start(network: &Network) -> Self {
+ enum PeerEvent {
+ Block(u64),
+ Transaction,
+ }
+
+ let mut children = JoinSet::new();
+
+ let (block_tx, mut block_rx) = mpsc::channel::<(PeerEvent, usize)>(10);
+ for (i, peer) in network.peers().iter().cloned().enumerate() {
+ let tx = block_tx.clone();
+ children.spawn(async move {
+ let mut events = peer
+ .client()
+ .listen_for_events_async([
+ EventFilterBox::from(BlockEventFilter::default()),
+ TransactionEventFilter::default().into(),
+ ])
+ .await
+ .expect("peer should be up");
+ while let Some(Ok(event)) = events.next().await {
+ match event {
+ EventBox::Pipeline(PipelineEventBox::Block(x))
+ if matches!(*x.status(), BlockStatus::Applied) =>
+ {
+ let _ = tx
+ .send((PeerEvent::Block(x.header().height().get()), i))
+ .await;
+ }
+ EventBox::Pipeline(PipelineEventBox::Transaction(x))
+ if matches!(*x.status(), TransactionStatus::Queued) =>
+ {
+ let _ = tx.send((PeerEvent::Transaction, i)).await;
+ }
+ _ => {}
+ }
+ }
+ });
+ }
+
+ let peers_count = network.peers().len();
+ let (sync_tx, _sync_rx) = watch::channel(false);
+ let sync_clone = sync_tx.clone();
+ children.spawn(async move {
+ #[derive(Copy, Clone)]
+ struct PeerState {
+ height: u64,
+ mutated: bool,
+ }
+
+ let mut blocks = vec![
+ PeerState {
+ height: 0,
+ mutated: false
+ };
+ peers_count
+ ];
+ loop {
+ tokio::select! {
+ Some((event, i)) = block_rx.recv() => {
+ let state = blocks.get_mut(i).unwrap();
+ match event {
+ PeerEvent::Block(height) => {
+ state.height = height;
+ state.mutated = false;
+ }
+ PeerEvent::Transaction => {
+ state.mutated = true;
+ }
+ }
+
+ let max_height = blocks.iter().map(|x| x.height).max().expect("there is at least 1");
+ let is_sync = blocks.iter().all(|x| x.height == max_height && !x.mutated);
+ sync_tx.send_modify(|flag| *flag = is_sync);
+ }
+ }
+ }
+ });
+
+ Self {
+ sync_tx: sync_clone,
+ _children: children,
+ }
+ }
+
+ async fn sync(&self) {
+ let mut recv = self.sync_tx.subscribe();
+ loop {
+ if *recv.borrow_and_update() {
+ return;
+ }
+ recv.changed().await.unwrap()
+ }
+ }
+}
diff --git a/crates/iroha/tests/integration/extra_functional/normal.rs b/crates/iroha/tests/integration/extra_functional/normal.rs
index 4185cd2c6fd..09daf1f2d4d 100644
--- a/crates/iroha/tests/integration/extra_functional/normal.rs
+++ b/crates/iroha/tests/integration/extra_functional/normal.rs
@@ -1,3 +1,4 @@
+use eyre::Result;
use iroha::{
client,
data_model::{asset::AssetDefinitionId, parameter::BlockParameter, prelude::*},
@@ -6,48 +7,45 @@ use iroha_test_network::*;
use nonzero_ext::nonzero;
#[test]
-fn tranasctions_should_be_applied() {
- let (_rt, network, iroha) = NetworkBuilder::new(4, Some(11_300)).create_with_runtime();
- wait_for_genesis_committed(&network.clients(), 0);
- iroha
- .submit_blocking(SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(1_u64)),
- )))
- .unwrap();
-
- let domain_id = "and".parse::().unwrap();
+fn transactions_should_be_applied() -> Result<()> {
+ let (network, _rt) = NetworkBuilder::new().with_peers(4).start_blocking()?;
+ let iroha = network.client();
+ iroha.submit_blocking(SetParameter::new(Parameter::Block(
+ BlockParameter::MaxTransactions(nonzero!(1_u64)),
+ )))?;
+
+ let domain_id = "and".parse::()?;
let account_id = "ed01201F803CB23B1AAFB958368DF2F67CB78A2D1DFB47FFFC3133718F165F54DFF677@and"
- .parse::<AccountId>()
- .unwrap();
- let asset_definition_id = "MAY#and".parse::().unwrap();
+ .parse::()?;
+ let asset_definition_id = "MAY#and".parse::()?;
let asset_id =
"MAY##ed01201F803CB23B1AAFB958368DF2F67CB78A2D1DFB47FFFC3133718F165F54DFF677@and"
- .parse()
- .unwrap();
+ .parse()?;
let create_domain = Register::domain(Domain::new(domain_id));
- iroha.submit_blocking(create_domain).unwrap();
+ iroha.submit_blocking(create_domain)?;
let create_asset =
Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- iroha.submit_blocking(create_asset).unwrap();
+ iroha.submit_blocking(create_asset)?;
let create_account = Register::account(Account::new(account_id.clone()));
- iroha.submit_blocking(create_account).unwrap();
+ iroha.submit_blocking(create_account)?;
let mint_asset = Mint::asset_numeric(
numeric!(57_787_013_353_273_097_936_105_299_296),
AssetId::new(asset_definition_id.clone(), account_id.clone()),
);
- iroha.submit_blocking(mint_asset).unwrap();
+ iroha.submit_blocking(mint_asset)?;
let mint_asset =
Mint::asset_numeric(numeric!(1), AssetId::new(asset_definition_id, account_id));
- iroha.submit_blocking(mint_asset).unwrap();
+ iroha.submit_blocking(mint_asset)?;
iroha
.query(client::asset::all())
.filter_with(|asset| asset.id.eq(asset_id))
- .execute_single()
- .unwrap();
+ .execute_single()?;
+
+ Ok(())
}
diff --git a/crates/iroha/tests/integration/extra_functional/offline_peers.rs b/crates/iroha/tests/integration/extra_functional/offline_peers.rs
index cecd19ee96d..33344eb66d6 100644
--- a/crates/iroha/tests/integration/extra_functional/offline_peers.rs
+++ b/crates/iroha/tests/integration/extra_functional/offline_peers.rs
@@ -1,53 +1,71 @@
-use eyre::Result;
+use eyre::{OptionExt, Result};
+use futures_util::stream::{FuturesUnordered, StreamExt};
use iroha::{
- client::{self, Client},
+ client::{self},
crypto::KeyPair,
data_model::{
peer::{Peer as DataModelPeer, PeerId},
prelude::*,
},
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_primitives::addr::socket_addr;
use iroha_test_network::*;
use iroha_test_samples::ALICE_ID;
+use tokio::task::spawn_blocking;
-#[test]
-fn genesis_block_is_committed_with_some_offline_peers() -> Result<()> {
+#[tokio::test]
+async fn genesis_block_is_committed_with_some_offline_peers() -> Result<()> {
// Given
- let (_rt, network, client) = NetworkBuilder::new(4, Some(10_560))
- .with_offline_peers(1)
- .create_with_runtime();
- wait_for_genesis_committed(&network.clients(), 1);
-
- //When
let alice_id = ALICE_ID.clone();
let roses = "rose#wonderland".parse()?;
let alice_has_roses = numeric!(13);
- //Then
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(alice_id))
- .execute_all()?;
- let asset = assets
+ // When
+ let network = NetworkBuilder::new().with_peers(4).build();
+ let cfg = network.config();
+ let genesis = network.genesis();
+ network
+ .peers()
+ .iter()
+ // only 2 out of 4
+ .take(2)
+ .enumerate()
+ .map(|(i, peer)| peer.start(cfg.clone(), (i == 0).then_some(genesis)))
+ .collect::<FuturesUnordered<_>>()
+ .collect::<Vec<_>>()
+ .await;
+ network.ensure_blocks(1).await?;
+
+ // Then
+ let client = network
+ .peers()
.iter()
- .find(|asset| *asset.id().definition() == roses)
- .unwrap();
- assert_eq!(AssetValue::Numeric(alice_has_roses), *asset.value());
+ .find(|x| x.is_running())
+ .expect("there are two running peers")
+ .client();
+ spawn_blocking(move || -> Result<()> {
+ let assets = client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(alice_id))
+ .execute_all()?;
+ let asset = assets
+ .iter()
+ .find(|asset| *asset.id().definition() == roses)
+ .ok_or_eyre("asset should be found")?;
+ assert_eq!(AssetValue::Numeric(alice_has_roses), *asset.value());
+ Ok(())
+ })
+ .await??;
+
Ok(())
}
-#[test]
-fn register_offline_peer() -> Result<()> {
- let n_peers = 4;
-
- let (_rt, network, client) = Network::start_test_with_runtime(n_peers, Some(11_160));
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
- let peer_clients = Network::clients(&network);
+#[tokio::test]
+async fn register_offline_peer() -> Result<()> {
+ const N_PEERS: usize = 4;
- check_status(&peer_clients, 1);
+ let network = NetworkBuilder::new().with_peers(N_PEERS).start().await?;
+ check_status(&network, N_PEERS as u64 - 1).await;
let address = socket_addr!(128.0.0.2:8085);
let key_pair = KeyPair::random();
@@ -56,22 +74,24 @@ fn register_offline_peer() -> Result<()> {
let register_peer = Register::peer(DataModelPeer::new(peer_id));
// Wait for some time to allow peers to connect
- client.submit_blocking(register_peer)?;
- std::thread::sleep(pipeline_time * 2);
+ let client = network.client();
+ spawn_blocking(move || client.submit_blocking(register_peer)).await??;
+ network.ensure_blocks(2).await?;
- // Make sure status hasn't change
- check_status(&peer_clients, 2);
+ // Make sure peers count hasn't changed
+ check_status(&network, N_PEERS as u64 - 1).await;
Ok(())
}
-fn check_status(peer_clients: &[Client], expected_blocks: u64) {
- let n_peers = peer_clients.len() as u64;
-
- for peer_client in peer_clients {
- let status = peer_client.get_status().unwrap();
+async fn check_status(network: &Network, expected_peers: u64) {
+ for peer in network.peers() {
+ let client = peer.client();
+ let status = spawn_blocking(move || client.get_status())
+ .await
+ .expect("no panic")
+ .expect("status should not fail");
- assert_eq!(status.peers, n_peers - 1);
- assert_eq!(status.blocks, expected_blocks);
+ assert_eq!(status.peers, expected_peers);
}
}
diff --git a/crates/iroha/tests/integration/extra_functional/restart_peer.rs b/crates/iroha/tests/integration/extra_functional/restart_peer.rs
index 4b51e7c2d8d..b6681c4b645 100644
--- a/crates/iroha/tests/integration/extra_functional/restart_peer.rs
+++ b/crates/iroha/tests/integration/extra_functional/restart_peer.rs
@@ -1,96 +1,68 @@
-use std::thread;
-
use eyre::Result;
use iroha::{
- client::{self, Client},
+ client::{self},
data_model::prelude::*,
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_test_network::*;
use iroha_test_samples::ALICE_ID;
-use rand::{seq::SliceRandom, thread_rng, Rng};
-use tokio::runtime::Runtime;
+use tokio::{task::spawn_blocking, time::timeout};
-#[test]
-fn restarted_peer_should_have_the_same_asset_amount() -> Result<()> {
- let account_id = ALICE_ID.clone();
- let asset_definition_id = "xor#wonderland".parse::().unwrap();
+#[tokio::test]
+async fn restarted_peer_should_restore_its_state() -> Result<()> {
+ let asset_definition_id = "xor#wonderland".parse::()?;
let quantity = numeric!(200);
- let mut removed_peer = {
- let n_peers = 4;
-
- let (_rt, network, _) = Network::start_test_with_runtime(n_peers, Some(11_205));
- wait_for_genesis_committed(&network.clients(), 0);
- let pipeline_time = Config::pipeline_time();
- let peer_clients = Network::clients(&network);
+ let network = NetworkBuilder::new().with_peers(4).start().await?;
+ let peers = network.peers();
- let create_asset =
- Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- peer_clients
- .choose(&mut thread_rng())
- .unwrap()
- .submit_blocking(create_asset)?;
+ // create state on the first peer
+ let peer_a = &peers[0];
+ let client = peer_a.client();
+ let asset_definition_clone = asset_definition_id.clone();
+ spawn_blocking(move || {
+ client
+ .submit_all_blocking::<InstructionBox>([
+ Register::asset_definition(AssetDefinition::numeric(
+ asset_definition_clone.clone(),
+ ))
+ .into(),
+ Mint::asset_numeric(
+ quantity,
+ AssetId::new(asset_definition_clone, ALICE_ID.clone()),
+ )
+ .into(),
+ ])
+ .unwrap();
+ })
+ .await?;
+ network.ensure_blocks(2).await?;
- let mint_asset = Mint::asset_numeric(
- quantity,
- AssetId::new(asset_definition_id.clone(), account_id.clone()),
- );
- peer_clients
- .choose(&mut thread_rng())
- .unwrap()
- .submit_blocking(mint_asset)?;
+ // shutdown all
+ network.shutdown().await;
- // Wait for observing peer to get the block
- thread::sleep(pipeline_time);
+ // restart another one, **without a genesis** even
+ let peer_b = &peers[1];
+ let config = network.config();
+ assert_ne!(peer_a, peer_b);
+ timeout(network.peer_startup_timeout(), async move {
+ peer_b.start(config, None).await;
+ peer_b.once_block(2).await;
+ })
+ .await?;
- let assets = peer_clients
- .choose(&mut thread_rng())
- .unwrap()
+ // ensure it has the state
+ let client = peer_b.client();
+ let asset = spawn_blocking(move || {
+ client
.query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
- let asset = assets
- .into_iter()
- .find(|asset| *asset.id().definition() == asset_definition_id)
- .expect("Asset not found");
- assert_eq!(AssetValue::Numeric(quantity), *asset.value());
-
- let mut all_peers: Vec<_> = core::iter::once(network.first_peer)
- .chain(network.peers.into_values())
- .collect();
- let removed_peer_idx = rand::thread_rng().gen_range(0..all_peers.len());
- let mut removed_peer = all_peers.swap_remove(removed_peer_idx);
- removed_peer.terminate();
- removed_peer
- };
- // All peers have been stopped here
-
- // Restart just one peer and check if it updates itself from the blockstore
- {
- let rt = Runtime::test();
- rt.block_on(
- PeerBuilder::new()
- .with_dir(removed_peer.temp_dir.as_ref().unwrap().clone())
- .start_with_peer(&mut removed_peer),
- );
- let removed_peer_client = Client::test(&removed_peer.api_address);
- wait_for_genesis_committed(&vec![removed_peer_client.clone()], 0);
-
- removed_peer_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
- iroha_logger::error!(?assets);
-
- let account_asset = assets
- .into_iter()
- .find(|asset| *asset.id().definition() == asset_definition_id)
- .expect("Asset not found");
+ .filter_with(|asset| asset.id.account.eq(ALICE_ID.clone()))
+ .execute_all()
+ })
+ .await??
+ .into_iter()
+ .find(|asset| *asset.id().definition() == asset_definition_id)
+ .expect("Asset not found");
+ assert_eq!(AssetValue::Numeric(quantity), *asset.value());
- Ok(AssetValue::Numeric(quantity) == *account_asset.value())
- })?
- }
Ok(())
}
diff --git a/crates/iroha/tests/integration/extra_functional/unregister_peer.rs b/crates/iroha/tests/integration/extra_functional/unregister_peer.rs
index d5e485c7d45..8593b49fa06 100644
--- a/crates/iroha/tests/integration/extra_functional/unregister_peer.rs
+++ b/crates/iroha/tests/integration/extra_functional/unregister_peer.rs
@@ -1,142 +1,126 @@
-use std::thread;
+use std::time::Duration;
+use assert_matches::assert_matches;
use eyre::Result;
use iroha::{
client,
+ client::Client,
data_model::{parameter::BlockParameter, prelude::*},
};
-use iroha_config::parameters::actual::Root as Config;
-use iroha_test_network::*;
+use iroha_test_network::{NetworkBuilder, NetworkPeer};
use iroha_test_samples::gen_account_in;
use nonzero_ext::nonzero;
+use tokio::{task::spawn_blocking, time::sleep};
+
+#[tokio::test]
+async fn network_stable_after_add_and_after_remove_peer() -> Result<()> {
+ const PIPELINE_TIME: Duration = Duration::from_millis(300);
-// Note the test is marked as `unstable`, not the network.
-#[ignore = "ignore, more in #2851"]
-#[test]
-fn unstable_network_stable_after_add_and_after_remove_peer() -> Result<()> {
// Given a network
- let (rt, network, genesis_client, pipeline_time, account_id, asset_definition_id) = init()?;
- wait_for_genesis_committed(&network.clients(), 0);
+ let mut network = NetworkBuilder::new()
+ .with_pipeline_time(PIPELINE_TIME)
+ .with_peers(4)
+ .with_genesis_instruction(SetParameter::new(Parameter::Block(
+ BlockParameter::MaxTransactions(nonzero!(1_u64)),
+ )))
+ .start()
+ .await?;
+ let client = network.client();
+
+ let (account, _account_keypair) = gen_account_in("domain");
+ let asset_def: AssetDefinitionId = "xor#domain".parse()?;
+ {
+ let client = client.clone();
+ let account = account.clone();
+ let asset_def = asset_def.clone();
+ spawn_blocking(move || {
+ client.submit_all_blocking::<InstructionBox>([
+ Register::domain(Domain::new("domain".parse()?)).into(),
+ Register::account(Account::new(account)).into(),
+ Register::asset_definition(AssetDefinition::numeric(asset_def)).into(),
+ ])
+ })
+ .await??; // blocks=2
+ }
// When assets are minted
- mint(
- &asset_definition_id,
- &account_id,
- &genesis_client,
- pipeline_time,
- numeric!(100),
- )?;
+ mint(&client, &asset_def, &account, numeric!(100)).await?;
+ network.ensure_blocks(3).await?;
// and a new peer is registered
- let (peer, peer_client) = rt.block_on(network.add_peer());
+ let new_peer = NetworkPeer::generate();
+ let new_peer_id = new_peer.id();
+ let new_peer_client = new_peer.client();
+ network.add_peer(&new_peer);
+ new_peer.start(network.config(), None).await;
+ {
+ let client = client.clone();
+ let id = new_peer_id.clone();
+ spawn_blocking(move || client.submit_blocking(Register::peer(Peer::new(id)))).await??;
+ }
+ network.ensure_blocks(4).await?;
// Then the new peer should already have the mint result.
- check_assets(
- &peer_client,
- &account_id,
- &asset_definition_id,
- numeric!(100),
+ assert_eq!(
+ find_asset(&new_peer_client, &account, &asset_def).await?,
+ numeric!(100)
);
- // Also, when a peer is unregistered
- let remove_peer = Unregister::peer(peer.id.clone());
- genesis_client.submit(remove_peer)?;
- thread::sleep(pipeline_time * 2);
- // We can mint without error.
- mint(
- &asset_definition_id,
- &account_id,
- &genesis_client,
- pipeline_time,
- numeric!(200),
- )?;
+
+ // When a peer is unregistered
+ {
+ let client = client.clone();
+ spawn_blocking(move || client.submit_blocking(Unregister::peer(new_peer_id))).await??;
+ // blocks=6
+ }
+ network.remove_peer(&new_peer);
+ // We can mint without an error.
+ mint(&client, &asset_def, &account, numeric!(200)).await?;
// Assets are increased on the main network.
- check_assets(
- &genesis_client,
- &account_id,
- &asset_definition_id,
- numeric!(300),
+ network.ensure_blocks(6).await?;
+ assert_eq!(
+ find_asset(&client, &account, &asset_def).await?,
+ numeric!(300)
);
// But not on the unregistered peer's network.
- check_assets(
- &peer_client,
- &account_id,
- &asset_definition_id,
- numeric!(100),
+ sleep(PIPELINE_TIME * 5).await;
+ assert_eq!(
+ find_asset(&new_peer_client, &account, &asset_def).await?,
+ numeric!(100)
);
+
Ok(())
}
-fn check_assets(
- iroha: &client::Client,
- account_id: &AccountId,
- asset_definition_id: &AssetDefinitionId,
- quantity: Numeric,
-) {
- iroha
- .poll_with_period(Config::block_sync_gossip_time(), 15, |client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
+async fn find_asset(
+ client: &Client,
+ account: &AccountId,
+ asset_definition: &AssetDefinitionId,
+) -> Result<Numeric> {
+ let account_id = account.clone();
+ let client = client.clone();
+ let asset = spawn_blocking(move || {
+ client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()
+ })
+ .await??
+ .into_iter()
+ .find(|asset| asset.id().definition() == asset_definition)
+ .expect("asset should be there");
- Ok(assets.iter().any(|asset| {
- asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(quantity)
- }))
- })
- .expect("Test case failure");
+ assert_matches!(asset.value(), AssetValue::Numeric(quantity) => Ok(*quantity))
}
-fn mint(
+async fn mint(
+ client: &Client,
asset_definition_id: &AssetDefinitionId,
account_id: &AccountId,
- client: &client::Client,
- pipeline_time: std::time::Duration,
quantity: Numeric,
-) -> Result {
+) -> Result<()> {
let mint_asset = Mint::asset_numeric(
quantity,
AssetId::new(asset_definition_id.clone(), account_id.clone()),
);
- client.submit(mint_asset)?;
- thread::sleep(pipeline_time * 5);
- iroha_logger::info!("Mint");
- Ok(quantity)
-}
-
-fn init() -> Result<(
- tokio::runtime::Runtime,
- iroha_test_network::Network,
- iroha::client::Client,
- std::time::Duration,
- AccountId,
- AssetDefinitionId,
-)> {
- let (rt, network, client) = Network::start_test_with_runtime(4, Some(10_925));
- let pipeline_time = Config::pipeline_time();
- iroha_logger::info!("Started");
-
- let set_max_txns_in_block = SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(1_u64)),
- ));
-
- let create_domain = Register::domain(Domain::new("domain".parse()?));
- let (account_id, _account_keypair) = gen_account_in("domain");
- let create_account = Register::account(Account::new(account_id.clone()));
- let asset_definition_id: AssetDefinitionId = "xor#domain".parse()?;
- let create_asset =
- Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- client.submit_all_blocking::<InstructionBox>([
- set_max_txns_in_block.into(),
- create_domain.into(),
- create_account.into(),
- create_asset.into(),
- ])?;
- iroha_logger::info!("Init");
- Ok((
- rt,
- network,
- client,
- pipeline_time,
- account_id,
- asset_definition_id,
- ))
+ let client = client.clone();
+ spawn_blocking(move || client.submit_blocking(mint_asset)).await??;
+ Ok(())
}
diff --git a/crates/iroha/tests/integration/extra_functional/unstable_network.rs b/crates/iroha/tests/integration/extra_functional/unstable_network.rs
deleted file mode 100644
index 31dc816084a..00000000000
--- a/crates/iroha/tests/integration/extra_functional/unstable_network.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-use std::thread;
-
-use iroha::{
- client,
- data_model::{
- parameter::{BlockParameter, Parameter},
- prelude::*,
- },
-};
-use iroha_config::parameters::actual::Root as Config;
-use iroha_test_network::*;
-use iroha_test_samples::ALICE_ID;
-use nonzero_ext::nonzero;
-use rand::seq::SliceRandom;
-
-#[test]
-fn unstable_network_5_peers_1_fault() {
- let n_peers = 4;
- let n_transactions = 20;
- unstable_network(n_peers, 1, n_transactions, false, 10_805);
-}
-
-#[test]
-fn soft_fork() {
- let n_peers = 4;
- let n_transactions = 20;
- unstable_network(n_peers, 0, n_transactions, true, 10_830);
-}
-
-#[test]
-fn unstable_network_8_peers_1_fault() {
- let n_peers = 7;
- let n_transactions = 20;
- unstable_network(n_peers, 1, n_transactions, false, 10_850);
-}
-
-#[test]
-#[ignore = "This test does not guarantee to have positive outcome given a fixed time."]
-fn unstable_network_9_peers_2_faults() {
- unstable_network(7, 2, 5, false, 10_890);
-}
-
-fn unstable_network(
- n_peers: u32,
- n_offline_peers: u32,
- n_transactions: usize,
- force_soft_fork: bool,
- port: u16,
-) {
- if let Err(error) = iroha_logger::install_panic_hook() {
- eprintln!("Installing panic hook failed: {error}");
- }
-
- // Given
- let mut configuration = Config::test();
- #[cfg(debug_assertions)]
- {
- configuration.sumeragi.debug_force_soft_fork = force_soft_fork;
- }
- let (_rt, network, iroha) = NetworkBuilder::new(n_peers + n_offline_peers, Some(port))
- .with_config(configuration)
- // Note: it is strange that we have `n_offline_peers` but don't set it as offline
- .with_offline_peers(0)
- .create_with_runtime();
- wait_for_genesis_committed(&network.clients(), n_offline_peers);
- iroha
- .submit_blocking(SetParameter::new(Parameter::Block(
- BlockParameter::MaxTransactions(nonzero!(5_u64)),
- )))
- .unwrap();
-
- let pipeline_time = Config::pipeline_time();
-
- let account_id = ALICE_ID.clone();
- let asset_definition_id: AssetDefinitionId = "camomile#wonderland".parse().expect("Valid");
- let register_asset =
- Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- iroha
- .submit_blocking(register_asset)
- .expect("Failed to register asset");
- // Initially there are 0 camomile
- let mut account_has_quantity = Numeric::ZERO;
-
- let mut rng = rand::thread_rng();
- let freezers = network.get_freeze_status_handles();
-
- //When
- for _i in 0..n_transactions {
- // Make random peers faulty.
- for f in freezers.choose_multiple(&mut rng, n_offline_peers as usize) {
- f.freeze();
- }
-
- let quantity = Numeric::ONE;
- let mint_asset = Mint::asset_numeric(
- quantity,
- AssetId::new(asset_definition_id.clone(), account_id.clone()),
- );
- iroha.submit(mint_asset).expect("Failed to create asset.");
- account_has_quantity = account_has_quantity.checked_add(quantity).unwrap();
- thread::sleep(pipeline_time);
-
- iroha
- .poll_with_period(Config::pipeline_time(), 4, |client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(account_has_quantity)
- }))
- })
- .expect("Test case failure.");
-
- // Return all peers to normal function.
- for f in &freezers {
- f.unfreeze();
- }
- }
-}
diff --git a/crates/iroha/tests/integration/multisig.rs b/crates/iroha/tests/integration/multisig.rs
index ddd57eec4b9..9d1e82767b3 100644
--- a/crates/iroha/tests/integration/multisig.rs
+++ b/crates/iroha/tests/integration/multisig.rs
@@ -20,17 +20,15 @@ use nonzero_ext::nonzero;
#[test]
fn mutlisig() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_400).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
-
- test_client.submit_all_blocking([
- SetParameter::new(Parameter::SmartContract(SmartContractParameter::Fuel(
- nonzero!(100_000_000_u64),
- ))),
- SetParameter::new(Parameter::Executor(SmartContractParameter::Fuel(nonzero!(
- 100_000_000_u64
- )))),
- ])?;
+ let (network, _rt) = NetworkBuilder::new()
+ .with_genesis_instruction(SetParameter::new(Parameter::SmartContract(
+ SmartContractParameter::Fuel(nonzero!(100_000_000_u64)),
+ )))
+ .with_genesis_instruction(SetParameter::new(Parameter::Executor(
+ SmartContractParameter::Fuel(nonzero!(100_000_000_u64)),
+ )))
+ .start_blocking()?;
+ let test_client = network.client();
let account_id = ALICE_ID.clone();
let multisig_register_trigger_id = "multisig_register".parse::<TriggerId>()?;
diff --git a/crates/iroha/tests/integration/non_mintable.rs b/crates/iroha/tests/integration/non_mintable.rs
index cd9954eefca..15e446e118d 100644
--- a/crates/iroha/tests/integration/non_mintable.rs
+++ b/crates/iroha/tests/integration/non_mintable.rs
@@ -8,8 +8,8 @@ use iroha_test_samples::ALICE_ID;
#[test]
fn non_mintable_asset_can_be_minted_once_but_not_twice() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_625).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -31,41 +31,28 @@ fn non_mintable_asset_can_be_minted_once_but_not_twice() -> Result<()> {
let tx = test_client.build_transaction(instructions, metadata);
// We can register and mint the non-mintable token
- test_client.submit_transaction(&tx)?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
- Ok(assets.iter().any(|asset| {
+ test_client.submit_transaction_blocking(&tx)?;
+ assert!(test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?
+ .iter()
+ .any(|asset| {
*asset.id().definition() == asset_definition_id
&& *asset.value() == AssetValue::Numeric(numeric!(200))
- }))
- })?;
+ }));
// We can submit the request to mint again.
- test_client.submit_all([mint])?;
-
// However, this will fail
- assert!(test_client
- .poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == AssetValue::Numeric(numeric!(400))
- }))
- })
- .is_err());
+ assert!(test_client.submit_all_blocking([mint]).is_err());
+
Ok(())
}
#[test]
fn non_mintable_asset_cannot_be_minted_if_registered_with_non_zero_value() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_610).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -80,18 +67,19 @@ fn non_mintable_asset_cannot_be_minted_if_registered_with_non_zero_value() -> Re
let register_asset = Register::asset(Asset::new(asset_id.clone(), 1_u32));
// We can register the non-mintable token
- test_client
- .submit_all::<InstructionBox>([create_asset.into(), register_asset.clone().into()])?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
- Ok(assets.iter().any(|asset| {
+ test_client.submit_all_blocking::<InstructionBox>([
+ create_asset.into(),
+ register_asset.clone().into(),
+ ])?;
+ assert!(test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?
+ .iter()
+ .any(|asset| {
*asset.id().definition() == asset_definition_id
&& *asset.value() == AssetValue::Numeric(numeric!(1))
- }))
- })?;
+ }));
// But only once
assert!(test_client.submit_blocking(register_asset).is_err());
@@ -105,8 +93,8 @@ fn non_mintable_asset_cannot_be_minted_if_registered_with_non_zero_value() -> Re
#[test]
fn non_mintable_asset_can_be_minted_if_registered_with_zero_value() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_630).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -122,21 +110,20 @@ fn non_mintable_asset_can_be_minted_if_registered_with_zero_value() -> Result<()
let mint = Mint::asset_numeric(1u32, asset_id);
// We can register the non-mintable token wih zero value and then mint it
- test_client.submit_all::<InstructionBox>([
+ test_client.submit_all_blocking::<InstructionBox>([
create_asset.into(),
register_asset.into(),
mint.into(),
])?;
- test_client.poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(account_id.clone()))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
+ assert!(test_client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(account_id.clone()))
+ .execute_all()?
+ .iter()
+ .any(|asset| {
*asset.id().definition() == asset_definition_id
&& *asset.value() == AssetValue::Numeric(numeric!(1))
- }))
- })?;
+ }));
+
Ok(())
}
diff --git a/crates/iroha/tests/integration/pagination.rs b/crates/iroha/tests/integration/pagination.rs
index 1c0e81ae97e..72eb240afd8 100644
--- a/crates/iroha/tests/integration/pagination.rs
+++ b/crates/iroha/tests/integration/pagination.rs
@@ -8,8 +8,8 @@ use nonzero_ext::nonzero;
#[test]
fn limits_should_work() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_690).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
register_assets(&client)?;
@@ -26,8 +26,8 @@ fn limits_should_work() -> Result<()> {
#[test]
fn reported_length_should_be_accurate() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_200).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
register_assets(&client)?;
@@ -60,8 +60,8 @@ fn fetch_size_should_work() -> Result<()> {
QueryWithFilter, QueryWithParams,
};
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_120).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
register_assets(&client)?;
diff --git a/crates/iroha/tests/integration/permissions.rs b/crates/iroha/tests/integration/permissions.rs
index 0302a520595..3bb8ea0d8a8 100644
--- a/crates/iroha/tests/integration/permissions.rs
+++ b/crates/iroha/tests/integration/permissions.rs
@@ -1,4 +1,4 @@
-use std::{thread, time::Duration};
+use std::time::Duration;
use eyre::Result;
use iroha::{
@@ -13,51 +13,37 @@ use iroha_executor_data_model::permission::{
asset::{CanModifyAssetMetadata, CanTransferAsset},
domain::CanModifyDomainMetadata,
};
-use iroha_genesis::GenesisBlock;
-use iroha_test_network::{PeerBuilder, *};
+use iroha_test_network::*;
use iroha_test_samples::{gen_account_in, ALICE_ID, BOB_ID};
+use tokio::{join, time::timeout};
-#[test]
-fn genesis_transactions_are_validated_by_executor() {
+#[tokio::test]
+async fn genesis_transactions_are_validated_by_executor() {
// `wonderland` domain is owned by Alice,
- // so default executor will deny genesis account to register asset definition.
+ // so the default executor will deny a genesis account to register asset definition.
let asset_definition_id = "xor#wonderland".parse().expect("Valid");
let invalid_instruction =
Register::asset_definition(AssetDefinition::numeric(asset_definition_id));
- let genesis = GenesisBlock::test_with_instructions([invalid_instruction], vec![]);
-
- let (_rt, _peer, test_client) = ::new()
- .with_genesis(genesis)
- .with_port(11_115)
- .start_with_runtime();
-
- check_no_blocks(&test_client);
-}
-
-fn check_no_blocks(test_client: &Client) {
- const POLL_PERIOD: Duration = Duration::from_millis(1000);
- const MAX_RETRIES: u32 = 3;
-
- // Checking that peer contains no blocks multiple times
- // See also `wait_for_genesis_committed()`
- for _ in 0..MAX_RETRIES {
- match test_client.get_status() {
- Ok(status) => {
- assert!(status.blocks == 0);
- thread::sleep(POLL_PERIOD);
- }
- Err(error) => {
- // Connection failed meaning that Iroha panicked on invalid genesis.
- // Not a very good way to check it, but it's the best we can do in the current situation.
-
- iroha_logger::info!(
- ?error,
- "Failed to get status, Iroha probably panicked on invalid genesis, test passed"
- );
- break;
- }
- }
- }
+ let network = NetworkBuilder::new()
+ .with_genesis_instruction(invalid_instruction)
+ .build();
+ let peer = network.peer();
+
+ timeout(Duration::from_secs(3), async {
+ join!(
+ // Peer should start...
+ peer.start(network.config(), Some(network.genesis())),
+ peer.once(|event| matches!(event, PeerLifecycleEvent::ServerStarted)),
+ // ...but it should shortly exit with an error
+ peer.once(|event| match event {
+ // TODO: handle "Invalid genesis" in a more granular way
+ PeerLifecycleEvent::Terminated { status } => !status.success(),
+ _ => false,
+ })
+ )
+ })
+ .await
+ .expect("peer should panic within timeout");
}
fn get_assets(iroha: &Client, id: &AccountId) -> Vec<Asset> {
@@ -73,8 +59,8 @@ fn get_assets(iroha: &Client, id: &AccountId) -> Vec {
fn permissions_disallow_asset_transfer() {
let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(10_725).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
// Given
let alice_id = ALICE_ID.clone();
@@ -128,7 +114,8 @@ fn permissions_disallow_asset_transfer() {
fn permissions_disallow_asset_burn() {
let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(10_735).start_with_runtime();
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
let alice_id = ALICE_ID.clone();
let bob_id = BOB_ID.clone();
@@ -179,8 +166,8 @@ fn permissions_disallow_asset_burn() {
#[test]
#[ignore = "ignore, more in #2851"]
fn account_can_query_only_its_own_domain() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_740).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
// Given
let domain_id: DomainId = "wonderland".parse()?;
@@ -209,7 +196,8 @@ fn account_can_query_only_its_own_domain() -> Result<()> {
fn permissions_differ_not_only_by_names() {
let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
- let (_rt, _not_drop, client) = <PeerBuilder>::new().with_port(10_745).start_with_runtime();
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
let alice_id = ALICE_ID.clone();
let (mouse_id, mouse_keypair) = gen_account_in("outfit");
@@ -297,12 +285,11 @@ fn permissions_differ_not_only_by_names() {
}
#[test]
-#[allow(deprecated)]
fn stored_vs_granted_permission_payload() -> Result<()> {
let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(10_730).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
// Given
let alice_id = ALICE_ID.clone();
@@ -351,10 +338,9 @@ fn stored_vs_granted_permission_payload() -> Result<()> {
}
#[test]
-#[allow(deprecated)]
fn permissions_are_unified() {
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(11_230).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
// Given
let alice_id = ALICE_ID.clone();
@@ -380,8 +366,8 @@ fn permissions_are_unified() {
#[test]
fn associated_permissions_removed_on_unregister() {
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(11_240).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
let bob_id = BOB_ID.clone();
let kingdom_id: DomainId = "kingdom".parse().expect("Valid");
@@ -432,8 +418,8 @@ fn associated_permissions_removed_on_unregister() {
#[test]
fn associated_permissions_removed_from_role_on_unregister() {
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(11_255).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
let role_id: RoleId = "role".parse().expect("Valid");
let kingdom_id: DomainId = "kingdom".parse().expect("Valid");
diff --git a/crates/iroha/tests/integration/queries/account.rs b/crates/iroha/tests/integration/queries/account.rs
index 83b63d17e05..cb9327ecc9d 100644
--- a/crates/iroha/tests/integration/queries/account.rs
+++ b/crates/iroha/tests/integration/queries/account.rs
@@ -7,8 +7,8 @@ use iroha_test_samples::{gen_account_in, ALICE_ID};
#[test]
fn find_accounts_with_asset() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_760).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
// Registering new asset definition
let definition_id = "test_coin#wonderland"
diff --git a/crates/iroha/tests/integration/queries/asset.rs b/crates/iroha/tests/integration/queries/asset.rs
index b7f66833047..a249df880cb 100644
--- a/crates/iroha/tests/integration/queries/asset.rs
+++ b/crates/iroha/tests/integration/queries/asset.rs
@@ -11,8 +11,8 @@ use iroha_test_samples::{gen_account_in, ALICE_ID};
#[test]
#[allow(clippy::too_many_lines)]
fn find_asset_total_quantity() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_765).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
// Register new domain
let domain_id: DomainId = "looking_glass".parse()?;
diff --git a/crates/iroha/tests/integration/queries/mod.rs b/crates/iroha/tests/integration/queries/mod.rs
index bf07f4d29c3..bb7c70ac7a1 100644
--- a/crates/iroha/tests/integration/queries/mod.rs
+++ b/crates/iroha/tests/integration/queries/mod.rs
@@ -15,8 +15,8 @@ mod smart_contract;
#[test]
fn too_big_fetch_size_is_not_allowed() {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_130).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
let err = client
.query(client::asset::all())
diff --git a/crates/iroha/tests/integration/queries/query_errors.rs b/crates/iroha/tests/integration/queries/query_errors.rs
index 69d7586c9b1..a7af9fd2260 100644
--- a/crates/iroha/tests/integration/queries/query_errors.rs
+++ b/crates/iroha/tests/integration/queries/query_errors.rs
@@ -2,14 +2,13 @@ use iroha::{
client,
data_model::{prelude::QueryBuilderExt, query::builder::SingleQueryError},
};
+use iroha_test_network::NetworkBuilder;
use iroha_test_samples::gen_account_in;
#[test]
fn non_existent_account_is_specific_error() {
- let (_rt, _peer, client) = <PeerBuilder>::new()
- .with_port(10_670)
- .start_with_runtime();
- // we cannot wait for genesis committment
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
let err = client
.query(client::account::all())
diff --git a/crates/iroha/tests/integration/queries/role.rs b/crates/iroha/tests/integration/queries/role.rs
index 6eecb164f77..dde318bbc5a 100644
--- a/crates/iroha/tests/integration/queries/role.rs
+++ b/crates/iroha/tests/integration/queries/role.rs
@@ -21,8 +21,8 @@ fn create_role_ids() -> [RoleId; 5] {
#[test]
fn find_roles() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_525).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let role_ids = create_role_ids();
@@ -53,8 +53,8 @@ fn find_roles() -> Result<()> {
#[test]
fn find_role_ids() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_530).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let role_ids = create_role_ids();
@@ -79,8 +79,8 @@ fn find_role_ids() -> Result<()> {
#[test]
fn find_role_by_id() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_535).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let role_id: RoleId = "root".parse().expect("Valid");
let new_role = Role::new(role_id.clone(), ALICE_ID.clone());
@@ -102,8 +102,8 @@ fn find_role_by_id() -> Result<()> {
#[test]
fn find_unregistered_role_by_id() {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_540).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let role_id: RoleId = "root".parse().expect("Valid");
@@ -122,8 +122,8 @@ fn find_unregistered_role_by_id() {
#[test]
fn find_roles_by_account_id() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_545).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let role_ids = create_role_ids();
let alice_id = ALICE_ID.clone();
diff --git a/crates/iroha/tests/integration/queries/smart_contract.rs b/crates/iroha/tests/integration/queries/smart_contract.rs
index ab2742bd8a6..a07b9060461 100644
--- a/crates/iroha/tests/integration/queries/smart_contract.rs
+++ b/crates/iroha/tests/integration/queries/smart_contract.rs
@@ -8,8 +8,8 @@ use iroha_test_samples::load_sample_wasm;
#[test]
fn live_query_is_dropped_after_smart_contract_end() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_140).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
let transaction = client.build_transaction(
load_sample_wasm("query_assets_and_save_cursor"),
@@ -38,8 +38,8 @@ fn live_query_is_dropped_after_smart_contract_end() -> Result<()> {
#[test]
fn smart_contract_can_filter_queries() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_260).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
let transaction = client.build_transaction(
load_sample_wasm("smart_contract_can_filter_queries"),
diff --git a/crates/iroha/tests/integration/roles.rs b/crates/iroha/tests/integration/roles.rs
index 57938464e29..437e2ccfeab 100644
--- a/crates/iroha/tests/integration/roles.rs
+++ b/crates/iroha/tests/integration/roles.rs
@@ -11,8 +11,8 @@ use serde_json::json;
#[test]
fn register_empty_role() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_695).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let role_id = "root".parse().expect("Valid");
let register_role = Register::role(Role::new(role_id, ALICE_ID.clone()));
@@ -33,10 +33,8 @@ fn register_empty_role() -> Result<()> {
/// @s8sato added: This test represents #2081 case.
#[test]
fn register_and_grant_role_for_metadata_access() -> Result<()> {
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
-
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_700).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let (mouse_id, mouse_keypair) = gen_account_in("wonderland");
@@ -56,7 +54,7 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> {
// Mouse grants role to Alice
let grant_role = Grant::account_role(role_id.clone(), alice_id.clone());
- let grant_role_tx = TransactionBuilder::new(chain_id, mouse_id.clone())
+ let grant_role_tx = TransactionBuilder::new(network.chain_id(), mouse_id.clone())
.with_instructions([grant_role])
.sign(mouse_keypair.private_key());
test_client.submit_transaction_blocking(&grant_role_tx)?;
@@ -77,8 +75,8 @@ fn register_and_grant_role_for_metadata_access() -> Result<()> {
#[test]
fn unregistered_role_removed_from_account() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_705).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let role_id: RoleId = "root".parse().expect("Valid");
let alice_id = ALICE_ID.clone();
@@ -120,8 +118,8 @@ fn unregistered_role_removed_from_account() -> Result<()> {
#[test]
fn role_with_invalid_permissions_is_not_accepted() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_025).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let role_id = "ACCESS_TO_ACCOUNT_METADATA".parse()?;
let role = Role::new(role_id, ALICE_ID.clone()).add_permission(CanControlDomainLives);
@@ -147,8 +145,8 @@ fn role_with_invalid_permissions_is_not_accepted() -> Result<()> {
// so that they don't get deduplicated eagerly but rather in the executor
// This way, if the executor compares permissions just as JSON strings, the test will fail
fn role_permissions_are_deduplicated() {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_235).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let allow_alice_to_transfer_rose_1 = Permission::new(
"CanTransferAsset".parse().unwrap(),
@@ -186,10 +184,8 @@ fn role_permissions_are_deduplicated() {
#[test]
fn grant_revoke_role_permissions() -> Result<()> {
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
-
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_245).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let (mouse_id, mouse_keypair) = gen_account_in("wonderland");
@@ -211,7 +207,7 @@ fn grant_revoke_role_permissions() -> Result<()> {
// Mouse grants role to Alice
let grant_role = Grant::account_role(role_id.clone(), alice_id.clone());
- let grant_role_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone())
+ let grant_role_tx = TransactionBuilder::new(network.chain_id(), mouse_id.clone())
.with_instructions([grant_role])
.sign(mouse_keypair.private_key());
test_client.submit_transaction_blocking(&grant_role_tx)?;
@@ -240,7 +236,7 @@ fn grant_revoke_role_permissions() -> Result<()> {
.expect_err("shouldn't be able to modify metadata");
// Alice can modify Mouse's metadata after permission is granted to role
- let grant_role_permission_tx = TransactionBuilder::new(chain_id.clone(), mouse_id.clone())
+ let grant_role_permission_tx = TransactionBuilder::new(network.chain_id(), mouse_id.clone())
.with_instructions([grant_role_permission])
.sign(mouse_keypair.private_key());
test_client.submit_transaction_blocking(&grant_role_permission_tx)?;
@@ -252,7 +248,7 @@ fn grant_revoke_role_permissions() -> Result<()> {
test_client.submit_blocking(set_key_value.clone())?;
// Alice can't modify Mouse's metadata after permission is removed from role
- let revoke_role_permission_tx = TransactionBuilder::new(chain_id.clone(), mouse_id)
+ let revoke_role_permission_tx = TransactionBuilder::new(network.chain_id(), mouse_id)
.with_instructions([revoke_role_permission])
.sign(mouse_keypair.private_key());
test_client.submit_transaction_blocking(&revoke_role_permission_tx)?;
diff --git a/crates/iroha/tests/integration/set_parameter.rs b/crates/iroha/tests/integration/set_parameter.rs
index 78821fd9464..00dd8517677 100644
--- a/crates/iroha/tests/integration/set_parameter.rs
+++ b/crates/iroha/tests/integration/set_parameter.rs
@@ -1,36 +1,34 @@
-use std::time::Duration;
-
use eyre::Result;
use iroha::{
client,
data_model::{
- parameter::{Parameter, Parameters, SumeragiParameter, SumeragiParameters},
+ parameter::{Parameter, Parameters},
prelude::*,
},
};
+use iroha_data_model::parameter::BlockParameter;
use iroha_test_network::*;
+use nonzero_ext::nonzero;
#[test]
fn can_change_parameter_value() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_135).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new()
+ .with_genesis_instruction(SetParameter(Parameter::Block(
+ BlockParameter::MaxTransactions(nonzero!(16u64)),
+ )))
+ .start_blocking()?;
+ let test_client = network.client();
let old_params: Parameters = test_client.query_single(client::parameter::all())?;
- assert_eq!(
- old_params.sumeragi().block_time(),
- SumeragiParameters::default().block_time()
- );
+ assert_eq!(old_params.block.max_transactions, nonzero!(16u64));
- let block_time = 40_000;
- let parameter = Parameter::Sumeragi(SumeragiParameter::BlockTimeMs(block_time));
- let set_param_isi = SetParameter::new(parameter);
- test_client.submit_blocking(set_param_isi)?;
+ let new_value = nonzero!(32u64);
+ test_client.submit_blocking(SetParameter(Parameter::Block(
+ BlockParameter::MaxTransactions(new_value),
+ )))?;
- let sumeragi_params = test_client.query_single(client::parameter::all())?.sumeragi;
- assert_eq!(
- sumeragi_params.block_time(),
- Duration::from_millis(block_time)
- );
+ let params = test_client.query_single(client::parameter::all())?;
+ assert_eq!(params.block.max_transactions, new_value);
Ok(())
}
diff --git a/crates/iroha/tests/integration/sorting.rs b/crates/iroha/tests/integration/sorting.rs
index 729ab76a77f..3bf941afcd5 100644
--- a/crates/iroha/tests/integration/sorting.rs
+++ b/crates/iroha/tests/integration/sorting.rs
@@ -33,8 +33,8 @@ fn correct_pagination_assets_after_creating_new_one() {
let sorting = Sorting::by_metadata_key(sort_by_metadata_key.clone());
let account_id = ALICE_ID.clone();
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_635).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let mut tester_assets = vec![];
let mut register_asset_definitions = vec![];
@@ -120,8 +120,8 @@ fn correct_pagination_assets_after_creating_new_one() {
#[test]
#[allow(clippy::too_many_lines)]
fn correct_sorting_of_entities() {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_640).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let sort_by_metadata_key = "test_sort".parse::().expect("Valid");
@@ -294,8 +294,8 @@ fn correct_sorting_of_entities() {
fn sort_only_elements_which_have_sorting_key() -> Result<()> {
const TEST_DOMAIN: &str = "neverland";
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_680).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
let domain_id: DomainId = TEST_DOMAIN.parse().unwrap();
test_client
diff --git a/crates/iroha/tests/integration/status_response.rs b/crates/iroha/tests/integration/status_response.rs
index 17b6c9dd734..41e4982cff3 100644
--- a/crates/iroha/tests/integration/status_response.rs
+++ b/crates/iroha/tests/integration/status_response.rs
@@ -1,8 +1,8 @@
use eyre::Result;
-use iroha::{data_model::prelude::*, samples::get_status_json};
+use iroha::{client, data_model::prelude::*};
use iroha_telemetry::metrics::Status;
use iroha_test_network::*;
-use iroha_test_samples::gen_account_in;
+use tokio::task::spawn_blocking;
fn status_eq_excluding_uptime_and_queue(lhs: &Status, rhs: &Status) -> bool {
lhs.peers == rhs.peers
@@ -12,43 +12,43 @@ fn status_eq_excluding_uptime_and_queue(lhs: &Status, rhs: &Status) -> bool {
&& lhs.view_changes == rhs.view_changes
}
-#[test]
-fn json_and_scale_statuses_equality() -> Result<()> {
- let (_rt, network, client) = Network::start_test_with_runtime(2, Some(11_280));
- wait_for_genesis_committed(&network.clients(), 0);
+async fn check(client: &client::Client, blocks: u64) -> Result<()> {
+ let status_json = reqwest::get(client.torii_url.join("/status").unwrap())
+ .await?
+ .json()
+ .await?;
- let json_status_zero = get_status_json(&client).unwrap();
+ let status_scale = {
+ let client = client.clone();
+ spawn_blocking(move || client.get_status()).await??
+ };
- let scale_status_zero_decoded = client.get_status().unwrap();
+ assert!(status_eq_excluding_uptime_and_queue(
+ &status_json,
+ &status_scale
+ ));
+ assert_eq!(status_json.blocks, blocks);
- assert!(
- status_eq_excluding_uptime_and_queue(&json_status_zero, &scale_status_zero_decoded),
- "get_status() result is not equal to decoded get_status_scale_encoded()"
- );
+ Ok(())
+}
- let coins = ["xor", "btc", "eth", "doge"];
+#[tokio::test]
+async fn json_and_scale_statuses_equality() -> Result<()> {
+ let network = NetworkBuilder::new().start().await?;
+ let client = network.client();
- let (account_id, _account_keypair) = gen_account_in("domain");
+ check(&client, 1).await?;
- for coin in coins {
- let asset_definition_id = format!("{coin}#wonderland").parse::<AssetDefinitionId>()?;
- let create_asset =
- Register::asset_definition(AssetDefinition::numeric(asset_definition_id.clone()));
- let mint_asset = Mint::asset_numeric(
- 1234u32,
- AssetId::new(asset_definition_id, account_id.clone()),
- );
- client.submit_all::<InstructionBox>([create_asset.into(), mint_asset.into()])?;
+ {
+ let client = client.clone();
+ spawn_blocking(move || {
+ client.submit_blocking(Register::domain(Domain::new("looking_glass".parse()?)))
+ })
}
+ .await??;
+ network.ensure_blocks(2).await?;
- let json_status_coins = get_status_json(&client).unwrap();
-
- let scale_status_coins_decoded = client.get_status().unwrap();
-
- assert!(
- status_eq_excluding_uptime_and_queue(&json_status_coins, &scale_status_coins_decoded),
- "get_status() result is not equal to decoded get_status_scale_encoded()"
- );
+ check(&client, 2).await?;
Ok(())
}
diff --git a/crates/iroha/tests/integration/transfer_asset.rs b/crates/iroha/tests/integration/transfer_asset.rs
index 20ba04a331a..aed42d95995 100644
--- a/crates/iroha/tests/integration/transfer_asset.rs
+++ b/crates/iroha/tests/integration/transfer_asset.rs
@@ -21,14 +21,14 @@ fn simulate_transfer_numeric() {
AssetDefinition::numeric,
Mint::asset_numeric,
Transfer::asset_numeric,
- 10_710,
)
}
#[test]
fn simulate_transfer_store_asset() {
- let (_rt, _peer, iroha) = <PeerBuilder>::new().with_port(11_145).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
+
let (alice_id, mouse_id) = generate_two_ids();
let create_mouse = create_mouse(mouse_id.clone());
let asset_definition_id: AssetDefinitionId = "camomile#wonderland".parse().unwrap();
@@ -55,19 +55,17 @@ fn simulate_transfer_store_asset() {
);
iroha
- .submit(transfer_asset)
+ .submit_blocking(transfer_asset)
.expect("Failed to transfer asset.");
- iroha
- .poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(mouse_id.clone()))
- .execute_all()?;
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id && *asset.id().account() == mouse_id
- }))
- })
- .expect("Test case failure.");
+ assert!(iroha
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(mouse_id.clone()))
+ .execute_all()
+ .unwrap()
+ .into_iter()
+ .any(|asset| {
+ *asset.id().definition() == asset_definition_id && *asset.id().account() == mouse_id
+ }));
}
fn simulate_transfer<T>(
@@ -76,16 +74,13 @@ fn simulate_transfer(
asset_definition_ctr: impl FnOnce(AssetDefinitionId) -> <AssetDefinition as Registered>::With,
mint_ctr: impl FnOnce(T, AssetId) -> Mint<T, Asset>,
transfer_ctr: impl FnOnce(AssetId, T, AccountId) -> Transfer<Asset, T, Account>,
- port_number: u16,
) where
T: std::fmt::Debug + Clone + Into<AssetValue>,
Mint<T, Asset>: Instruction,
Transfer<Asset, T, Account>: Instruction,
{
- let (_rt, _peer, iroha) = <PeerBuilder>::new()
- .with_port(port_number)
- .start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let iroha = network.client();
let (alice_id, mouse_id) = generate_two_ids();
let create_mouse = create_mouse(mouse_id.clone());
@@ -114,22 +109,19 @@ fn simulate_transfer(
mouse_id.clone(),
);
iroha
- .submit(transfer_asset)
+ .submit_blocking(transfer_asset)
.expect("Failed to transfer asset.");
- iroha
- .poll(|client| {
- let assets = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.account.eq(mouse_id.clone()))
- .execute_all()?;
-
- Ok(assets.iter().any(|asset| {
- *asset.id().definition() == asset_definition_id
- && *asset.value() == amount_to_transfer.clone().into()
- && *asset.id().account() == mouse_id
- }))
- })
- .expect("Test case failure.");
+ assert!(iroha
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.account.eq(mouse_id.clone()))
+ .execute_all()
+ .unwrap()
+ .into_iter()
+ .any(|asset| {
+ *asset.id().definition() == asset_definition_id
+ && *asset.value() == amount_to_transfer.clone().into()
+ && *asset.id().account() == mouse_id
+ }));
}
fn generate_two_ids() -> (AccountId, AccountId) {
diff --git a/crates/iroha/tests/integration/transfer_domain.rs b/crates/iroha/tests/integration/transfer_domain.rs
index 6a8c5276fda..414346bacd2 100644
--- a/crates/iroha/tests/integration/transfer_domain.rs
+++ b/crates/iroha/tests/integration/transfer_domain.rs
@@ -1,7 +1,6 @@
use eyre::Result;
use iroha::{
client,
- client::Client,
crypto::KeyPair,
data_model::{prelude::*, transaction::error::TransactionRejectionReason},
};
@@ -12,18 +11,14 @@ use iroha_executor_data_model::permission::{
domain::CanUnregisterDomain,
trigger::CanUnregisterTrigger,
};
-use iroha_genesis::GenesisBlock;
use iroha_primitives::json::Json;
-use iroha_test_network::{Peer as TestPeer, *};
+use iroha_test_network::*;
use iroha_test_samples::{gen_account_in, ALICE_ID, BOB_ID, SAMPLE_GENESIS_ACCOUNT_ID};
-use tokio::runtime::Runtime;
#[test]
fn domain_owner_domain_permissions() -> Result<()> {
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
-
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_080).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let kingdom_id: DomainId = "kingdom".parse()?;
let (bob_id, bob_keypair) = gen_account_in("kingdom");
@@ -38,7 +33,7 @@ fn domain_owner_domain_permissions() -> Result<()> {
test_client.submit_blocking(Register::account(bob))?;
// Asset definitions can't be registered by "bob@kingdom" by default
- let transaction = TransactionBuilder::new(chain_id.clone(), bob_id.clone())
+ let transaction = TransactionBuilder::new(network.chain_id(), bob_id.clone())
.with_instructions([Register::asset_definition(coin.clone())])
.sign(bob_keypair.private_key());
let err = test_client
@@ -66,7 +61,7 @@ fn domain_owner_domain_permissions() -> Result<()> {
permission.clone(),
bob_id.clone(),
))?;
- let transaction = TransactionBuilder::new(chain_id, bob_id.clone())
+ let transaction = TransactionBuilder::new(network.chain_id(), bob_id.clone())
.with_instructions([Register::asset_definition(coin)])
.sign(bob_keypair.private_key());
test_client.submit_transaction_blocking(&transaction)?;
@@ -96,8 +91,8 @@ fn domain_owner_domain_permissions() -> Result<()> {
#[test]
fn domain_owner_account_permissions() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_075).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let kingdom_id: DomainId = "kingdom".parse()?;
let (mad_hatter_id, _mad_hatter_keypair) = gen_account_in("kingdom");
@@ -138,10 +133,9 @@ fn domain_owner_account_permissions() -> Result<()> {
#[test]
fn domain_owner_asset_definition_permissions() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_085).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
let kingdom_id: DomainId = "kingdom".parse()?;
let (bob_id, bob_keypair) = gen_account_in("kingdom");
let (rabbit_id, _rabbit_keypair) = gen_account_in("kingdom");
@@ -163,7 +157,7 @@ fn domain_owner_asset_definition_permissions() -> Result<()> {
// register asset definitions by "bob@kingdom" so he is owner of it
let coin = AssetDefinition::numeric(coin_id.clone());
- let transaction = TransactionBuilder::new(chain_id, bob_id.clone())
+ let transaction = TransactionBuilder::new(network.chain_id(), bob_id.clone())
.with_instructions([Register::asset_definition(coin)])
.sign(bob_keypair.private_key());
test_client.submit_transaction_blocking(&transaction)?;
@@ -203,10 +197,8 @@ fn domain_owner_asset_definition_permissions() -> Result<()> {
#[test]
fn domain_owner_asset_permissions() -> Result<()> {
- let chain_id = ChainId::from("00000000-0000-0000-0000-000000000000");
-
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_090).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let kingdom_id: DomainId = "kingdom".parse()?;
@@ -228,7 +220,7 @@ fn domain_owner_asset_permissions() -> Result<()> {
// register asset definitions by "bob@kingdom" so he is owner of it
let coin = AssetDefinition::numeric(coin_id.clone());
let store = AssetDefinition::store(store_id.clone());
- let transaction = TransactionBuilder::new(chain_id, bob_id.clone())
+ let transaction = TransactionBuilder::new(network.chain_id(), bob_id.clone())
.with_instructions([
Register::asset_definition(coin),
Register::asset_definition(store),
@@ -269,8 +261,8 @@ fn domain_owner_asset_permissions() -> Result<()> {
#[test]
fn domain_owner_trigger_permissions() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_095).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let kingdom_id: DomainId = "kingdom".parse()?;
@@ -325,8 +317,8 @@ fn domain_owner_trigger_permissions() -> Result<()> {
#[test]
fn domain_owner_transfer() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_100).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
let kingdom_id: DomainId = "kingdom".parse()?;
@@ -365,31 +357,29 @@ fn domain_owner_transfer() -> Result<()> {
#[test]
fn not_allowed_to_transfer_other_user_domain() -> Result<()> {
- let mut peer = TestPeer::new().expect("Failed to create peer");
- let topology = vec![peer.id.clone()];
-
let users_domain: DomainId = "users".parse()?;
let foo_domain: DomainId = "foo".parse()?;
-
let user1 = AccountId::new(users_domain.clone(), KeyPair::random().into_parts().0);
let user2 = AccountId::new(users_domain.clone(), KeyPair::random().into_parts().0);
let genesis_account = SAMPLE_GENESIS_ACCOUNT_ID.clone();
- let instructions: [InstructionBox; 6] = [
- Register::domain(Domain::new(users_domain.clone())).into(),
- Register::account(Account::new(user1.clone())).into(),
- Register::account(Account::new(user2.clone())).into(),
- Register::domain(Domain::new(foo_domain.clone())).into(),
- Transfer::domain(genesis_account.clone(), foo_domain.clone(), user1.clone()).into(),
- Transfer::domain(genesis_account.clone(), users_domain.clone(), user1.clone()).into(),
- ];
- let genesis = GenesisBlock::test_with_instructions(instructions, topology);
-
- let rt = Runtime::test();
- let builder = PeerBuilder::new().with_genesis(genesis).with_port(11_110);
- rt.block_on(builder.start_with_peer(&mut peer));
- let client = Client::test(&peer.api_address);
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new()
+ .with_genesis_instruction(Register::domain(Domain::new(users_domain.clone())))
+ .with_genesis_instruction(Register::account(Account::new(user1.clone())))
+ .with_genesis_instruction(Register::account(Account::new(user2.clone())))
+ .with_genesis_instruction(Register::domain(Domain::new(foo_domain.clone())))
+ .with_genesis_instruction(Transfer::domain(
+ genesis_account.clone(),
+ foo_domain.clone(),
+ user1.clone(),
+ ))
+ .with_genesis_instruction(Transfer::domain(
+ genesis_account.clone(),
+ users_domain.clone(),
+ user1.clone(),
+ ))
+ .start_blocking()?;
+ let client = network.client();
let domain = client
.query(client::domain::all())
diff --git a/crates/iroha/tests/integration/triggers/by_call_trigger.rs b/crates/iroha/tests/integration/triggers/by_call_trigger.rs
index 194c4901fbe..edd2e954f7d 100644
--- a/crates/iroha/tests/integration/triggers/by_call_trigger.rs
+++ b/crates/iroha/tests/integration/triggers/by_call_trigger.rs
@@ -3,7 +3,7 @@ use std::{sync::mpsc, thread, time::Duration};
use executor_custom_data_model::mint_rose_args::MintRoseArgs;
use eyre::{eyre, Result, WrapErr};
use iroha::{
- client::{self, Client},
+ client::{self},
crypto::KeyPair,
data_model::{
prelude::*,
@@ -12,23 +12,22 @@ use iroha::{
},
};
use iroha_executor_data_model::permission::trigger::CanRegisterTrigger;
-use iroha_genesis::GenesisBlock;
-use iroha_logger::info;
-use iroha_test_network::{Peer as TestPeer, *};
+use iroha_test_network::*;
use iroha_test_samples::{load_sample_wasm, ALICE_ID};
-use tokio::runtime::Runtime;
+
+use crate::integration::triggers::get_asset_value;
const TRIGGER_NAME: &str = "mint_rose";
#[test]
fn call_execute_trigger() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_005).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
let asset_id = AssetId::new(asset_definition_id, account_id);
- let prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_value = get_asset_value(&test_client, asset_id.clone());
let instruction = Mint::asset_numeric(1u32, asset_id.clone());
let register_trigger = build_register_trigger_isi(asset_id.account(), vec![instruction.into()]);
@@ -38,7 +37,7 @@ fn call_execute_trigger() -> Result<()> {
let call_trigger = ExecuteTrigger::new(trigger_id);
test_client.submit_blocking(call_trigger)?;
- let new_value = get_asset_value(&mut test_client, asset_id);
+ let new_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_value, prev_value.checked_add(Numeric::ONE).unwrap());
Ok(())
@@ -46,8 +45,8 @@ fn call_execute_trigger() -> Result<()> {
#[test]
fn execute_trigger_should_produce_event() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_010).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -82,15 +81,15 @@ fn execute_trigger_should_produce_event() -> Result<()> {
#[test]
fn infinite_recursion_should_produce_one_call_per_block() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_015).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
let asset_id = AssetId::new(asset_definition_id, account_id);
let trigger_id = TRIGGER_NAME.parse()?;
let call_trigger = ExecuteTrigger::new(trigger_id);
- let prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_value = get_asset_value(&test_client, asset_id.clone());
let instructions = vec![
Mint::asset_numeric(1u32, asset_id.clone()).into(),
@@ -101,7 +100,7 @@ fn infinite_recursion_should_produce_one_call_per_block() -> Result<()> {
test_client.submit_blocking(call_trigger)?;
- let new_value = get_asset_value(&mut test_client, asset_id);
+ let new_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_value, prev_value.checked_add(Numeric::ONE).unwrap());
Ok(())
@@ -109,8 +108,8 @@ fn infinite_recursion_should_produce_one_call_per_block() -> Result<()> {
#[test]
fn trigger_failure_should_not_cancel_other_triggers_execution() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_020).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -150,13 +149,13 @@ fn trigger_failure_should_not_cancel_other_triggers_execution() -> Result<()> {
test_client.submit_blocking(register_trigger)?;
// Saving current asset value
- let prev_asset_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_asset_value = get_asset_value(&test_client, asset_id.clone());
// Executing bad trigger
test_client.submit_blocking(ExecuteTrigger::new(bad_trigger_id))?;
// Checking results
- let new_asset_value = get_asset_value(&mut test_client, asset_id);
+ let new_asset_value = get_asset_value(&test_client, asset_id);
assert_eq!(
new_asset_value,
prev_asset_value.checked_add(Numeric::ONE).unwrap()
@@ -166,8 +165,8 @@ fn trigger_failure_should_not_cancel_other_triggers_execution() -> Result<()> {
#[test]
fn trigger_should_not_be_executed_with_zero_repeats_count() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_025).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -189,7 +188,7 @@ fn trigger_should_not_be_executed_with_zero_repeats_count() -> Result<()> {
test_client.submit_blocking(register_trigger)?;
// Saving current asset value
- let prev_asset_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_asset_value = get_asset_value(&test_client, asset_id.clone());
// Executing trigger first time
let execute_trigger = ExecuteTrigger::new(trigger_id.clone());
@@ -220,7 +219,7 @@ fn trigger_should_not_be_executed_with_zero_repeats_count() -> Result<()> {
);
// Checking results
- let new_asset_value = get_asset_value(&mut test_client, asset_id);
+ let new_asset_value = get_asset_value(&test_client, asset_id);
assert_eq!(
new_asset_value,
prev_asset_value.checked_add(Numeric::ONE).unwrap()
@@ -231,8 +230,8 @@ fn trigger_should_not_be_executed_with_zero_repeats_count() -> Result<()> {
#[test]
fn trigger_should_be_able_to_modify_its_own_repeats_count() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_030).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -257,7 +256,7 @@ fn trigger_should_be_able_to_modify_its_own_repeats_count() -> Result<()> {
test_client.submit_blocking(register_trigger)?;
// Saving current asset value
- let prev_asset_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_asset_value = get_asset_value(&test_client, asset_id.clone());
// Executing trigger first time
let execute_trigger = ExecuteTrigger::new(trigger_id);
@@ -267,7 +266,7 @@ fn trigger_should_be_able_to_modify_its_own_repeats_count() -> Result<()> {
test_client.submit_blocking(execute_trigger)?;
// Checking results
- let new_asset_value = get_asset_value(&mut test_client, asset_id);
+ let new_asset_value = get_asset_value(&test_client, asset_id);
assert_eq!(
new_asset_value,
prev_asset_value.checked_add(numeric!(2)).unwrap()
@@ -278,9 +277,8 @@ fn trigger_should_be_able_to_modify_its_own_repeats_count() -> Result<()> {
#[test]
fn only_account_with_permission_can_register_trigger() -> Result<()> {
- // Building a configuration
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_035).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let domain_id = ALICE_ID.domain().clone();
let alice_account_id = ALICE_ID.clone();
@@ -319,20 +317,20 @@ fn only_account_with_permission_can_register_trigger() -> Result<()> {
.filter_with(|account| account.id.eq(rabbit_account_id.clone()))
.execute_single()
.expect("Account not found");
- info!("Rabbit is found.");
+ println!("Rabbit is found.");
// Trying register the trigger without permissions
let _ = rabbit_client
.submit_blocking(Register::trigger(trigger.clone()))
.expect_err("Trigger should not be registered!");
- info!("Rabbit couldn't register the trigger");
+ println!("Rabbit couldn't register the trigger");
// Give permissions to the rabbit
test_client.submit_blocking(Grant::account_permission(
permission_on_registration,
rabbit_account_id,
))?;
- info!("Rabbit has got the permission");
+ println!("Rabbit has got the permission");
// Trying register the trigger with permissions
rabbit_client
@@ -351,8 +349,8 @@ fn only_account_with_permission_can_register_trigger() -> Result<()> {
#[test]
fn unregister_trigger() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_040).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let account_id = ALICE_ID.clone();
@@ -426,21 +424,14 @@ fn trigger_in_genesis() -> Result<()> {
),
);
- let mut peer = TestPeer::new().expect("Failed to create peer");
- let topology = vec![peer.id.clone()];
-
- // Registering trigger in genesis
- let genesis = GenesisBlock::test_with_instructions([Register::trigger(trigger)], topology);
-
- let rt = Runtime::test();
- let builder = PeerBuilder::new().with_genesis(genesis).with_port(10_045);
- rt.block_on(builder.start_with_peer(&mut peer));
- let mut test_client = Client::test(&peer.api_address);
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new()
+ .with_genesis_instruction(Register::trigger(trigger))
+ .start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let asset_id = AssetId::new(asset_definition_id, account_id);
- let prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_value = get_asset_value(&test_client, asset_id.clone());
// Executing trigger
test_client
@@ -454,7 +445,7 @@ fn trigger_in_genesis() -> Result<()> {
test_client.submit_blocking(call_trigger)?;
// Checking result
- let new_value = get_asset_value(&mut test_client, asset_id);
+ let new_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_value, prev_value.checked_add(Numeric::ONE).unwrap());
Ok(())
@@ -462,8 +453,8 @@ fn trigger_in_genesis() -> Result<()> {
#[test]
fn trigger_should_be_able_to_modify_other_trigger() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_085).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -502,7 +493,7 @@ fn trigger_should_be_able_to_modify_other_trigger() -> Result<()> {
test_client.submit_blocking(register_trigger)?;
// Saving current asset value
- let prev_asset_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_asset_value = get_asset_value(&test_client, asset_id.clone());
// Executing triggers
let execute_trigger_unregister = ExecuteTrigger::new(trigger_id_unregister);
@@ -514,7 +505,7 @@ fn trigger_should_be_able_to_modify_other_trigger() -> Result<()> {
// Checking results
// First trigger should cancel second one, so value should stay the same
- let new_asset_value = get_asset_value(&mut test_client, asset_id);
+ let new_asset_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_asset_value, prev_asset_value);
Ok(())
@@ -522,8 +513,8 @@ fn trigger_should_be_able_to_modify_other_trigger() -> Result<()> {
#[test]
fn trigger_burn_repetitions() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_070).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
@@ -558,8 +549,8 @@ fn trigger_burn_repetitions() -> Result<()> {
#[test]
fn unregistering_one_of_two_triggers_with_identical_wasm_should_not_cause_original_wasm_loss(
) -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_105).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let account_id = ALICE_ID.clone();
let first_trigger_id = "mint_rose_1".parse::<TriggerId>()?;
@@ -598,20 +589,6 @@ fn unregistering_one_of_two_triggers_with_identical_wasm_should_not_cause_origin
Ok(())
}
-fn get_asset_value(client: &mut Client, asset_id: AssetId) -> Numeric {
- let asset = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.eq(asset_id))
- .execute_single()
- .unwrap();
-
- let AssetValue::Numeric(val) = *asset.value() else {
- panic!("Unexpected asset value");
- };
-
- val
-}
-
fn build_register_trigger_isi(
account_id: &AccountId,
trigger_instructions: Vec<InstructionBox>,
@@ -633,13 +610,13 @@ fn build_register_trigger_isi(
#[test]
fn call_execute_trigger_with_args() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(11_265).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
let asset_id = AssetId::new(asset_definition_id, account_id.clone());
- let prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_value = get_asset_value(&test_client, asset_id.clone());
let trigger_id = TRIGGER_NAME.parse::<TriggerId>()?;
let trigger = Trigger::new(
@@ -660,7 +637,7 @@ fn call_execute_trigger_with_args() -> Result<()> {
let call_trigger = ExecuteTrigger::new(trigger_id).with_args(args);
test_client.submit_blocking(call_trigger)?;
- let new_value = get_asset_value(&mut test_client, asset_id);
+ let new_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_value, prev_value.checked_add(numeric!(42)).unwrap());
Ok(())
diff --git a/crates/iroha/tests/integration/triggers/data_trigger.rs b/crates/iroha/tests/integration/triggers/data_trigger.rs
index 2970fe57f9b..b882356b21c 100644
--- a/crates/iroha/tests/integration/triggers/data_trigger.rs
+++ b/crates/iroha/tests/integration/triggers/data_trigger.rs
@@ -5,8 +5,8 @@ use iroha_test_samples::{gen_account_in, ALICE_ID};
#[test]
fn must_execute_both_triggers() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_650).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let account_id = ALICE_ID.clone();
let asset_definition_id = "rose#wonderland".parse()?;
diff --git a/crates/iroha/tests/integration/triggers/event_trigger.rs b/crates/iroha/tests/integration/triggers/event_trigger.rs
index b0e67f982bf..001e18acb78 100644
--- a/crates/iroha/tests/integration/triggers/event_trigger.rs
+++ b/crates/iroha/tests/integration/triggers/event_trigger.rs
@@ -1,20 +1,19 @@
use eyre::Result;
-use iroha::{
- client::{self, Client},
- data_model::prelude::*,
-};
+use iroha::data_model::prelude::*;
use iroha_test_network::*;
use iroha_test_samples::ALICE_ID;
+use crate::integration::triggers::get_asset_value;
+
#[test]
fn test_mint_asset_when_new_asset_definition_created() -> Result<()> {
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_770).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse()?;
let account_id = ALICE_ID.clone();
let asset_id = AssetId::new(asset_definition_id, account_id.clone());
- let prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let prev_value = get_asset_value(&test_client, asset_id.clone());
let instruction = Mint::asset_numeric(1u32, asset_id.clone());
let register_trigger = Register::trigger(Trigger::new(
@@ -33,22 +32,8 @@ fn test_mint_asset_when_new_asset_definition_created() -> Result<()> {
Register::asset_definition(AssetDefinition::numeric(tea_definition_id));
test_client.submit_blocking(register_tea_definition)?;
- let new_value = get_asset_value(&mut test_client, asset_id);
+ let new_value = get_asset_value(&test_client, asset_id);
assert_eq!(new_value, prev_value.checked_add(Numeric::ONE).unwrap());
Ok(())
}
-
-fn get_asset_value(client: &mut Client, asset_id: AssetId) -> Numeric {
- let asset = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.eq(asset_id))
- .execute_single()
- .unwrap();
-
- let AssetValue::Numeric(val) = *asset.value() else {
- panic!("Unexpected asset value");
- };
-
- val
-}
diff --git a/crates/iroha/tests/integration/triggers/mod.rs b/crates/iroha/tests/integration/triggers/mod.rs
index f0d2c08b2d6..74c374352cc 100644
--- a/crates/iroha/tests/integration/triggers/mod.rs
+++ b/crates/iroha/tests/integration/triggers/mod.rs
@@ -1,6 +1,24 @@
+use assert_matches::assert_matches;
+use iroha::{client, client::Client};
+use iroha_data_model::{
+ asset::{AssetId, AssetValue},
+ prelude::{Numeric, QueryBuilderExt},
+};
+
mod by_call_trigger;
mod data_trigger;
mod event_trigger;
mod orphans;
+// FIXME: rewrite all in async and with shorter timings
mod time_trigger;
mod trigger_rollback;
+
+fn get_asset_value(client: &Client, asset_id: AssetId) -> Numeric {
+ let asset = client
+ .query(client::asset::all())
+ .filter_with(|asset| asset.id.eq(asset_id))
+ .execute_single()
+ .unwrap();
+
+ assert_matches!(*asset.value(), AssetValue::Numeric(val) => val)
+}
diff --git a/crates/iroha/tests/integration/triggers/orphans.rs b/crates/iroha/tests/integration/triggers/orphans.rs
index 19b497a4e74..725007dbc64 100644
--- a/crates/iroha/tests/integration/triggers/orphans.rs
+++ b/crates/iroha/tests/integration/triggers/orphans.rs
@@ -2,25 +2,19 @@ use iroha::{
client::Client,
data_model::{prelude::*, query::trigger::FindTriggers},
};
-use iroha_test_network::{wait_for_genesis_committed, Peer, PeerBuilder};
+use iroha_test_network::*;
use iroha_test_samples::gen_account_in;
-use tokio::runtime::Runtime;
-fn find_trigger(iroha: &Client, trigger_id: TriggerId) -> Option<TriggerId> {
+fn find_trigger(iroha: &Client, trigger_id: &TriggerId) -> Option<TriggerId> {
iroha
.query(FindTriggers::new())
- .filter_with(|trigger| trigger.id.eq(trigger_id))
+ .filter_with(|trigger| trigger.id.eq(trigger_id.clone()))
.execute_single()
.ok()
.map(|trigger| trigger.id)
}
-fn set_up_trigger(
- port: u16,
-) -> eyre::Result<(Runtime, Peer, Client, DomainId, AccountId, TriggerId)> {
- let (rt, peer, iroha) = <PeerBuilder>::new().with_port(port).start_with_runtime();
- wait_for_genesis_committed(&[iroha.clone()], 0);
-
+fn set_up_trigger(iroha: &Client) -> eyre::Result<(DomainId, AccountId, TriggerId)> {
let failand: DomainId = "failand".parse()?;
let create_failand = Register::domain(Domain::new(failand.clone()));
@@ -43,36 +37,33 @@ fn set_up_trigger(
create_the_one_who_fails.into(),
register_fail_on_account_events.into(),
])?;
- Ok((
- rt,
- peer,
- iroha,
- failand,
- the_one_who_fails,
- fail_on_account_events,
- ))
+ Ok((failand, the_one_who_fails, fail_on_account_events))
}
#[test]
fn trigger_must_be_removed_on_action_authority_account_removal() -> eyre::Result<()> {
- let (_rt, _peer, iroha, _, the_one_who_fails, fail_on_account_events) = set_up_trigger(10_565)?;
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let iroha = network.client();
+ let (_, the_one_who_fails, fail_on_account_events) = set_up_trigger(&iroha)?;
assert_eq!(
- find_trigger(&iroha, fail_on_account_events.clone()),
+ find_trigger(&iroha, &fail_on_account_events),
Some(fail_on_account_events.clone())
);
iroha.submit_blocking(Unregister::account(the_one_who_fails.clone()))?;
- assert_eq!(find_trigger(&iroha, fail_on_account_events.clone()), None);
+ assert_eq!(find_trigger(&iroha, &fail_on_account_events), None);
Ok(())
}
#[test]
fn trigger_must_be_removed_on_action_authority_domain_removal() -> eyre::Result<()> {
- let (_rt, _peer, iroha, failand, _, fail_on_account_events) = set_up_trigger(10_505)?;
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let iroha = network.client();
+ let (failand, _, fail_on_account_events) = set_up_trigger(&iroha)?;
assert_eq!(
- find_trigger(&iroha, fail_on_account_events.clone()),
+ find_trigger(&iroha, &fail_on_account_events),
Some(fail_on_account_events.clone())
);
iroha.submit_blocking(Unregister::domain(failand.clone()))?;
- assert_eq!(find_trigger(&iroha, fail_on_account_events.clone()), None);
+ assert_eq!(find_trigger(&iroha, &fail_on_account_events), None);
Ok(())
}
diff --git a/crates/iroha/tests/integration/triggers/time_trigger.rs b/crates/iroha/tests/integration/triggers/time_trigger.rs
index 94d5a79d662..be1e9fc49cc 100644
--- a/crates/iroha/tests/integration/triggers/time_trigger.rs
+++ b/crates/iroha/tests/integration/triggers/time_trigger.rs
@@ -6,7 +6,6 @@ use iroha::{
data_model::{
asset::AssetId,
events::pipeline::{BlockEventFilter, BlockStatus},
- parameter::SumeragiParameters,
prelude::*,
Level,
},
@@ -14,14 +13,9 @@ use iroha::{
use iroha_test_network::*;
use iroha_test_samples::{gen_account_in, load_sample_wasm, ALICE_ID};
-/// Default estimation of consensus duration.
-pub fn pipeline_time() -> Duration {
- let default_parameters = SumeragiParameters::default();
+use crate::integration::triggers::get_asset_value;
- default_parameters.pipeline_time(0, 0)
-}
-
-fn curr_time() -> core::time::Duration {
+fn curr_time() -> Duration {
use std::time::SystemTime;
SystemTime::now()
@@ -31,10 +25,12 @@ fn curr_time() -> core::time::Duration {
#[test]
fn mint_asset_after_3_sec() -> Result<()> {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(10_665).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new()
+ .with_default_pipeline_time()
+ .start_blocking()?;
+ let test_client = network.client();
// Sleep to certainly bypass time interval analyzed by genesis
- std::thread::sleep(pipeline_time());
+ std::thread::sleep(network.pipeline_time());
let asset_definition_id = "rose#wonderland"
.parse::<AssetDefinitionId>()
@@ -47,8 +43,12 @@ fn mint_asset_after_3_sec() -> Result<()> {
})?;
let start_time = curr_time();
- // Create trigger with schedule which is in the future to the new block but within block estimation time
- let schedule = TimeSchedule::starting_at(start_time + Duration::from_secs(3));
+ const GAP: Duration = Duration::from_secs(3);
+ assert!(
+ GAP < network.pipeline_time(),
+ "Schedule should be in the future but within block estimation"
+ );
+ let schedule = TimeSchedule::starting_at(start_time + GAP);
let instruction = Mint::asset_numeric(1_u32, asset_id.clone());
let register_trigger = Register::trigger(Trigger::new(
"mint_rose".parse().expect("Valid"),
@@ -69,7 +69,7 @@ fn mint_asset_after_3_sec() -> Result<()> {
assert_eq!(init_quantity, after_registration_quantity);
// Sleep long enough that trigger start is in the past
- std::thread::sleep(pipeline_time());
+ std::thread::sleep(network.pipeline_time());
test_client.submit_blocking(Log::new(Level::DEBUG, "Just to create block".to_string()))?;
let after_wait_quantity = test_client.query_single(FindAssetQuantityById {
@@ -88,14 +88,14 @@ fn mint_asset_after_3_sec() -> Result<()> {
fn pre_commit_trigger_should_be_executed() -> Result<()> {
const CHECKS_COUNT: usize = 5;
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_600).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let test_client = network.client();
let asset_definition_id = "rose#wonderland".parse().expect("Valid");
let account_id = ALICE_ID.clone();
let asset_id = AssetId::new(asset_definition_id, account_id.clone());
- let mut prev_value = get_asset_value(&mut test_client, asset_id.clone());
+ let mut prev_value = get_asset_value(&test_client, asset_id.clone());
// Start listening BEFORE submitting any transaction not to miss any block committed event
let event_listener = get_block_committed_event_listener(&test_client)?;
@@ -113,7 +113,7 @@ fn pre_commit_trigger_should_be_executed() -> Result<()> {
test_client.submit(register_trigger)?;
for _ in event_listener.take(CHECKS_COUNT) {
- let new_value = get_asset_value(&mut test_client, asset_id.clone());
+ let new_value = get_asset_value(&test_client, asset_id.clone());
assert_eq!(new_value, prev_value.checked_add(Numeric::ONE).unwrap());
prev_value = new_value;
@@ -134,8 +134,10 @@ fn mint_nft_for_every_user_every_1_sec() -> Result<()> {
const TRIGGER_PERIOD: Duration = Duration::from_millis(1000);
const EXPECTED_COUNT: u64 = 4;
- let (_rt, _peer, mut test_client) = <PeerBuilder>::new().with_port(10_780).start_with_runtime();
- wait_for_genesis_committed(&vec![test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new()
+ .with_default_pipeline_time()
+ .start_blocking()?;
+ let test_client = network.client();
let alice_id = ALICE_ID.clone();
@@ -181,7 +183,7 @@ fn mint_nft_for_every_user_every_1_sec() -> Result<()> {
// Time trigger will be executed on block commits, so we have to produce some transactions
submit_sample_isi_on_every_block_commit(
event_listener,
- &mut test_client,
+ &test_client,
&alice_id,
TRIGGER_PERIOD,
usize::try_from(EXPECTED_COUNT)?,
@@ -222,25 +224,10 @@ fn get_block_committed_event_listener(
client.listen_for_events([block_filter])
}
-/// Get asset numeric value
-fn get_asset_value(client: &mut Client, asset_id: AssetId) -> Numeric {
- let asset = client
- .query(client::asset::all())
- .filter_with(|asset| asset.id.eq(asset_id))
- .execute_single()
- .unwrap();
-
- let AssetValue::Numeric(val) = *asset.value() else {
- panic!("Unexpected asset value");
- };
-
- val
-}
-
/// Submit some sample ISIs to create new blocks
fn submit_sample_isi_on_every_block_commit(
block_committed_event_listener: impl Iterator<Item = Result<EventBox>>,
- test_client: &mut Client,
+ test_client: &Client,
account_id: &AccountId,
timeout: Duration,
times: usize,
diff --git a/crates/iroha/tests/integration/triggers/trigger_rollback.rs b/crates/iroha/tests/integration/triggers/trigger_rollback.rs
index 33215299914..9d807c326e3 100644
--- a/crates/iroha/tests/integration/triggers/trigger_rollback.rs
+++ b/crates/iroha/tests/integration/triggers/trigger_rollback.rs
@@ -8,8 +8,8 @@ use iroha_test_samples::ALICE_ID;
#[test]
fn failed_trigger_revert() -> Result<()> {
- let (_rt, _peer, client) = ::new().with_port(11_150).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
//When
let trigger_id = "trigger".parse::<TriggerId>()?;
diff --git a/crates/iroha/tests/integration/tx_chain_id.rs b/crates/iroha/tests/integration/tx_chain_id.rs
index 974211e668b..c885ed2bec6 100644
--- a/crates/iroha/tests/integration/tx_chain_id.rs
+++ b/crates/iroha/tests/integration/tx_chain_id.rs
@@ -5,8 +5,8 @@ use iroha_test_samples::gen_account_in;
#[test]
fn send_tx_with_different_chain_id() {
- let (_rt, _peer, test_client) = <PeerBuilder>::new().with_port(11_250).start_with_runtime();
- wait_for_genesis_committed(&[test_client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let test_client = network.client();
// Given
let (sender_id, sender_keypair) = gen_account_in("wonderland");
let (receiver_id, _receiver_keypair) = gen_account_in("wonderland");
@@ -31,8 +31,9 @@ fn send_tx_with_different_chain_id() {
register_asset.into(),
])
.unwrap();
- let chain_id_0 = ChainId::from("00000000-0000-0000-0000-000000000000"); // Value configured by default
+ let chain_id_0 = network.chain_id();
let chain_id_1 = ChainId::from("1");
+ assert_ne!(chain_id_0, chain_id_1);
let transfer_instruction = Transfer::asset_numeric(
AssetId::new("test_asset#wonderland".parse().unwrap(), sender_id.clone()),
@@ -49,6 +50,7 @@ fn send_tx_with_different_chain_id() {
.submit_transaction_blocking(&asset_transfer_tx_0)
.unwrap();
let _err = test_client
- .submit_transaction_blocking(&asset_transfer_tx_1)
+ // no need for "blocking" - it must be rejected synchronously
+ .submit_transaction(&asset_transfer_tx_1)
.unwrap_err();
}
diff --git a/crates/iroha/tests/integration/tx_history.rs b/crates/iroha/tests/integration/tx_history.rs
index 1b20be0054f..adcafebcf4d 100644
--- a/crates/iroha/tests/integration/tx_history.rs
+++ b/crates/iroha/tests/integration/tx_history.rs
@@ -1,22 +1,16 @@
-use std::thread;
-
use eyre::Result;
use iroha::{
client::transaction,
data_model::{prelude::*, query::parameters::Pagination},
};
-use iroha_config::parameters::actual::Root as Config;
use iroha_test_network::*;
use iroha_test_samples::ALICE_ID;
use nonzero_ext::nonzero;
-#[ignore = "ignore, more in #2851"]
#[test]
-fn client_has_rejected_and_acepted_txs_should_return_tx_history() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_715).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
-
- let pipeline_time = Config::pipeline_time();
+fn client_has_rejected_and_accepted_txs_should_return_tx_history() -> Result<()> {
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
// Given
let account_id = ALICE_ID.clone();
@@ -44,9 +38,8 @@ fn client_has_rejected_and_acepted_txs_should_return_tx_history() -> Result<()>
};
let instructions: Vec<InstructionBox> = vec![mint_asset.clone().into()];
let transaction = client.build_transaction(instructions, Metadata::default());
- client.submit_transaction(&transaction)?;
+ let _ = client.submit_transaction_blocking(&transaction);
}
- thread::sleep(pipeline_time * 5);
let transactions = client
.query(transaction::all())
diff --git a/crates/iroha/tests/integration/tx_rollback.rs b/crates/iroha/tests/integration/tx_rollback.rs
index b69828974d4..4c11cf5531e 100644
--- a/crates/iroha/tests/integration/tx_rollback.rs
+++ b/crates/iroha/tests/integration/tx_rollback.rs
@@ -5,8 +5,8 @@ use iroha_test_samples::ALICE_ID;
#[test]
fn client_sends_transaction_with_invalid_instruction_should_not_see_any_changes() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_720).start_with_runtime();
- wait_for_genesis_committed(&[client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
//When
let account_id = ALICE_ID.clone();
diff --git a/crates/iroha/tests/integration/upgrade.rs b/crates/iroha/tests/integration/upgrade.rs
index 7a2a4c81ad6..cadffa048e4 100644
--- a/crates/iroha/tests/integration/upgrade.rs
+++ b/crates/iroha/tests/integration/upgrade.rs
@@ -28,8 +28,8 @@ fn executor_upgrade_should_work() -> Result<()> {
.parse::()
.unwrap();
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_795).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
// Register `admin` domain and account
let admin_domain = Domain::new(admin_id.domain().clone());
@@ -68,8 +68,8 @@ fn executor_upgrade_should_work() -> Result<()> {
#[test]
fn executor_upgrade_should_run_migration() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_990).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
// Check that `CanUnregisterDomain` exists
assert!(client
@@ -121,8 +121,8 @@ fn executor_upgrade_should_run_migration() -> Result<()> {
#[test]
fn executor_upgrade_should_revoke_removed_permissions() -> Result<()> {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_030).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
// Permission which will be removed by executor
let can_unregister_domain = CanUnregisterDomain {
@@ -205,8 +205,8 @@ fn executor_upgrade_should_revoke_removed_permissions() -> Result<()> {
fn executor_custom_instructions_simple() -> Result<()> {
use executor_custom_data_model::simple_isi::MintAssetForAllAccounts;
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_270).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
upgrade_executor(&client, "executor_custom_instructions_simple")?;
@@ -244,8 +244,8 @@ fn executor_custom_instructions_complex() -> Result<()> {
ConditionalExpr, CoreExpr, EvaluatesTo, Expression, Greater,
};
- let (_rt, _peer, client) = PeerBuilder::new().with_port(11_275).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
let executor_fuel_limit = SetParameter::new(Parameter::Executor(SmartContractParameter::Fuel(
nonzero!(1_000_000_000_u64),
@@ -300,8 +300,8 @@ fn executor_custom_instructions_complex() -> Result<()> {
#[test]
fn migration_fail_should_not_cause_any_effects() {
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(10_980).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
let assert_domain_does_not_exist = |client: &Client, domain_id: &DomainId| {
assert!(
@@ -333,8 +333,8 @@ fn migration_fail_should_not_cause_any_effects() {
#[test]
fn migration_should_cause_upgrade_event() {
- let (rt, _peer, client) = <PeerBuilder>::new().with_port(10_995).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, rt) = NetworkBuilder::new().start_blocking().unwrap();
+ let client = network.client();
let events_client = client.clone();
let task = rt.spawn(async move {
@@ -367,14 +367,14 @@ fn migration_should_cause_upgrade_event() {
fn define_custom_parameter() -> Result<()> {
use executor_custom_data_model::parameters::DomainLimits;
- let (_rt, _peer, client) = <PeerBuilder>::new().with_port(11_325).start_with_runtime();
- wait_for_genesis_committed(&vec![client.clone()], 0);
+ let (network, _rt) = NetworkBuilder::new().start_blocking()?;
+ let client = network.client();
let long_domain_name = "0".repeat(2_usize.pow(5)).parse::<DomainId>()?;
let create_domain = Register::domain(Domain::new(long_domain_name));
client.submit_blocking(create_domain)?;
- upgrade_executor(&client, "executor_with_custom_parameter").unwrap();
+ upgrade_executor(&client, "executor_with_custom_parameter")?;
let too_long_domain_name = "1".repeat(2_usize.pow(5)).parse::<DomainId>()?;
let create_domain = Register::domain(Domain::new(too_long_domain_name));
diff --git a/crates/iroha_config_base/src/toml.rs b/crates/iroha_config_base/src/toml.rs
index 8338632e198..2b7ab4adb1a 100644
--- a/crates/iroha_config_base/src/toml.rs
+++ b/crates/iroha_config_base/src/toml.rs
@@ -286,6 +286,19 @@ impl<'a> From<&'a mut Table> for Writer<'a> {
}
}
+/// Extension trait to implement writing with [`Writer`] directly into [`Table`] in a chained manner.
+pub trait WriteExt: Sized {
+ /// See [`Writer::write`].
+ fn write(self, path: P, value: T) -> Self;
+}
+
+impl WriteExt for Table {
+ fn write(mut self, path: P, value: T) -> Self {
+ Writer::new(&mut self).write(path, value);
+ self
+ }
+}
+
#[cfg(test)]
mod tests {
use expect_test::expect;
diff --git a/crates/iroha_core/Cargo.toml b/crates/iroha_core/Cargo.toml
index ed9ab45731c..22820a3eddd 100644
--- a/crates/iroha_core/Cargo.toml
+++ b/crates/iroha_core/Cargo.toml
@@ -18,8 +18,6 @@ categories.workspace = true
workspace = true
[features]
-default = ["telemetry"]
-
# Support lightweight telemetry, including diagnostics
telemetry = []
# Support Prometheus metrics. See https://prometheus.io/.
diff --git a/crates/iroha_core/src/sumeragi/main_loop.rs b/crates/iroha_core/src/sumeragi/main_loop.rs
index 2cdf071d362..050c95c1d66 100644
--- a/crates/iroha_core/src/sumeragi/main_loop.rs
+++ b/crates/iroha_core/src/sumeragi/main_loop.rs
@@ -41,6 +41,7 @@ pub struct Sumeragi {
/// subsystem.
pub transaction_cache: Vec<AcceptedTransaction>,
/// Metrics for reporting number of view changes in current round
+ #[cfg(feature = "telemetry")]
pub view_changes_metric: iroha_telemetry::metrics::ViewChangesGauge,
/// Was there a commit in previous round?
@@ -123,60 +124,58 @@ impl Sumeragi {
&self,
latest_block: HashOf,
view_change_proof_chain: &mut ProofChain,
- ) -> (Option<BlockMessage>, bool) {
+ ) -> Result<(Option<BlockMessage>, bool), ReceiveNetworkPacketError> {
const MAX_CONTROL_MSG_IN_A_ROW: usize = 25;
let mut should_sleep = true;
for _ in 0..MAX_CONTROL_MSG_IN_A_ROW {
- if let Ok(msg) = self
- .control_message_receiver
- .try_recv()
- .map_err(|recv_error| {
- assert!(
- recv_error != mpsc::TryRecvError::Disconnected,
- "INTERNAL ERROR: Sumeragi control message pump disconnected"
- )
- })
- {
- should_sleep = false;
- if let Err(error) = view_change_proof_chain.insert_proof(
- msg.view_change_proof,
- &self.topology,
- latest_block,
- ) {
- trace!(%error, "Failed to add proof into view change proof chain")
+ match self.control_message_receiver.try_recv() {
+ Ok(msg) => {
+ should_sleep = false;
+ if let Err(error) = view_change_proof_chain.insert_proof(
+ msg.view_change_proof,
+ &self.topology,
+ latest_block,
+ ) {
+ trace!(%error, "Failed to add proof into view change proof chain")
+ }
+ }
+ Err(mpsc::TryRecvError::Disconnected) => {
+ return Err(ReceiveNetworkPacketError::ChannelDisconnected)
+ }
+ Err(err) => {
+ trace!(%err, "Failed to receive control message");
+ break;
}
- } else {
- break;
}
}
let block_msg =
- self.receive_block_message_network_packet(latest_block, view_change_proof_chain);
+ self.receive_block_message_network_packet(latest_block, view_change_proof_chain)?;
should_sleep &= block_msg.is_none();
- (block_msg, should_sleep)
+ Ok((block_msg, should_sleep))
}
fn receive_block_message_network_packet(
&self,
latest_block: HashOf,
view_change_proof_chain: &ProofChain,
- ) -> Option<BlockMessage> {
+ ) -> Result